+ ln -s /usr/share/clickhouse-test/ci/stress.py /usr/bin/stress
+ ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
+ source /attach_gdb.lib
++ source /utils.lib
+ source /stress_tests.lib
++ sysctl kernel.core_pattern=core.%e.%p-%P
kernel.core_pattern = core.%e.%p-%P
++ OK='\tOK\t\N\t'
++ FAIL='\tFAIL\t\N\t'
++ FAILURE_CONTEXT_LINES=100
++ FAILURE_CONTEXT_MAX_LINE_WIDTH=300
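The sysctl above names each core dump after the crashing process: %e is the executable name, %p the PID in the process's own namespace, and %P the PID in the initial namespace, which lets dumps produced inside the container be matched to host PIDs. The OK and FAIL variables are tab-separated result-row fragments (\N being the TSV NULL) for the sourced stress libraries to use when writing test results; a minimal sketch of how such a marker could be consumed (the target file name is illustrative):

# hypothetical use of the markers defined in stress_tests.lib
echo -e "some_check${OK}" >> test_results.tsv   # renders as: some_check<TAB>OK<TAB>\N<TAB>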
+ install_packages package_folder
+ dpkg -i package_folder/clickhouse-common-static_24.3.12.76.altinitystable+ubsan_amd64.deb
Selecting previously unselected package clickhouse-common-static.
(Reading database ... 49227 files and directories currently installed.)
Preparing to unpack .../clickhouse-common-static_24.3.12.76.altinitystable+ubsan_amd64.deb ...
Unpacking clickhouse-common-static (24.3.12.76.altinitystable+ubsan) ...
Setting up clickhouse-common-static (24.3.12.76.altinitystable+ubsan) ...
+ dpkg -i package_folder/clickhouse-common-static-dbg_24.3.12.76.altinitystable+ubsan_amd64.deb
Selecting previously unselected package clickhouse-common-static-dbg.
(Reading database ... 49256 files and directories currently installed.)
Preparing to unpack .../clickhouse-common-static-dbg_24.3.12.76.altinitystable+ubsan_amd64.deb ...
Unpacking clickhouse-common-static-dbg (24.3.12.76.altinitystable+ubsan) ...
Setting up clickhouse-common-static-dbg (24.3.12.76.altinitystable+ubsan) ...
+ dpkg -i package_folder/clickhouse-server_24.3.12.76.altinitystable+ubsan_amd64.deb
Selecting previously unselected package clickhouse-server.
(Reading database ... 49265 files and directories currently installed.)
Preparing to unpack .../clickhouse-server_24.3.12.76.altinitystable+ubsan_amd64.deb ...
Unpacking clickhouse-server (24.3.12.76.altinitystable+ubsan) ...
Setting up clickhouse-server (24.3.12.76.altinitystable+ubsan) ...
ClickHouse binary is already located at /usr/bin/clickhouse
Symlink /usr/bin/clickhouse-server already exists but it points to /clickhouse. Will replace the old symlink to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-server to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-client to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-local to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-benchmark to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-obfuscator to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-git-import to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-compressor to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-format to /usr/bin/clickhouse.
Symlink /usr/bin/clickhouse-extract-from-config already exists but it points to /clickhouse. Will replace the old symlink to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-extract-from-config to /usr/bin/clickhouse.
Symlink /usr/bin/clickhouse-keeper already exists but it points to /clickhouse. Will replace the old symlink to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-keeper to /usr/bin/clickhouse.
Symlink /usr/bin/clickhouse-keeper-converter already exists but it points to /clickhouse. Will replace the old symlink to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-keeper-converter to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-disks to /usr/bin/clickhouse.
Creating symlink /usr/bin/ch to /usr/bin/clickhouse.
Creating symlink /usr/bin/chl to /usr/bin/clickhouse.
Creating symlink /usr/bin/chc to /usr/bin/clickhouse.
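All of these tools are one multi-call binary: the installer symlinks every name to /usr/bin/clickhouse, and the entry point dispatches on the name it was invoked under (argv[0]), with ch, chl and chc as short aliases. The two invocation styles below are equivalent:

# the symlink name and the explicit subcommand resolve to the same code path
readlink -f /usr/bin/clickhouse-client     # -> /usr/bin/clickhouse
clickhouse-client --query 'SELECT 1'
clickhouse client --query 'SELECT 1'       # same, via the dispatcher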
Creating clickhouse group if it does not exist.
groupadd -r clickhouse
Creating clickhouse user if it does not exist.
useradd -r --shell /bin/false --home-dir /nonexistent -g clickhouse clickhouse
Will set ulimits for clickhouse user in /etc/security/limits.d/clickhouse.conf.
Creating config directory /etc/clickhouse-server/config.d that is used for tweaks of main server configuration.
Creating config directory /etc/clickhouse-server/users.d that is used for tweaks of users configuration.
Config file /etc/clickhouse-server/config.xml already exists, will keep it and extract path info from it.
/etc/clickhouse-server/config.xml has /var/lib/clickhouse/ as data path.
/etc/clickhouse-server/config.xml has /var/log/clickhouse-server/ as log path.
Users config file /etc/clickhouse-server/users.xml already exists, will keep it and extract users info from it.
Log directory /var/log/clickhouse-server/ already exists.
Creating data directory /var/lib/clickhouse/.
Creating pid directory /var/run/clickhouse-server.
chown -R clickhouse:clickhouse '/var/log/clickhouse-server/'
chown -R clickhouse:clickhouse '/var/run/clickhouse-server'
chown clickhouse:clickhouse '/var/lib/clickhouse/'
groupadd -r clickhouse-bridge
useradd -r --shell /bin/false --home-dir /nonexistent -g clickhouse-bridge clickhouse-bridge
chown -R clickhouse-bridge:clickhouse-bridge '/usr/bin/clickhouse-odbc-bridge'
chown -R clickhouse-bridge:clickhouse-bridge '/usr/bin/clickhouse-library-bridge'
Password for the default user is an empty string. See /etc/clickhouse-server/users.xml and /etc/clickhouse-server/users.d to change it.
Setting capabilities for clickhouse binary. This is optional.
chown -R clickhouse:clickhouse '/etc/clickhouse-server'
ClickHouse has been successfully installed.
Start clickhouse-server with:
sudo clickhouse start
Start clickhouse-client with:
clickhouse-client
+ dpkg -i package_folder/clickhouse-client_24.3.12.76.altinitystable+ubsan_amd64.deb
Selecting previously unselected package clickhouse-client.
(Reading database ... 49282 files and directories currently installed.)
Preparing to unpack .../clickhouse-client_24.3.12.76.altinitystable+ubsan_amd64.deb ...
Unpacking clickhouse-client (24.3.12.76.altinitystable+ubsan) ...
Setting up clickhouse-client (24.3.12.76.altinitystable+ubsan) ...
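install_packages (from the sourced libraries) evidently installs the four debs in dependency order: the common-static binary first, then its debug symbols, then server and client. A plausible shape of the helper, assuming it simply globs the folder passed as $1:

# a sketch of install_packages; the real helper lives in the sourced test libs
install_packages() {
    dpkg -i "$1"/clickhouse-common-static_*.deb
    dpkg -i "$1"/clickhouse-common-static-dbg_*.deb
    dpkg -i "$1"/clickhouse-server_*.deb
    dpkg -i "$1"/clickhouse-client_*.deb
}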
+ export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000
+ THREAD_FUZZER_CPU_TIME_PERIOD_US=1000
+ export THREAD_FUZZER_SLEEP_PROBABILITY=0.1
+ THREAD_FUZZER_SLEEP_PROBABILITY=0.1
+ export THREAD_FUZZER_SLEEP_TIME_US_MAX=100000
+ THREAD_FUZZER_SLEEP_TIME_US_MAX=100000
+ export THREAD_FUZZER_pthread_mutex_lock_BEFORE_MIGRATE_PROBABILITY=1
+ THREAD_FUZZER_pthread_mutex_lock_BEFORE_MIGRATE_PROBABILITY=1
+ export THREAD_FUZZER_pthread_mutex_lock_AFTER_MIGRATE_PROBABILITY=1
+ THREAD_FUZZER_pthread_mutex_lock_AFTER_MIGRATE_PROBABILITY=1
+ export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_MIGRATE_PROBABILITY=1
+ THREAD_FUZZER_pthread_mutex_unlock_BEFORE_MIGRATE_PROBABILITY=1
+ export THREAD_FUZZER_pthread_mutex_unlock_AFTER_MIGRATE_PROBABILITY=1
+ THREAD_FUZZER_pthread_mutex_unlock_AFTER_MIGRATE_PROBABILITY=1
+ export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_PROBABILITY=0.001
+ THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_PROBABILITY=0.001
+ export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_PROBABILITY=0.001
+ THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_PROBABILITY=0.001
+ export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_PROBABILITY=0.001
+ THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_PROBABILITY=0.001
+ export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_PROBABILITY=0.001
+ THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_PROBABILITY=0.001
+ export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US_MAX=10000
+ THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US_MAX=10000
+ export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US_MAX=10000
+ THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US_MAX=10000
+ export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US_MAX=10000
+ THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US_MAX=10000
+ export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US_MAX=10000
+ THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US_MAX=10000
+ export THREAD_FUZZER_EXPLICIT_SLEEP_PROBABILITY=0.01
+ THREAD_FUZZER_EXPLICIT_SLEEP_PROBABILITY=0.01
+ export THREAD_FUZZER_EXPLICIT_MEMORY_EXCEPTION_PROBABILITY=0.01
+ THREAD_FUZZER_EXPLICIT_MEMORY_EXCEPTION_PROBABILITY=0.01
+ export ZOOKEEPER_FAULT_INJECTION=1
+ ZOOKEEPER_FAULT_INJECTION=1
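These variables drive ClickHouse's built-in ThreadFuzzer: the generic knobs set a CPU-time signal period and a global sleep probability, while the per-hook ones follow the pattern THREAD_FUZZER_<wrapped_call>_<BEFORE|AFTER>_<MIGRATE|SLEEP>_..., so every pthread_mutex_lock/unlock can randomly migrate the thread to another CPU or inject a sleep of up to the configured microseconds, shaking out race conditions. ZOOKEEPER_FAULT_INJECTION=1 is picked up by configure below to swap in the fault-injecting ZooKeeper config. To see the resulting knob set at a glance:

# list every fuzzer knob exported above
env | grep '^THREAD_FUZZER_' | sort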
+ configure
+ export USE_DATABASE_ORDINARY=1
+ USE_DATABASE_ORDINARY=1
+ export EXPORT_S3_STORAGE_POLICIES=1
+ EXPORT_S3_STORAGE_POLICIES=1
+ /usr/share/clickhouse-test/config/install.sh
+ DEST_SERVER_PATH=/etc/clickhouse-server
+ DEST_CLIENT_PATH=/etc/clickhouse-client
+++ dirname /usr/share/clickhouse-test/config/install.sh
++ cd /usr/share/clickhouse-test/config
++ pwd -P
+ SRC_PATH=/usr/share/clickhouse-test/config
+ echo 'Going to install test configs from /usr/share/clickhouse-test/config into /etc/clickhouse-server'
Going to install test configs from /usr/share/clickhouse-test/config into /etc/clickhouse-server
+ mkdir -p /etc/clickhouse-server/config.d/
+ mkdir -p /etc/clickhouse-server/users.d/
+ mkdir -p /etc/clickhouse-client
+ ln -sf /usr/share/clickhouse-test/config/config.d/zookeeper_write.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/max_num_to_warn.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/listen.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/text_log.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/blob_storage_log.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/custom_settings_prefixes.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/enable_access_control_improvements.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/macros.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/secure_ports.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/clusters.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/graphite.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/graphite_alternative.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/database_atomic.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/max_concurrent_queries.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/merge_tree_settings.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/backoff_failed_mutation.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/merge_tree_old_dirs_cleanup.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/test_cluster_with_incorrect_pw.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/keeper_port.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/logging_no_rotate.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/merge_tree.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/lost_forever_check.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/tcp_with_proxy.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/prometheus.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/top_level_domains_lists.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/top_level_domains_path.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/transactions.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/encryption.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/CORS.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/zookeeper_log.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/logger_trace.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/named_collection.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/ssl_certs.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/filesystem_cache_log.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/session_log.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/system_unfreeze.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/enable_zero_copy_replication.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/nlp.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/forbidden_headers.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/enable_keeper_map.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/custom_disks_base_path.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/display_name.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/reverse_dns_query_function.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/compressed_marks_and_index.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/disable_s3_env_credentials.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/enable_wait_for_shutdown_replicated_tables.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/backups.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/filesystem_caches_path.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/validate_tcp_client_information.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/zero_copy_destructive_operations.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/block_number.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/handlers.yaml /etc/clickhouse-server/config.d/
+ '[' /etc/clickhouse-server = /etc/clickhouse-server ']'
+ ln -sf /usr/share/clickhouse-test/config/config.d/legacy_geobase.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/log_queries.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/readonly.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/access_management.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/database_atomic_drop_detach_sync.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/opentelemetry.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/remote_queries.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/session_log_test.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/memory_profiler.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/no_fsync_metadata.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/filelog.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/enable_blobs_check.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/marks.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/insert_keeper_retries.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/prefetch_settings.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/nonconst_timezone.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/allow_introspection_functions.yaml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/replicated_ddl_entry.xml /etc/clickhouse-server/users.d/
+ [[ -n '' ]]
+ ln -sf /usr/share/clickhouse-test/config/users.d/timeouts.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
+ ln -sf /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
+ ln -sf /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
+ ln -sf /usr/share/clickhouse-test/config/executable_dictionary.xml /etc/clickhouse-server/
+ ln -sf /usr/share/clickhouse-test/config/executable_pool_dictionary.xml /etc/clickhouse-server/
+ ln -sf /usr/share/clickhouse-test/config/test_function.xml /etc/clickhouse-server/
+ ln -sf /usr/share/clickhouse-test/config/top_level_domains /etc/clickhouse-server/
+ ln -sf /usr/share/clickhouse-test/config/regions_hierarchy.txt /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/regions_names_en.txt /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/ext-en.txt /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/ext-ru.txt /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/lem-en.bin /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/
+ ln -sf /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
+ ln -sf /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/
+ ln -sf --backup=simple --suffix=_original.xml /usr/share/clickhouse-test/config/config.d/query_masking_rules.xml /etc/clickhouse-server/config.d/
+ [[ -n 1 ]]
+ [[ 1 -eq 1 ]]
+ rm -f /etc/clickhouse-server/config.d/zookeeper.xml
+ ln -sf /usr/share/clickhouse-test/config/config.d/zookeeper_fault_injection.xml /etc/clickhouse-server/config.d/
+ [[ -n '' ]]
+ rm -f /etc/clickhouse-server/config.d/cannot_allocate_thread_injection.xml
+ value=1
+ sed --follow-symlinks -i 's|[01]|1|' /etc/clickhouse-server/config.d/keeper_port.xml
+ value=59768832
+ sed --follow-symlinks -i 's|[[:digit:]]\+|59768832|' /etc/clickhouse-server/config.d/keeper_port.xml
+ value=55238656
+ sed --follow-symlinks -i 's|[[:digit:]]\+|55238656|' /etc/clickhouse-server/config.d/keeper_port.xml
+ [[ -n '' ]]
+ [[ -n 1 ]]
+ [[ 1 -eq 1 ]]
+ ln -sf /usr/share/clickhouse-test/config/users.d/database_ordinary.xml /etc/clickhouse-server/users.d/
+ [[ -n '' ]]
+ ARM=aarch64
++ uname -m
+ OS=x86_64
+ [[ -n 1 ]]
+ echo x86_64
x86_64
+ [[ '' -eq 1 ]]
+ [[ x86_64 == \a\a\r\c\h\6\4 ]]
+ echo 'Adding azure configuration'
Adding azure configuration
+ ln -sf /usr/share/clickhouse-test/config/config.d/azure_storage_conf.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/storage_conf.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/storage_conf_02944.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/storage_conf_02963.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/storage_conf_02961.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/s3_cache.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/s3_cache_new.xml /etc/clickhouse-server/users.d/
+ [[ -n '' ]]
+ ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml
+ sudo cat /etc/clickhouse-server/config.d/keeper_port.xml
+ sed 's|100000|10000|'
+ sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
+ [[ -n 1 ]]
+ [[ 1 -eq 1 ]]
+ randomize_config_boolean_value filtered_list keeper_port
+ value=1
+ sudo cat /etc/clickhouse-server/config.d/keeper_port.xml
+ sed 's|[01]|1|'
+ sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
+ randomize_config_boolean_value multi_read keeper_port
+ value=1
+ sudo cat /etc/clickhouse-server/config.d/keeper_port.xml
+ sed 's|[01]|1|'
+ sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
+ randomize_config_boolean_value check_not_exists keeper_port
+ value=0
+ sudo cat /etc/clickhouse-server/config.d/keeper_port.xml
+ sed 's|[01]|0|'
+ sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
+ randomize_config_boolean_value create_if_not_exists keeper_port
+ value=1
+ sudo cat /etc/clickhouse-server/config.d/keeper_port.xml
+ sed 's|[01]|1|'
+ sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
+ sudo chown clickhouse /etc/clickhouse-server/config.d/keeper_port.xml
+ sudo chgrp clickhouse /etc/clickhouse-server/config.d/keeper_port.xml
+ [[ -n 1 ]]
+ [[ 1 -eq 1 ]]
+ randomize_config_boolean_value use_compression zookeeper_fault_injection
+ value=0
+ sudo cat /etc/clickhouse-server/config.d/zookeeper_fault_injection.xml
+ sed 's|[01]|0|'
+ sudo mv /etc/clickhouse-server/config.d/zookeeper_fault_injection.xml.tmp /etc/clickhouse-server/config.d/zookeeper_fault_injection.xml
+ randomize_config_boolean_value allow_experimental_block_number_column block_number
+ value=1
+ sudo cat /etc/clickhouse-server/config.d/block_number.xml
+ sed 's|[01]|1|'
+ sudo mv /etc/clickhouse-server/config.d/block_number.xml.tmp /etc/clickhouse-server/config.d/block_number.xml
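Each randomize_config_boolean_value call picks a random 0/1 for one boolean setting and rewrites the corresponding config.d file; xtrace does not print redirections, which is why the .tmp file only becomes visible in the mv. A sketch consistent with the trace (the real sed pattern in stress_tests.lib may anchor on the XML tag rather than the first bare [01]):

randomize_config_boolean_value() {
    local setting=$1 file=$2
    local value=$((RANDOM % 2))    # 0 or 1, as seen in the value=... lines above
    sudo cat "/etc/clickhouse-server/config.d/${file}.xml" \
        | sed "s|<${setting}>[01]</${setting}>|<${setting}>${value}</${setting}>|" \
        > "/etc/clickhouse-server/config.d/${file}.xml.tmp"
    sudo mv "/etc/clickhouse-server/config.d/${file}.xml.tmp" \
            "/etc/clickhouse-server/config.d/${file}.xml"
}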
+ echo 'ASAN_OPTIONS='\''malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'\'''
+ export 'ASAN_OPTIONS=malloc_context_size=10 allocator_release_to_os_interval_ms=10000'
+ ASAN_OPTIONS='malloc_context_size=10 allocator_release_to_os_interval_ms=10000'
+ sudo chown root: /var/lib/clickhouse
+ echo '1'
+ local total_mem
++ awk '/MemTotal/ { print $(NF-1) }' /proc/meminfo
+ total_mem=32086440
+ total_mem=32856514560
+ max_server_memory_usage_to_ram_ratio=0.5
+ echo 'Setting max_server_memory_usage_to_ram_ratio to 0.5'
Setting max_server_memory_usage_to_ram_ratio to 0.5
+ cat
+ local max_users_mem
+ max_users_mem=9856954368
+ echo 'Setting max_memory_usage_for_user=9856954368 and max_memory_usage for queries to 10G'
Setting max_memory_usage_for_user=9856954368 and max_memory_usage for queries to 10G
+ cat
+ cat
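The arithmetic above: MemTotal is read in kB (32086440), multiplied by 1024 into bytes (32856514560), and max_users_mem is 30% of that (32856514560 * 3 / 10 = 9856954368); the bare cat lines are heredocs writing config overrides, their output redirections hidden by xtrace. Reconstructed:

# how the limits above are derived (values from this run)
total_mem=$(awk '/MemTotal/ { print $(NF-1) }' /proc/meminfo)   # 32086440 kB
total_mem=$(( total_mem * 1024 ))                               # 32856514560 bytes
max_users_mem=$(( total_mem * 3 / 10 ))                         # 9856954368 (30%)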
+ ./setup_minio.sh stateless
+ azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log
+ export MINIO_ROOT_USER=clickhouse
+ MINIO_ROOT_USER=clickhouse
+ export MINIO_ROOT_PASSWORD=clickhouse
+ MINIO_ROOT_PASSWORD=clickhouse
+ main stateless
+ local query_dir
++ check_arg stateless
++ local query_dir
++ '[' '!' 1 -eq 1 ']'
++ case "$1" in
++ query_dir=0_stateless
++ echo 0_stateless
+ query_dir=0_stateless
+ '[' '!' -f ./minio ']'
+ start_minio
+ mkdir -p ./minio_data
+ ./minio --version
Azurite Blob service is starting on 0.0.0.0:10000
Azurite Blob service successfully listens on http://0.0.0.0:10000
minio version RELEASE.2022-01-03T18-22-58Z
+ wait_for_it
+ ./minio server --address :11111 ./minio_data
+ local counter=0
+ local max_counter=60
+ local url=http://localhost:11111
+ params=('--silent' '--verbose')
+ local params
+ curl --silent --verbose http://localhost:11111
+ grep AccessDenied
+ [[ 0 == \6\0 ]]
+ echo 'trying to connect to minio'
trying to connect to minio
+ sleep 1
+ counter=1
+ curl --silent --verbose http://localhost:11111
+ grep AccessDenied
+ [[ 1 == \6\0 ]]
+ echo 'trying to connect to minio'
trying to connect to minio
+ sleep 1
+ counter=2
+ curl --silent --verbose http://localhost:11111
+ grep AccessDenied
+ [[ 2 == \6\0 ]]
+ echo 'trying to connect to minio'
trying to connect to minio
+ sleep 1
+ counter=3
+ curl --silent --verbose http://localhost:11111
+ grep AccessDenied
+ [[ 3 == \6\0 ]]
+ echo 'trying to connect to minio'
trying to connect to minio
+ sleep 1
+ counter=4
+ curl --silent --verbose http://localhost:11111
+ grep AccessDenied
+ [[ 4 == \6\0 ]]
+ echo 'trying to connect to minio'
trying to connect to minio
+ sleep 1
+ counter=5
+ curl --silent --verbose http://localhost:11111
+ grep AccessDenied
+ [[ 5 == \6\0 ]]
+ echo 'trying to connect to minio'
trying to connect to minio
+ sleep 1
+ counter=6
+ curl --silent --verbose http://localhost:11111
+ grep AccessDenied
+ [[ 6 == \6\0 ]]
+ echo 'trying to connect to minio'
trying to connect to minio
+ sleep 1
+ counter=7
+ curl --silent --verbose http://localhost:11111
+ grep AccessDenied
+ [[ 7 == \6\0 ]]
+ echo 'trying to connect to minio'
trying to connect to minio
+ sleep 1
+ counter=8
+ curl --silent --verbose http://localhost:11111
+ grep AccessDenied
+ [[ 8 == \6\0 ]]
+ echo 'trying to connect to minio'
trying to connect to minio
+ sleep 1
+ counter=9
+ curl --silent --verbose http://localhost:11111
+ grep AccessDenied
+ [[ 9 == \6\0 ]]
+ echo 'trying to connect to minio'
trying to connect to minio
+ sleep 1
+ counter=10
+ curl --silent --verbose http://localhost:11111
+ grep AccessDenied
+ [[ 10 == \6\0 ]]
+ echo 'trying to connect to minio'
trying to connect to minio
+ sleep 1
API: http://172.17.0.2:11111 http://127.0.0.1:11111
Console: http://172.17.0.2:33453 http://127.0.0.1:33453
Documentation: https://docs.min.io
WARNING: Console endpoint is listening on a dynamic port (33453), please use --console-address ":PORT" to choose a static port.
+ counter=11
+ curl --silent --verbose http://localhost:11111
+ grep AccessDenied
AccessDenied
Access Denied./180522AD2C5BA97Dcac1602a-7e56-491e-b405-da119c28fcf6
+ lsof -i :11111
COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
minio 289 root 10u IPv6 31543 0t0 TCP *:11111 (LISTEN)
+ sleep 5
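wait_for_it polls the MinIO endpoint once a second until the S3 API answers with an AccessDenied error body (proof the server is up and serving requests), giving up after 60 tries; the lsof call then confirms which process owns port 11111. A reconstruction from the trace (grep matches the XML error body that curl prints on stdout):

wait_for_it() {
    local counter=0
    local max_counter=60
    local url="http://localhost:11111"
    local params=('--silent' '--verbose')
    while ! curl "${params[@]}" "$url" | grep AccessDenied; do
        [[ $counter == "$max_counter" ]] && return 1   # give up after ~60 s
        echo 'trying to connect to minio'
        sleep 1
        counter=$((counter + 1))
    done
    lsof -i :11111
    sleep 5
}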
+ setup_minio stateless
+ local test_type=stateless
+ ./mc alias set clickminio http://localhost:11111 clickhouse clickhouse
Added `clickminio` successfully.
+ ./mc admin user add clickminio test testtest
Added user `test` successfully.
+ ./mc admin policy set clickminio readwrite user=test
Policy `readwrite` is set on user `test`
+ ./mc mb clickminio/test
Bucket created successfully `clickminio/test`.
+ '[' stateless = stateless ']'
+ ./mc policy set public clickminio/test
Access permission for `clickminio/test` is set to `public`
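The mc sequence registers the local server under the alias clickminio, creates a secondary user test/testtest with the readwrite policy, and makes a test bucket with public (anonymous) access. A quick way to verify the result, using standard mc subcommands (not part of the original script):

# optional sanity checks
./mc ls clickminio                # the `test` bucket should be listed
./mc admin user list clickminio   # should show user `test`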
+ upload_data 0_stateless /usr/share/clickhouse-test
+ local query_dir=0_stateless
+ local test_path=/usr/share/clickhouse-test
+ local data_path=/usr/share/clickhouse-test/queries/0_stateless/data_minio
++ ls /usr/share/clickhouse-test/queries/0_stateless/data_minio
+ for file in $(ls "${data_path}")
+ echo 02366_data.jsonl
02366_data.jsonl
+ ./mc cp /usr/share/clickhouse-test/queries/0_stateless/data_minio/02366_data.jsonl clickminio/test/02366_data.jsonl
`/usr/share/clickhouse-test/queries/0_stateless/data_minio/02366_data.jsonl` -> `clickminio/test/02366_data.jsonl`
Total: 0 B, Transferred: 0 B, Speed: 0 B/s
+ for file in $(ls "${data_path}")
+ echo 02731.arrow
02731.arrow
+ ./mc cp /usr/share/clickhouse-test/queries/0_stateless/data_minio/02731.arrow clickminio/test/02731.arrow
`/usr/share/clickhouse-test/queries/0_stateless/data_minio/02731.arrow` -> `clickminio/test/02731.arrow`
Total: 0 B, Transferred: 3.82 MiB, Speed: 126.43 MiB/s
+ for file in $(ls "${data_path}")
+ echo 02731.parquet
02731.parquet
+ ./mc cp /usr/share/clickhouse-test/queries/0_stateless/data_minio/02731.parquet clickminio/test/02731.parquet
`/usr/share/clickhouse-test/queries/0_stateless/data_minio/02731.parquet` -> `clickminio/test/02731.parquet`
Total: 0 B, Transferred: 1.57 MiB, Speed: 95.35 MiB/s
+ for file in $(ls "${data_path}")
+ echo 02876.parquet
02876.parquet
+ ./mc cp /usr/share/clickhouse-test/queries/0_stateless/data_minio/02876.parquet clickminio/test/02876.parquet
`/usr/share/clickhouse-test/queries/0_stateless/data_minio/02876.parquet` -> `clickminio/test/02876.parquet`
Total: 0 B, Transferred: 293 B, Speed: 38.68 KiB/s
+ for file in $(ls "${data_path}")
+ echo a.tsv
a.tsv
+ ./mc cp /usr/share/clickhouse-test/queries/0_stateless/data_minio/a.tsv clickminio/test/a.tsv
`/usr/share/clickhouse-test/queries/0_stateless/data_minio/a.tsv` -> `clickminio/test/a.tsv`
Total: 0 B, Transferred: 24 B, Speed: 3.25 KiB/s
+ for file in $(ls "${data_path}")
+ echo b.tsv
b.tsv
+ ./mc cp /usr/share/clickhouse-test/queries/0_stateless/data_minio/b.tsv clickminio/test/b.tsv
`/usr/share/clickhouse-test/queries/0_stateless/data_minio/b.tsv` -> `clickminio/test/b.tsv`
Total: 0 B, Transferred: 33 B, Speed: 5.20 KiB/s
+ for file in $(ls "${data_path}")
+ echo c.tsv
c.tsv
+ ./mc cp /usr/share/clickhouse-test/queries/0_stateless/data_minio/c.tsv clickminio/test/c.tsv
`/usr/share/clickhouse-test/queries/0_stateless/data_minio/c.tsv` -> `clickminio/test/c.tsv`
Total: 0 B, Transferred: 33 B, Speed: 5.04 KiB/s
+ for file in $(ls "${data_path}")
+ echo tsv_with_header.tsv
tsv_with_header.tsv
+ ./mc cp /usr/share/clickhouse-test/queries/0_stateless/data_minio/tsv_with_header.tsv clickminio/test/tsv_with_header.tsv
`/usr/share/clickhouse-test/queries/0_stateless/data_minio/tsv_with_header.tsv` -> `clickminio/test/tsv_with_header.tsv`
Total: 0 B, Transferred: 44 B, Speed: 6.74 KiB/s
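upload_data mirrors every fixture file from the test tree's data_minio directory into the test bucket. Reconstructed from the loop header shown verbatim in the trace:

upload_data() {
    local query_dir=$1
    local test_path=$2
    local data_path="${test_path}/queries/${query_dir}/data_minio"
    # word-splitting on ls output is tolerated here: fixture names have no spaces
    for file in $(ls "${data_path}"); do
        echo "${file}"
        ./mc cp "${data_path}/${file}" "clickminio/test/${file}"
    done
}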
+ setup_aws_credentials
+ local minio_root_user=clickhouse
+ local minio_root_password=clickhouse
+ mkdir -p /root/.aws
+ cat
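setup_aws_credentials presumably writes the MinIO root key pair where AWS SDK clients look for it; the heredoc body is invisible in the trace, so the exact contents below are an assumption:

# assumed contents of the hidden heredoc (standard AWS credentials format)
mkdir -p /root/.aws
cat > /root/.aws/credentials <<EOF
[default]
aws_access_key_id=${minio_root_user}
aws_secret_access_key=${minio_root_password}
EOF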
+ config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml
+ set +x
File /tmp/export-logs-config.sh does not exist, do not setup
+ start_server
+ counter=0
+ max_attempt=120
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 0 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=1
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 1 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=2
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 2 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=3
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 3 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=4
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 4 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=5
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 5 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=6
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 6 -gt 120 ']'
+ clickhouse start --user root
127.0.0.1 - - [05/Nov/2024:17:29:30 +0000] "PUT /devstoreaccount1/cont?restype=container HTTP/1.1" 201 -
+ sleep 0.5
127.0.0.1 - - [05/Nov/2024:17:29:30 +0000] "PUT /devstoreaccount1/cont/jdvjuprkldzddcbojihnrvahtvujsmkl?blockid=bldhfdekmehxskoodaojpfzphqzubuvxzpgcmmoniyrfiyffyonrfujqxyupqqro&comp=block HTTP/1.1" 201 -
127.0.0.1 - - [05/Nov/2024:17:29:30 +0000] "PUT /devstoreaccount1/cont/jdvjuprkldzddcbojihnrvahtvujsmkl?comp=blocklist HTTP/1.1" 201 -
127.0.0.1 - - [05/Nov/2024:17:29:30 +0000] "GET /devstoreaccount1/cont/jdvjuprkldzddcbojihnrvahtvujsmkl HTTP/1.1" 206 4
127.0.0.1 - - [05/Nov/2024:17:29:30 +0000] "GET /devstoreaccount1/cont/jdvjuprkldzddcbojihnrvahtvujsmkl HTTP/1.1" 206 2
127.0.0.1 - - [05/Nov/2024:17:29:30 +0000] "DELETE /devstoreaccount1/cont/jdvjuprkldzddcbojihnrvahtvujsmkl HTTP/1.1" 202 -
+ counter=7
+ clickhouse-client --query 'SELECT 1'
1
+ attach_gdb_to_clickhouse
++ kill -l SIGRTMIN
+ RTMIN=34
+ echo '
set follow-fork-mode parent
handle SIGHUP nostop noprint pass
handle SIGINT nostop noprint pass
handle SIGQUIT nostop noprint pass
handle SIGPIPE nostop noprint pass
handle SIGTERM nostop noprint pass
handle SIGUSR1 nostop noprint pass
handle SIGUSR2 nostop noprint pass
handle SIG34 nostop noprint pass
info signals
continue
backtrace full
thread apply all backtrace full
info registers
disassemble /s
up
disassemble /s
up
disassemble /s
p "done"
detach
quit
'
+ sleep 5
+ ts '%Y-%m-%d %H:%M:%S'
++ cat /var/run/clickhouse-server/clickhouse-server.pid
+ gdb -batch -command script.gdb -p 583
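The gdb script above tells the attached debugger to pass benign signals through, continue, and only dump backtraces, registers and disassembly if the inferior ever stops, i.e. on a crash; ts timestamps each output line. A presumed shape of the attach pipeline (the backgrounding and pipe are hidden by xtrace):

gdb -batch -command script.gdb \
    -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" \
    | ts '%Y-%m-%d %H:%M:%S' &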
+ run_with_retry 60 clickhouse-client --query 'SELECT '\''Connected to clickhouse-server after attaching gdb'\'''
+ [[ hxB =~ e ]]
+ set_e=false
+ set +e
+ local total_retries=60
+ shift
+ local retry=0
+ '[' 0 -ge 60 ']'
+ clickhouse-client --query 'SELECT '\''Connected to clickhouse-server after attaching gdb'\'''
Connected to clickhouse-server after attaching gdb
+ false
+ return
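run_with_retry records whether errexit was active ($- was hxB here, so it was not), disables it, and retries the given command until it succeeds or the retry budget runs out, restoring set -e afterwards. A sketch consistent with the trace (the inter-retry delay is an assumption; this run succeeded on the first attempt):

run_with_retry() {
    [[ $- =~ e ]] && set_e=true || set_e=false
    set +e
    local total_retries=$1
    shift
    local retry=0
    while true; do
        if [ "$retry" -ge "$total_retries" ]; then
            echo "Command '$*' failed after $total_retries retries" >&2
            break
        fi
        if "$@"; then
            break
        fi
        retry=$((retry + 1))
        sleep 5   # assumed backoff; not observable in this trace
    done
    if $set_e; then
        set -e
    fi
    return
}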
+ setup_logs_replication
+ set +x
File /tmp/export-logs-config.sh does not exist, do not setup
+ clickhouse-client --query 'CREATE DATABASE datasets'
+ clickhouse-client --multiquery
+ clickhouse-client --query 'SHOW TABLES FROM datasets'
hits_v1
visits_v1
+ clickhouse-client --query 'CREATE DATABASE IF NOT EXISTS test'
+ stop_server
+ local max_tries=90
+ local check_hang=true
+ local pid
++ cat /var/run/clickhouse-server/clickhouse-server.pid
+ pid=583
+ clickhouse stop --max-tries 90 --do-not-kill
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 583.
The process with pid = 583 is running.
Sent terminate signal to process with pid 583.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 583.
The process with pid = 583 is running.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 583.
The process with pid = 583 is running.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 583.
The process with pid = 583 is running.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 583.
The process with pid = 583 is running.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 583.
The process with pid = 583 is running.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 583.
The process with pid = 583 is running.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 583.
The process with pid = 583 is running.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 583.
The process with pid = 583 is running.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 583.
The process with pid = 583 is running.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 583.
The process with pid = 583 is running.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 583.
The process with pid = 583 is running.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 583.
The process with pid = 583 is running.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 583.
The process with pid = 583 is running.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 583.
The process with pid = 583 is running.
Waiting for server to stop
Now there is no clickhouse-server process.
Server stopped
script.gdb:13: Error in sourced command file:
No stack.
+ return
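stop_server reads the PID file and asks clickhouse stop to send SIGTERM and poll for up to 90 tries, with --do-not-kill forbidding escalation to SIGKILL. The script.gdb:13 "No stack." error is the earlier batch gdb session ending once the server exits: backtrace full has nothing to unwind, which is the expected outcome of a clean shutdown. Reconstructed from the trace:

stop_server() {
    local max_tries=90
    local check_hang=true    # declared in the trace; its use is not visible here
    local pid
    pid=$(cat /var/run/clickhouse-server/clickhouse-server.pid)
    # polite shutdown only: SIGTERM plus polling, never SIGKILL
    clickhouse stop --max-tries "$max_tries" --do-not-kill
}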
+ mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.initial.log
+ cache_policy=
+ '[' 0 -eq 1 ']'
+ cache_policy=LRU
Using cache policy: LRU
+ echo 'Using cache policy: LRU'
+ '[' LRU = SLRU ']'
+ start_server
+ counter=0
+ max_attempt=120
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 0 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=1
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 1 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=2
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 2 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=3
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 3 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=4
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 4 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
127.0.0.1 - - [05/Nov/2024:17:31:07 +0000] "PUT /devstoreaccount1/cont?restype=container HTTP/1.1" 409 -
127.0.0.1 - - [05/Nov/2024:17:31:07 +0000] "PUT /devstoreaccount1/cont/sdlhafjbowehguqemstxadnsbxscycdt?blockid=xzsbbcjovippibowkgzhpjbrguwipaxyjcdppbtevcmxzghcgfkioerdzhhnicjz&comp=block HTTP/1.1" 201 -
127.0.0.1 - - [05/Nov/2024:17:31:07 +0000] "PUT /devstoreaccount1/cont/sdlhafjbowehguqemstxadnsbxscycdt?comp=blocklist HTTP/1.1" 201 -
127.0.0.1 - - [05/Nov/2024:17:31:07 +0000] "GET /devstoreaccount1/cont/sdlhafjbowehguqemstxadnsbxscycdt HTTP/1.1" 206 4
127.0.0.1 - - [05/Nov/2024:17:31:07 +0000] "GET /devstoreaccount1/cont/sdlhafjbowehguqemstxadnsbxscycdt HTTP/1.1" 206 2
127.0.0.1 - - [05/Nov/2024:17:31:07 +0000] "DELETE /devstoreaccount1/cont/sdlhafjbowehguqemstxadnsbxscycdt HTTP/1.1" 202 -
+ counter=5
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 5 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=6
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 6 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=7
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 7 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=8
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 8 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=9
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 9 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=10
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 10 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=11
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 11 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=12
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 12 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=13
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 13 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=14
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 14 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=15
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 15 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=16
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 16 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=17
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 17 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=18
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 18 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=19
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 19 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=20
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 20 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=21
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 21 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=22
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 22 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=23
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 23 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=24
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 24 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=25
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 25 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=26
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 26 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=27
+ clickhouse-client --query 'SELECT 1'
1
+ attach_gdb_to_clickhouse
++ kill -l SIGRTMIN
+ RTMIN=34
+ echo '
set follow-fork-mode parent
handle SIGHUP nostop noprint pass
handle SIGINT nostop noprint pass
handle SIGQUIT nostop noprint pass
handle SIGPIPE nostop noprint pass
handle SIGTERM nostop noprint pass
handle SIGUSR1 nostop noprint pass
handle SIGUSR2 nostop noprint pass
handle SIG34 nostop noprint pass
info signals
continue
backtrace full
thread apply all backtrace full
info registers
disassemble /s
up
disassemble /s
up
disassemble /s
p "done"
detach
quit
'
+ sleep 5
+ ts '%Y-%m-%d %H:%M:%S'
++ cat /var/run/clickhouse-server/clickhouse-server.pid
+ gdb -batch -command script.gdb -p 1418
+ run_with_retry 60 clickhouse-client --query 'SELECT '\''Connected to clickhouse-server after attaching gdb'\'''
+ [[ hxB =~ e ]]
+ set_e=false
+ set +e
+ local total_retries=60
+ shift
+ local retry=0
+ '[' 0 -ge 60 ']'
+ clickhouse-client --query 'SELECT '\''Connected to clickhouse-server after attaching gdb'\'''
Connected to clickhouse-server after attaching gdb
+ false
+ return
+ clickhouse-client --query 'SHOW TABLES FROM datasets'
hits_v1
visits_v1
+ clickhouse-client --query 'SHOW TABLES FROM test'
+ clickhouse-client --query 'CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16,
EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32,
UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String,
Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32),
RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8,
FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2),
CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String,
IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8,
WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8,
SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32,
IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8,
IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8,
Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32,
RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2),
BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32,
DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32,
RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32,
LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32,
RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String,
ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String,
OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String,
UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64,
URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String,
ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64),
IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate)
ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='\''s3_cache'\'''
+ clickhouse-client --query 'CREATE TABLE test.hits (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16,
EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32,
UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String,
RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16),
URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8,
FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16,
UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8,
MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16,
SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16,
ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32,
SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8,
FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8,
HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8,
GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32,
HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String,
HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32,
FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32,
LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32,
RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String,
ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String,
OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String,
UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64,
URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String,
ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64),
IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate)
ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='\''s3_cache'\'''
+ clickhouse-client --query 'CREATE TABLE test.visits (CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8,
VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32,
Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String,
EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String,
AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32),
RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32,
SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32,
ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32,
SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16,
UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16,
FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8,
FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8,
Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8,
BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16),
Params Array(String), Goals Nested(ID UInt32, Serial UInt32, EventTime DateTime, Price Int64, OrderID String, CurrencyID UInt32),
WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64,
ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32,
ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32,
ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32,
ClickMarketCategoryName String, ClickMarketPPName String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16,
ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32,
OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String,
UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime,
PredLastVisit Date, LastVisit Date, TotalVisits UInt32, TraficSource Nested(ID Int8, SearchEngineID UInt16, AdvEngineID UInt8,
PlaceID UInt16, SocialSourceNetworkID UInt8, Domain String, SearchPhrase String, SocialSourcePage String), Attendance FixedString(16),
CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64,
StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64,
OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64,
UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32,
ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64),
Market Nested(Type UInt8, GoalID UInt32, OrderID String, OrderPrice Int64, PP UInt32, DirectPlaceID UInt32, DirectOrderID UInt32,
DirectBannerID UInt32, GoodID String, GoodName String, GoodQuantity Int32, GoodPrice Int64), IslandID FixedString(16))
ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='\''s3_cache'\'''
+ clickhouse-client --query 'INSERT INTO test.hits_s3 SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0'
+ clickhouse-client --query 'INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0'
+ clickhouse-client --query 'INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0'
+ clickhouse-client --query 'DROP TABLE datasets.visits_v1 SYNC'
+ clickhouse-client --query 'DROP TABLE datasets.hits_v1 SYNC'
+ clickhouse-client --query 'SHOW TABLES FROM test'
hits
hits_s3
visits
+ clickhouse-client --query 'SYSTEM STOP THREAD FUZZER'
+ stop_server
+ local max_tries=90
+ local check_hang=true
+ local pid
++ cat /var/run/clickhouse-server/clickhouse-server.pid
+ pid=1418
+ clickhouse stop --max-tries 90 --do-not-kill
script.gdb:13: Error in sourced command file:
No stack.
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 1418.
The process with pid = 1418 is running.
Sent terminate signal to process with pid 1418.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 1418.
The process with pid = 1418 is running.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 1418.
The process with pid = 1418 is running.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 1418.
The process with pid = 1418 is running.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 1418.
The process with pid = 1418 is running.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 1418.
The process with pid = 1418 is running.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 1418.
The process with pid = 1418 is running.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 1418.
The process with pid = 1418 is running.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 1418.
The process with pid = 1418 is running.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 1418.
The process with pid = 1418 is running.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 1418.
The process with pid = 1418 is running.
Waiting for server to stop
Now there is no clickhouse-server process.
Server stopped
+ return
+ export USE_S3_STORAGE_FOR_MERGE_TREE=1
+ USE_S3_STORAGE_FOR_MERGE_TREE=1
+ export RANDOMIZE_OBJECT_KEY_TYPE=1
+ RANDOMIZE_OBJECT_KEY_TYPE=1
+ export ZOOKEEPER_FAULT_INJECTION=1
+ ZOOKEEPER_FAULT_INJECTION=1
+ export THREAD_POOL_FAULT_INJECTION=1
+ THREAD_POOL_FAULT_INJECTION=1
+ configure
+ export USE_DATABASE_ORDINARY=1
+ USE_DATABASE_ORDINARY=1
+ export EXPORT_S3_STORAGE_POLICIES=1
+ EXPORT_S3_STORAGE_POLICIES=1
+ /usr/share/clickhouse-test/config/install.sh
+ DEST_SERVER_PATH=/etc/clickhouse-server
+ DEST_CLIENT_PATH=/etc/clickhouse-client
+++ dirname /usr/share/clickhouse-test/config/install.sh
++ cd /usr/share/clickhouse-test/config
++ pwd -P
Going to install test configs from /usr/share/clickhouse-test/config into /etc/clickhouse-server
+ SRC_PATH=/usr/share/clickhouse-test/config
+ echo 'Going to install test configs from /usr/share/clickhouse-test/config into /etc/clickhouse-server'
+ mkdir -p /etc/clickhouse-server/config.d/
+ mkdir -p /etc/clickhouse-server/users.d/
+ mkdir -p /etc/clickhouse-client
+ ln -sf /usr/share/clickhouse-test/config/config.d/zookeeper_write.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/max_num_to_warn.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/listen.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/text_log.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/blob_storage_log.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/custom_settings_prefixes.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/enable_access_control_improvements.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/macros.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/secure_ports.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/clusters.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/graphite.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/graphite_alternative.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/database_atomic.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/max_concurrent_queries.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/merge_tree_settings.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/backoff_failed_mutation.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/merge_tree_old_dirs_cleanup.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/test_cluster_with_incorrect_pw.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/keeper_port.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/logging_no_rotate.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/merge_tree.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/lost_forever_check.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/tcp_with_proxy.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/prometheus.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/top_level_domains_lists.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/top_level_domains_path.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/transactions.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/encryption.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/CORS.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/zookeeper_log.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/logger_trace.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/named_collection.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/ssl_certs.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/filesystem_cache_log.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/session_log.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/system_unfreeze.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/enable_zero_copy_replication.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/nlp.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/forbidden_headers.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/enable_keeper_map.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/custom_disks_base_path.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/display_name.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/reverse_dns_query_function.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/compressed_marks_and_index.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/disable_s3_env_credentials.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/enable_wait_for_shutdown_replicated_tables.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/backups.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/filesystem_caches_path.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/validate_tcp_client_information.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/zero_copy_destructive_operations.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/block_number.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/handlers.yaml /etc/clickhouse-server/config.d/
+ '[' /etc/clickhouse-server = /etc/clickhouse-server ']'
+ ln -sf /usr/share/clickhouse-test/config/config.d/legacy_geobase.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/log_queries.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/readonly.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/access_management.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/database_atomic_drop_detach_sync.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/opentelemetry.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/remote_queries.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/session_log_test.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/memory_profiler.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/no_fsync_metadata.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/filelog.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/enable_blobs_check.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/marks.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/insert_keeper_retries.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/prefetch_settings.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/nonconst_timezone.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/allow_introspection_functions.yaml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/replicated_ddl_entry.xml /etc/clickhouse-server/users.d/
+ [[ -n '' ]]
+ ln -sf /usr/share/clickhouse-test/config/users.d/timeouts.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
+ ln -sf /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
+ ln -sf /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
+ ln -sf /usr/share/clickhouse-test/config/executable_dictionary.xml /etc/clickhouse-server/
+ ln -sf /usr/share/clickhouse-test/config/executable_pool_dictionary.xml /etc/clickhouse-server/
+ ln -sf /usr/share/clickhouse-test/config/test_function.xml /etc/clickhouse-server/
+ ln -sf /usr/share/clickhouse-test/config/top_level_domains /etc/clickhouse-server/
+ ln -sf /usr/share/clickhouse-test/config/regions_hierarchy.txt /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/regions_names_en.txt /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/ext-en.txt /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/ext-ru.txt /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/lem-en.bin /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/
+ ln -sf /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
+ ln -sf /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/
+ ln -sf --backup=simple --suffix=_original.xml /usr/share/clickhouse-test/config/config.d/query_masking_rules.xml /etc/clickhouse-server/config.d/
+ [[ -n 1 ]]
+ [[ 1 -eq 1 ]]
+ rm -f /etc/clickhouse-server/config.d/zookeeper.xml
+ ln -sf /usr/share/clickhouse-test/config/config.d/zookeeper_fault_injection.xml /etc/clickhouse-server/config.d/
+ [[ -n 1 ]]
+ [[ 1 -eq 1 ]]
+ ln -sf /usr/share/clickhouse-test/config/config.d/cannot_allocate_thread_injection.xml /etc/clickhouse-server/config.d/
+ value=1
+ sed --follow-symlinks -i 's|[01]|1|' /etc/clickhouse-server/config.d/keeper_port.xml
+ value=27367424
+ sed --follow-symlinks -i 's|[[:digit:]]\+|27367424|' /etc/clickhouse-server/config.d/keeper_port.xml
+ value=22693888
+ sed --follow-symlinks -i 's|[[:digit:]]\+|22693888|' /etc/clickhouse-server/config.d/keeper_port.xml
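The three sed calls above are install.sh randomizing Keeper coordination values inside keeper_port.xml. Schematically (helper name and value ranges are assumptions; only the expanded commands appear in the trace):

    # Substitute a freshly generated value into keeper_port.xml in place,
    # following the symlink created earlier rather than replacing it.
    randomize_keeper_value() {
        local pattern=$1 value=$2
        sed --follow-symlinks -i "s|${pattern}|${value}|" \
            /etc/clickhouse-server/config.d/keeper_port.xml
    }
    randomize_keeper_value '[01]' "$((RANDOM % 2))"              # a boolean flag
    randomize_keeper_value '[[:digit:]]\+' "$((RANDOM * 1024))"  # a size in bytes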
+ [[ -n '' ]]
+ [[ -n 1 ]]
+ [[ 1 -eq 1 ]]
+ ln -sf /usr/share/clickhouse-test/config/users.d/database_ordinary.xml /etc/clickhouse-server/users.d/
+ [[ -n 1 ]]
+ [[ 1 -eq 1 ]]
+ object_key_types_options=("generate-suffix" "generate-full-key" "generate-template-key")
+ object_key_type=generate-suffix
+ [[ -n 1 ]]
+ [[ 1 -eq 1 ]]
+ object_key_type=generate-suffix
+ case $object_key_type in
+ ln -sf /usr/share/clickhouse-test/config/config.d/s3_storage_policy_by_default.xml /etc/clickhouse-server/config.d/
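With RANDOMIZE_OBJECT_KEY_TYPE=1 the script picks one of three S3 object key generation modes; this run drew generate-suffix, which maps to the default S3 storage policy. A sketch of the selection (the random draw itself is an assumption; only the options array and the chosen branch are visible in the trace):

    object_key_types_options=("generate-suffix" "generate-full-key" "generate-template-key")
    object_key_type=${object_key_types_options[$((RANDOM % ${#object_key_types_options[@]}))]}
    case $object_key_type in
        "generate-suffix")
            ln -sf "$SRC_PATH/config.d/s3_storage_policy_by_default.xml" /etc/clickhouse-server/config.d/
            ;;
        *)
            # The other two modes link alternative storage_conf variants (not exercised in this run).
            ;;
    esac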
+ ARM=aarch64
++ uname -m
+ OS=x86_64
+ [[ -n 1 ]]
+ echo x86_64
+ [[ '' -eq 1 ]]
x86_64
Adding azure configuration
+ [[ x86_64 == \a\a\r\c\h\6\4 ]]
+ echo 'Adding azure configuration'
+ ln -sf /usr/share/clickhouse-test/config/config.d/azure_storage_conf.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/storage_conf.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/storage_conf_02944.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/storage_conf_02963.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/config.d/storage_conf_02961.xml /etc/clickhouse-server/config.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/s3_cache.xml /etc/clickhouse-server/users.d/
+ ln -sf /usr/share/clickhouse-test/config/users.d/s3_cache_new.xml /etc/clickhouse-server/users.d/
+ [[ -n '' ]]
+ ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml
+ sudo cat /etc/clickhouse-server/config.d/keeper_port.xml
+ sed 's|100000|10000|'
+ sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
+ [[ -n 1 ]]
+ [[ 1 -eq 1 ]]
+ randomize_config_boolean_value filtered_list keeper_port
+ value=1
+ sudo cat /etc/clickhouse-server/config.d/keeper_port.xml
+ sed 's|[01]|1|'
+ sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
+ randomize_config_boolean_value multi_read keeper_port
+ value=0
+ sudo cat /etc/clickhouse-server/config.d/keeper_port.xml
+ sed 's|[01]|0|'
+ sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
+ randomize_config_boolean_value check_not_exists keeper_port
+ value=1
+ sudo cat /etc/clickhouse-server/config.d/keeper_port.xml
+ sed 's|[01]|1|'
+ sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
+ randomize_config_boolean_value create_if_not_exists keeper_port
+ value=1
+ sudo cat /etc/clickhouse-server/config.d/keeper_port.xml
+ sed 's|[01]|1|'
+ sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
+ sudo chown clickhouse /etc/clickhouse-server/config.d/keeper_port.xml
+ sudo chgrp clickhouse /etc/clickhouse-server/config.d/keeper_port.xml
+ [[ -n 1 ]]
+ [[ 1 -eq 1 ]]
+ randomize_config_boolean_value use_compression zookeeper_fault_injection
+ value=0
+ sudo cat /etc/clickhouse-server/config.d/zookeeper_fault_injection.xml
+ sed 's|[01]|0|'
+ sudo mv /etc/clickhouse-server/config.d/zookeeper_fault_injection.xml.tmp /etc/clickhouse-server/config.d/zookeeper_fault_injection.xml
+ randomize_config_boolean_value allow_experimental_block_number_column block_number
+ value=0
+ sudo cat /etc/clickhouse-server/config.d/block_number.xml
+ sed 's|[01]|0|'
+ sudo mv /etc/clickhouse-server/config.d/block_number.xml.tmp /etc/clickhouse-server/config.d/block_number.xml
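Each cat | sed | mv triple above is one call to randomize_config_boolean_value, which flips a single 0/1 setting in a config snippet to a random value. A sketch matching the traced commands (the real helper lives in stress_tests.lib):

    randomize_config_boolean_value() {
        local setting=$1 config=$2
        local value=$((RANDOM % 2))
        # The setting name is informational here: as traced, each snippet
        # holds exactly one 0/1 value, so a bare [01] match suffices.
        sudo cat "/etc/clickhouse-server/config.d/${config}.xml" \
            | sed "s|[01]|${value}|" \
            > "/etc/clickhouse-server/config.d/${config}.xml.tmp"
        sudo mv "/etc/clickhouse-server/config.d/${config}.xml.tmp" \
            "/etc/clickhouse-server/config.d/${config}.xml"
    }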
+ echo 'ASAN_OPTIONS='\''malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'\'''
+ export 'ASAN_OPTIONS=malloc_context_size=10 allocator_release_to_os_interval_ms=10000'
+ ASAN_OPTIONS='malloc_context_size=10 allocator_release_to_os_interval_ms=10000'
+ sudo chown root: /var/lib/clickhouse
+ echo '1'
+ local total_mem
++ awk '/MemTotal/ { print $(NF-1) }' /proc/meminfo
+ total_mem=32086440
+ total_mem=32856514560
+ max_server_memory_usage_to_ram_ratio=0.5
+ echo 'Setting max_server_memory_usage_to_ram_ratio to 0.5'
+ cat
Setting max_server_memory_usage_to_ram_ratio to 0.5
Setting max_memory_usage_for_user=9856954368 and max_memory_usage for queries to 10G
+ local max_users_mem
+ max_users_mem=9856954368
+ echo 'Setting max_memory_usage_for_user=9856954368 and max_memory_usage for queries to 10G'
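The numbers above check out: MemTotal is read in KiB from /proc/meminfo, converted to bytes, and the per-user memory cap is set to 30% of total RAM (max_server_memory_usage_to_ram_ratio=0.5 separately caps the server itself at half of RAM):

    total_mem=$(awk '/MemTotal/ { print $(NF-1) }' /proc/meminfo)  # 32086440 (KiB)
    total_mem=$((total_mem * 1024))            # 32856514560 bytes
    max_users_mem=$((total_mem * 30 / 100))    # 9856954368 bytes = 30% of RAM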
+ cat
+ cat
+ sudo cat /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
+ sed 's|s3|s3default|'
+ mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
+ sudo chown clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
+ sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
+ sudo cat /etc/clickhouse-server/config.d/logger_trace.xml
+ sed 's|trace|test|'
+ mv /etc/clickhouse-server/config.d/logger_trace.xml.tmp /etc/clickhouse-server/config.d/logger_trace.xml
+ '[' LRU = SLRU ']'
++ date +%-d
+ '[' 1 -eq 1 ']'
+ sudo echo 'true'
+ start_server
+ counter=0
+ max_attempt=120
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 0 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
127.0.0.1 - - [05/Nov/2024:17:44:59 +0000] "PUT /devstoreaccount1/cont?restype=container HTTP/1.1" 409 -
127.0.0.1 - - [05/Nov/2024:17:44:59 +0000] "PUT /devstoreaccount1/cont/yttjahmmzorznqitxbccoaaxmkyhbbas?blockid=vbmvdzczuzzhhmlvlbesdkixxemrocomvmktgbdsmjiipyzepnchaixmtiqjcnlc&comp=block HTTP/1.1" 201 -
127.0.0.1 - - [05/Nov/2024:17:44:59 +0000] "PUT /devstoreaccount1/cont/yttjahmmzorznqitxbccoaaxmkyhbbas?comp=blocklist HTTP/1.1" 201 -
127.0.0.1 - - [05/Nov/2024:17:44:59 +0000] "GET /devstoreaccount1/cont/yttjahmmzorznqitxbccoaaxmkyhbbas HTTP/1.1" 206 4
127.0.0.1 - - [05/Nov/2024:17:44:59 +0000] "GET /devstoreaccount1/cont/yttjahmmzorznqitxbccoaaxmkyhbbas HTTP/1.1" 206 2
127.0.0.1 - - [05/Nov/2024:17:45:00 +0000] "DELETE /devstoreaccount1/cont/yttjahmmzorznqitxbccoaaxmkyhbbas HTTP/1.1" 202 -
+ counter=1
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 1 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
[... the "SELECT 1" probe / "clickhouse start" / "sleep 0.5" cycle repeats with counter=2..7, each probe refused with NETWORK_ERROR ...]
+ counter=8
+ clickhouse-client --query 'SELECT 1'
1
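The start loop above, as a sketch (reconstructed from the trace; the failure path past max_attempt is an assumption, since this run connected on the eighth probe):

    start_server() {
        local counter=0 max_attempt=120
        # Probe the TCP port with a trivial query; (re)issue 'clickhouse start'
        # and wait half a second between attempts until the probe succeeds.
        until clickhouse-client --query 'SELECT 1'; do
            if [ "$counter" -gt "$max_attempt" ]; then
                echo "Cannot start clickhouse-server" >&2
                return 1
            fi
            clickhouse start --user root
            sleep 0.5
            counter=$((counter + 1))
        done
    }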
+ attach_gdb_to_clickhouse
++ kill -l SIGRTMIN
+ RTMIN=34
+ echo '
set follow-fork-mode parent
handle SIGHUP nostop noprint pass
handle SIGINT nostop noprint pass
handle SIGQUIT nostop noprint pass
handle SIGPIPE nostop noprint pass
handle SIGTERM nostop noprint pass
handle SIGUSR1 nostop noprint pass
handle SIGUSR2 nostop noprint pass
handle SIG34 nostop noprint pass
info signals
continue
backtrace full
thread apply all backtrace full
info registers
disassemble /s
up
disassemble /s
up
disassemble /s
p "done"
detach
quit
'
+ sleep 5
+ ts '%Y-%m-%d %H:%M:%S'
++ cat /var/run/clickhouse-server/clickhouse-server.pid
+ gdb -batch -command script.gdb -p 2728
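The script echoed above is fed to gdb in batch mode: the handle lines let the fuzzer's signals pass through without stopping the server, continue keeps gdb attached, and the backtrace/disassemble commands only fire if the process later crashes. The attach step itself looks roughly like this (backgrounding is assumed, since the trace proceeds immediately; ts timestamps the gdb output as shown above):

    pid=$(cat /var/run/clickhouse-server/clickhouse-server.pid)
    gdb -batch -command script.gdb -p "$pid" | ts '%Y-%m-%d %H:%M:%S' &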
+ run_with_retry 60 clickhouse-client --query 'SELECT '\''Connected to clickhouse-server after attaching gdb'\'''
+ [[ hxB =~ e ]]
+ set_e=false
+ set +e
+ local total_retries=60
+ shift
+ local retry=0
+ '[' 0 -ge 60 ']'
+ clickhouse-client --query 'SELECT '\''Connected to clickhouse-server after attaching gdb'\'''
Connected to clickhouse-server after attaching gdb
+ false
+ return
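run_with_retry, as reconstructed from the trace: it saves the errexit flag (the [[ hxB =~ e ]] test is checking $-), disables it, then retries the command up to the given count. The sleep between attempts is an assumption; this run succeeded on the first try:

    run_with_retry() {
        # Remember whether 'set -e' was active, then turn it off for the retries.
        [[ $- =~ e ]] && set_e=true || set_e=false
        set +e
        local total_retries=$1
        shift
        local retry=0
        while [ "$retry" -lt "$total_retries" ]; do
            if "$@"; then
                $set_e && set -e
                return 0
            fi
            retry=$((retry + 1))
            sleep 5
        done
        $set_e && set -e
        return 1
    }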
+ stress --hung-check --drop-databases --output-folder test_output --skip-func-tests '' --global-time-limit 1200
2024-11-05 18:46:03,163 Run func tests '/usr/bin/clickhouse-test --global_time_limit=1200 '
2024-11-05 18:46:03,664 Run func tests '/usr/bin/clickhouse-test --order=random --database=test_1 --client-option join_use_nulls=1 join_algorithm='parallel_hash' memory_tracker_fault_probability=0.001 merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.05 group_by_use_nulls=1 --global_time_limit=1200 '
2024-11-05 18:46:04,166 Run func tests '/usr/bin/clickhouse-test --order=random --db-engine="Replicated('/test/db/test_2', 's1', 'r1')" --client-option allow_experimental_database_replicated=1 enable_deflate_qpl_codec=1 enable_zstd_qat_codec=1 use_query_cache=1 http_make_head_request=0 --global_time_limit=1200 '
2024-11-05 18:46:04,667 Run func tests '/usr/bin/clickhouse-test --order=random --database=test_3 --client-option join_algorithm='partial_merge' group_by_use_nulls=1 --global_time_limit=1200 '
2024-11-05 18:46:05,168 Run func tests '/usr/bin/clickhouse-test --order=random --client-option join_use_nulls=1 --global_time_limit=1200 '
2024-11-05 18:46:05,669 Run func tests '/usr/bin/clickhouse-test --order=random --db-engine="Replicated('/test/db/test_5', 's1', 'r1')" --database=test_5 --client-option allow_experimental_database_replicated=1 enable_deflate_qpl_codec=1 enable_zstd_qat_codec=1 join_algorithm='full_sorting_merge' group_by_use_nulls=1 --global_time_limit=1200 '
2024-11-05 18:46:06,170 Run func tests '/usr/bin/clickhouse-test --order=random --client-option use_query_cache=1 memory_tracker_fault_probability=0.001 merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.05 --global_time_limit=1200 '
2024-11-05 18:46:06,672 Run func tests '/usr/bin/clickhouse-test --order=random --database=test_7 --client-option join_use_nulls=1 join_algorithm='grace_hash' group_by_use_nulls=1 http_make_head_request=0 --global_time_limit=1200 '
2024-11-05 18:46:07,173 Run func tests '/usr/bin/clickhouse-test --order=random --db-engine="Replicated('/test/db/test_8', 's1', 'r1')" --client-option allow_experimental_database_replicated=1 enable_deflate_qpl_codec=1 enable_zstd_qat_codec=1 use_query_cache=1 --global_time_limit=1200 '
2024-11-05 18:46:07,674 Run func tests '/usr/bin/clickhouse-test --order=random --database=test_9 --client-option join_algorithm='auto' max_rows_in_join=1000 use_query_cache=1 group_by_use_nulls=1 --global_time_limit=1200 '
2024-11-05 18:46:08,175 Run func tests '/usr/bin/clickhouse-test --order=random --client-option join_use_nulls=1 --global_time_limit=1200 '
2024-11-05 18:46:08,676 Run func tests '/usr/bin/clickhouse-test --order=random --db-engine="Replicated('/test/db/test_11', 's1', 'r1')" --database=test_11 --client-option allow_experimental_database_replicated=1 enable_deflate_qpl_codec=1 enable_zstd_qat_codec=1 join_algorithm='parallel_hash' use_query_cache=1 memory_tracker_fault_probability=0.001 merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.05 group_by_use_nulls=1 --global_time_limit=1200 '
2024-11-05 18:46:09,178 Run func tests '/usr/bin/clickhouse-test --order=random --client-option implicit_transaction=1 throw_on_unsupported_query_inside_transaction=0 optimize_trivial_approximate_count_query=1 --global_time_limit=1200 '
2024-11-05 18:46:09,678 Run func tests '/usr/bin/clickhouse-test --order=random --database=test_13 --client-option join_use_nulls=1 join_algorithm='partial_merge' use_query_cache=1 group_by_use_nulls=1 --global_time_limit=1200 '
2024-11-05 18:46:10,180 Run func tests '/usr/bin/clickhouse-test --order=random --db-engine="Replicated('/test/db/test_14', 's1', 'r1')" --client-option allow_experimental_database_replicated=1 enable_deflate_qpl_codec=1 enable_zstd_qat_codec=1 http_make_head_request=0 --global_time_limit=1200 '
2024-11-05 18:46:10,681 Run func tests '/usr/bin/clickhouse-test --order=random --database=test_15 --client-option join_algorithm='full_sorting_merge' use_query_cache=1 group_by_use_nulls=1 http_make_head_request=0 --global_time_limit=1200 '
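stress.py (a Python script) is fanning out 16 clickhouse-test workers here, each started about 0.5 s apart with a randomized mix of client options (join algorithm, nullability, query cache, fault probabilities, Replicated database engine), then polling every 5 s until all finish. In shell terms, roughly (illustrative only; pick_random_client_options and count_finished are hypothetical stand-ins for logic inside stress.py):

    for i in $(seq 0 15); do
        opts=$(pick_random_client_options "$i")   # hypothetical: builds option lines like those logged above
        /usr/bin/clickhouse-test --order=random $opts --global_time_limit=1200 &
        sleep 0.5                                 # staggered start, matching the log timestamps
    done
    while jobs -r | grep -q .; do
        echo "$(date '+%F %T') Finished $(count_finished) from 16 processes"  # hypothetical counter
        sleep 5
    done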
2024-11-05 18:46:11,182 Will wait functests to finish
2024-11-05 18:46:11,182 Finished 11 from 16 processes
[... "Finished 11 from 16 processes" repeated every ~5 s from 18:46:16 to 18:53:36 ...]
2024-11-05 18:53:41,538 Finished 11 from 16 processes
API: PutObjectPart(bucket=test, object=s3/tit/smgedelpiaajeaqyouuztibhtahef)
Time: 17:53:43 UTC 11/05/2024
DeploymentID: cac1602a-7e56-491e-b405-da119c28fcf6
RequestID: 180524021C1F024C
RemoteHost: ::1
Host: localhost:11111
UserAgent: aws-sdk-cpp/1.11.234 ua/2.0 md/aws-crt#0.24.11-dev+5b67ba5a os/Linux/5.15.0-122-generic md/arch#x86_64 lang/c++#C++23 md/Clang#17.0.6 cfg/retry-mode#custom api/S3
Error: open /minio_data/.minio.sys/multipart/33997d3e1ee98f6dde0f5abc0c4db303291102cd2b560a3a4916643164e792b1/4bac86ce-82fc-421b-92b0-629244d9ff77/00002.d9f15be772929726021274af7b4f6b80.16777216: no such file or directory (*fs.PathError)
uploadID=4bac86ce-82fc-421b-92b0-629244d9ff77, partPath=/minio_data/.minio.sys/multipart/33997d3e1ee98f6dde0f5abc0c4db303291102cd2b560a3a4916643164e792b1/4bac86ce-82fc-421b-92b0-629244d9ff77/00002.d9f15be772929726021274af7b4f6b80.16777216, filepath=/minio_data/.minio.sys/tmp/3334e782-8896-4e8e-93af-817bf392fded/4bac86ce-82fc-421b-92b0-629244d9ff77.9ec8dd08-3184-4fba-bcc8-f7fbfcb92c5f
1: cmd/fs-v1-multipart.go:121:cmd.(*FSObjects).backgroundAppend()
2024-11-05 18:53:46,542 Finished 11 from 16 processes
[... "Finished 11 from 16 processes" repeated every ~5 s from 18:53:51 to 19:00:56 ...]
2024-11-05 19:01:01,874 Finished 11 from 16 processes
API: PutObjectPart(bucket=test, object=test/nre/npstxvmgkmgomjallreunrsxnfsyd)
Time: 18:01:01 UTC 11/05/2024
DeploymentID: cac1602a-7e56-491e-b405-da119c28fcf6
RequestID: 1805246834579352
RemoteHost: 127.0.0.1
Host: localhost:11111
UserAgent: aws-sdk-cpp/1.11.234 ua/2.0 md/aws-crt#0.24.11-dev+5b67ba5a os/Linux/5.15.0-122-generic md/arch#x86_64 lang/c++#C++23 md/Clang#17.0.6 cfg/retry-mode#custom api/S3
Error: open /minio_data/.minio.sys/multipart/4a9cec591d10ff33e565bf3499f7eb31bd802d907b9eead36be88d1a34659436/94337a28-bbb1-4990-8091-774b8875973d/00002.11c3fb3d4e383b8e3648f62a7d7644be.7597876: no such file or directory (*fs.PathError)
uploadID=94337a28-bbb1-4990-8091-774b8875973d, partPath=/minio_data/.minio.sys/multipart/4a9cec591d10ff33e565bf3499f7eb31bd802d907b9eead36be88d1a34659436/94337a28-bbb1-4990-8091-774b8875973d/00002.11c3fb3d4e383b8e3648f62a7d7644be.7597876, filepath=/minio_data/.minio.sys/tmp/3334e782-8896-4e8e-93af-817bf392fded/94337a28-bbb1-4990-8091-774b8875973d.ba63222b-eb56-4c06-88c7-111e0c499ac6
1: cmd/fs-v1-multipart.go:121:cmd.(*FSObjects).backgroundAppend()
API: PutObjectPart(bucket=test, object=test/wmt/kxjgcmzstlhwajmrqxfiwmemsyxzt)
Time: 18:01:01 UTC 11/05/2024
DeploymentID: cac1602a-7e56-491e-b405-da119c28fcf6
RequestID: 18052468379CDDCB
RemoteHost: ::1
Host: localhost:11111
UserAgent: aws-sdk-cpp/1.11.234 ua/2.0 md/aws-crt#0.24.11-dev+5b67ba5a os/Linux/5.15.0-122-generic md/arch#x86_64 lang/c++#C++23 md/Clang#17.0.6 cfg/retry-mode#custom api/S3
Error: open /minio_data/.minio.sys/multipart/dd3f7a5d23d32d7fc09eb4ddd5a6d66f4fadfad2beb83ade2d6152d94a40f22e/462948dc-63b4-4f00-8f9e-d3966cade368/00002.3e16884f863b162feb8114646d993dbf.6493730: no such file or directory (*fs.PathError)
uploadID=462948dc-63b4-4f00-8f9e-d3966cade368, partPath=/minio_data/.minio.sys/multipart/dd3f7a5d23d32d7fc09eb4ddd5a6d66f4fadfad2beb83ade2d6152d94a40f22e/462948dc-63b4-4f00-8f9e-d3966cade368/00002.3e16884f863b162feb8114646d993dbf.6493730, filepath=/minio_data/.minio.sys/tmp/3334e782-8896-4e8e-93af-817bf392fded/462948dc-63b4-4f00-8f9e-d3966cade368.498146da-0a63-4bfe-9501-3bfb6d3e1b3e
1: cmd/fs-v1-multipart.go:121:cmd.(*FSObjects).backgroundAppend()
2024-11-05 19:01:06,878 Finished 11 from 16 processes
[... "Finished 11 from 16 processes" repeated every ~5 s from 19:01:11 to 19:06:02 ...]
2024-11-05 19:06:07,114 Finished 11 from 16 processes
2024-11-05 19:06:12,118 Finished 12 from 16 processes
2024-11-05 19:06:17,124 Finished 12 from 16 processes
2024-11-05 19:06:22,126 Finished 13 from 16 processes
[... "Finished 13 from 16 processes" repeated every ~5 s from 19:06:27 to 19:08:22 ...]
2024-11-05 19:08:27,218 Finished 13 from 16 processes
2024-11-05 19:08:32,222 Finished 14 from 16 processes
[... "Finished 14 from 16 processes" repeated every ~5 s from 19:08:37 to 19:09:02 ...]
2024-11-05 19:09:07,250 Finished 14 from 16 processes
API: PutObjectPart(bucket=test, object=s3/ofl/voxsgpgyuzmibvsjtjxzxthbqqtpk)
Time: 18:09:10 UTC 11/05/2024
DeploymentID: cac1602a-7e56-491e-b405-da119c28fcf6
RequestID: 180524DA007015D8
RemoteHost: ::1
Host: localhost:11111
UserAgent: aws-sdk-cpp/1.11.234 ua/2.0 md/aws-crt#0.24.11-dev+5b67ba5a os/Linux/5.15.0-122-generic md/arch#x86_64 lang/c++#C++23 md/Clang#17.0.6 cfg/retry-mode#custom api/S3
Error: open /minio_data/.minio.sys/multipart/6dd473e8239c5a78b9e39992b61eba9125d6a7d316b74d7d45efefb826bbe400/a27e7793-709a-4632-a7f3-753c49b6841e/00002.d9f15be772929726021274af7b4f6b80.16777216: no such file or directory (*fs.PathError)
uploadID=a27e7793-709a-4632-a7f3-753c49b6841e, partPath=/minio_data/.minio.sys/multipart/6dd473e8239c5a78b9e39992b61eba9125d6a7d316b74d7d45efefb826bbe400/a27e7793-709a-4632-a7f3-753c49b6841e/00002.d9f15be772929726021274af7b4f6b80.16777216, filepath=/minio_data/.minio.sys/tmp/3334e782-8896-4e8e-93af-817bf392fded/a27e7793-709a-4632-a7f3-753c49b6841e.e42c727f-a686-4372-bea4-8eba10381c66
1: cmd/fs-v1-multipart.go:121:cmd.(*FSObjects).backgroundAppend()
2024-11-05 19:09:12,254 Finished 14 from 16 processes
[... "Finished 14 from 16 processes" repeated every ~5 s from 19:09:17 to 19:11:27 ...]
2024-11-05 19:11:32,354 Finished 14 from 16 processes
2024-11-05 19:11:37,360 Finished 15 from 16 processes
2024-11-05 19:11:42,362 All processes finished
2024-11-05 19:11:42,363 Compressing stress logs
2024-11-05 19:11:42,397 Logs compressed
2024-11-05 19:11:42,397 Will terminate gdb (if any)
2024-11-05 19:11:42,397 Running command: kill -TERM $(pidof gdb)
2024-11-05 19:11:42,401 Running command: timeout 50s tail --pid=$(pidof gdb) -f /dev/null || kill -9 $(pidof gdb) ||:
Quit
2024-11-05 19:11:43,418 Running command: kill -CONT $(cat /var/run/clickhouse-server/clickhouse-server.pid) && clickhouse client -q 'SELECT 1 FORMAT Null'
2024-11-05 19:11:43,784 Running command: clickhouse client -q "SYSTEM STOP THREAD FUZZER" --max_untracked_memory=1Gi --memory_profiler_step=1Gi --max_memory_usage_for_user=0 --max_memory_usage_in_client=1000000000
2024-11-05 19:11:43,998 Running command: clickhouse client -q "SYSTEM START MERGES" --max_untracked_memory=1Gi --memory_profiler_step=1Gi --max_memory_usage_for_user=0 --max_memory_usage_in_client=1000000000
Received exception from server (version 24.3.12):
Code: 696. DB::Exception: Received from localhost:9000. DB::Exception: Load job 'startup table test_3.t_02156_mt2' -> Code: 695. DB::Exception: Load job 'load table test_3.t_02156_mt2' failed: Code: 439. DB::Exception: Cannot schedule a task: cannot allocate thread (threads=6, jobs=6): Cannot attach table `test_3`.`t_02156_mt2` from metadata file /var/lib/clickhouse/store/aca/acaaf27a-4238-4240-8c2c-c8a24dd64228/t_02156_mt2.sql from query ATTACH TABLE test_3.t_02156_mt2 UUID 'c3069277-240f-45c2-8ab2-e0b8455c2acb' (`k` UInt32, `v` String) ENGINE = MergeTree ORDER BY k SETTINGS min_bytes_for_wide_part = 0, index_granularity = 12146, ratio_of_defaults_for_sparse_serialization = 1., replace_long_file_name_to_hash = false, max_file_name_length = 0, min_bytes_for_full_part_storage = 536870912, compact_parts_max_bytes_to_buffer = 144942780, compact_parts_max_granules_to_buffer = 115, compact_parts_merge_max_bytes_to_prefetch_part = 20595776, merge_max_block_size = 2226, old_parts_lifetime = 30., prefer_fetch_merged_part_size_threshold = 1, vertical_merge_algorithm_min_rows_to_activate = 1000000, vertical_merge_algorithm_min_columns_to_activate = 100, min_merge_bytes_to_use_direct_io = 10737418240, index_granularity_bytes = 954667, concurrent_part_removal_threshold = 0, allow_vertical_merges_from_compact_to_wide_parts = true, cache_populated_by_fetch = false, marks_compress_block_size = 40483, primary_key_compress_block_size = 62390. (CANNOT_SCHEDULE_TASK),. (ASYNC_LOAD_CANCELED)
(query: SYSTEM START MERGES)
2024-11-05 19:11:44,163 Command returned 184, retrying
[... the same ASYNC_LOAD_CANCELED exception and "Command returned 184, retrying" line repeat 4 more times for SYSTEM START MERGES, at 19:11:44,377 / 19:11:45,543 / 19:11:47,710 / 19:11:50,877 ...]
2024-11-05 19:11:54,881 Running command: clickhouse client -q "SYSTEM START DISTRIBUTED SENDS" --max_untracked_memory=1Gi --memory_profiler_step=1Gi --max_memory_usage_for_user=0 --max_memory_usage_in_client=1000000000
Received exception from server (version 24.3.12):
Code: 696. DB::Exception: Received from localhost:9000. DB::Exception: Load job 'startup table test_3.t_02156_mt2' -> Code: 695. DB::Exception: Load job 'load table test_3.t_02156_mt2' failed: Code: 439. DB::Exception: Cannot schedule a task: cannot allocate thread (threads=6, jobs=6): Cannot attach table `test_3`.`t_02156_mt2` from metadata file /var/lib/clickhouse/store/aca/acaaf27a-4238-4240-8c2c-c8a24dd64228/t_02156_mt2.sql from query ATTACH TABLE test_3.t_02156_mt2 UUID 'c3069277-240f-45c2-8ab2-e0b8455c2acb' (`k` UInt32, `v` String) ENGINE = MergeTree ORDER BY k SETTINGS min_bytes_for_wide_part = 0, index_granularity = 12146, ratio_of_defaults_for_sparse_serialization = 1., replace_long_file_name_to_hash = false, max_file_name_length = 0, min_bytes_for_full_part_storage = 536870912, compact_parts_max_bytes_to_buffer = 144942780, compact_parts_max_granules_to_buffer = 115, compact_parts_merge_max_bytes_to_prefetch_part = 20595776, merge_max_block_size = 2226, old_parts_lifetime = 30., prefer_fetch_merged_part_size_threshold = 1, vertical_merge_algorithm_min_rows_to_activate = 1000000, vertical_merge_algorithm_min_columns_to_activate = 100, min_merge_bytes_to_use_direct_io = 10737418240, index_granularity_bytes = 954667, concurrent_part_removal_threshold = 0, allow_vertical_merges_from_compact_to_wide_parts = true, cache_populated_by_fetch = false, marks_compress_block_size = 40483, primary_key_compress_block_size = 62390. (CANNOT_SCHEDULE_TASK),. (ASYNC_LOAD_CANCELED)
(query: SYSTEM START DISTRIBUTED SENDS)
2024-11-05 19:11:55,046 Command returned 184, retrying
Received exception from server (version 24.3.12):
Code: 696. DB::Exception: Received from localhost:9000. DB::Exception: Load job 'startup table test_3.t_02156_mt2' -> Code: 695. DB::Exception: Load job 'load table test_3.t_02156_mt2' failed: Code: 439. DB::Exception: Cannot schedule a task: cannot allocate thread (threads=6, jobs=6): Cannot attach table `test_3`.`t_02156_mt2` from metadata file /var/lib/clickhouse/store/aca/acaaf27a-4238-4240-8c2c-c8a24dd64228/t_02156_mt2.sql from query ATTACH TABLE test_3.t_02156_mt2 UUID 'c3069277-240f-45c2-8ab2-e0b8455c2acb' (`k` UInt32, `v` String) ENGINE = MergeTree ORDER BY k SETTINGS min_bytes_for_wide_part = 0, index_granularity = 12146, ratio_of_defaults_for_sparse_serialization = 1., replace_long_file_name_to_hash = false, max_file_name_length = 0, min_bytes_for_full_part_storage = 536870912, compact_parts_max_bytes_to_buffer = 144942780, compact_parts_max_granules_to_buffer = 115, compact_parts_merge_max_bytes_to_prefetch_part = 20595776, merge_max_block_size = 2226, old_parts_lifetime = 30., prefer_fetch_merged_part_size_threshold = 1, vertical_merge_algorithm_min_rows_to_activate = 1000000, vertical_merge_algorithm_min_columns_to_activate = 100, min_merge_bytes_to_use_direct_io = 10737418240, index_granularity_bytes = 954667, concurrent_part_removal_threshold = 0, allow_vertical_merges_from_compact_to_wide_parts = true, cache_populated_by_fetch = false, marks_compress_block_size = 40483, primary_key_compress_block_size = 62390. (CANNOT_SCHEDULE_TASK),. (ASYNC_LOAD_CANCELED)
(query: SYSTEM START DISTRIBUTED SENDS)
2024-11-05 19:11:55,261 Command returend 184, retrying
2024-11-05 19:11:56,476 Command returned 184, retrying
2024-11-05 19:11:58,693 Command returned 184, retrying
2024-11-05 19:12:01,859 Command returned 184, retrying
2024-11-05 19:12:05,861 Running command: clickhouse client -q "SYSTEM START TTL MERGES" --max_untracked_memory=1Gi --memory_profiler_step=1Gi --max_memory_usage_for_user=0 --max_memory_usage_in_client=1000000000
Received exception from server (version 24.3.12):
Code: 696. DB::Exception: Received from localhost:9000. DB::Exception: Load job 'startup table test_3.t_02156_mt2' -> Code: 695. DB::Exception: Load job 'load table test_3.t_02156_mt2' failed: Code: 439. DB::Exception: Cannot schedule a task: cannot allocate thread (threads=6, jobs=6): Cannot attach table `test_3`.`t_02156_mt2` from metadata file /var/lib/clickhouse/store/aca/acaaf27a-4238-4240-8c2c-c8a24dd64228/t_02156_mt2.sql from query ATTACH TABLE test_3.t_02156_mt2 UUID 'c3069277-240f-45c2-8ab2-e0b8455c2acb' (`k` UInt32, `v` String) ENGINE = MergeTree ORDER BY k SETTINGS min_bytes_for_wide_part = 0, index_granularity = 12146, ratio_of_defaults_for_sparse_serialization = 1., replace_long_file_name_to_hash = false, max_file_name_length = 0, min_bytes_for_full_part_storage = 536870912, compact_parts_max_bytes_to_buffer = 144942780, compact_parts_max_granules_to_buffer = 115, compact_parts_merge_max_bytes_to_prefetch_part = 20595776, merge_max_block_size = 2226, old_parts_lifetime = 30., prefer_fetch_merged_part_size_threshold = 1, vertical_merge_algorithm_min_rows_to_activate = 1000000, vertical_merge_algorithm_min_columns_to_activate = 100, min_merge_bytes_to_use_direct_io = 10737418240, index_granularity_bytes = 954667, concurrent_part_removal_threshold = 0, allow_vertical_merges_from_compact_to_wide_parts = true, cache_populated_by_fetch = false, marks_compress_block_size = 40483, primary_key_compress_block_size = 62390. (CANNOT_SCHEDULE_TASK),. (ASYNC_LOAD_CANCELED)
(query: SYSTEM START TTL MERGES)
2024-11-05 19:12:06,026 Command returned 184, retrying
2024-11-05 19:12:06,241 Command returned 184, retrying
2024-11-05 19:12:07,407 Command returned 184, retrying
2024-11-05 19:12:09,574 Command returned 184, retrying
2024-11-05 19:12:12,792 Command returned 184, retrying
2024-11-05 19:12:16,793 Running command: clickhouse client -q "SYSTEM START MOVES" --max_untracked_memory=1Gi --memory_profiler_step=1Gi --max_memory_usage_for_user=0 --max_memory_usage_in_client=1000000000
Received exception from server (version 24.3.12):
Code: 696. DB::Exception: Received from localhost:9000. DB::Exception: Load job 'startup table test_3.t_02156_mt2' -> Code: 695. DB::Exception: Load job 'load table test_3.t_02156_mt2' failed: Code: 439. DB::Exception: Cannot schedule a task: cannot allocate thread (threads=6, jobs=6): Cannot attach table `test_3`.`t_02156_mt2` from metadata file /var/lib/clickhouse/store/aca/acaaf27a-4238-4240-8c2c-c8a24dd64228/t_02156_mt2.sql from query ATTACH TABLE test_3.t_02156_mt2 UUID 'c3069277-240f-45c2-8ab2-e0b8455c2acb' (`k` UInt32, `v` String) ENGINE = MergeTree ORDER BY k SETTINGS min_bytes_for_wide_part = 0, index_granularity = 12146, ratio_of_defaults_for_sparse_serialization = 1., replace_long_file_name_to_hash = false, max_file_name_length = 0, min_bytes_for_full_part_storage = 536870912, compact_parts_max_bytes_to_buffer = 144942780, compact_parts_max_granules_to_buffer = 115, compact_parts_merge_max_bytes_to_prefetch_part = 20595776, merge_max_block_size = 2226, old_parts_lifetime = 30., prefer_fetch_merged_part_size_threshold = 1, vertical_merge_algorithm_min_rows_to_activate = 1000000, vertical_merge_algorithm_min_columns_to_activate = 100, min_merge_bytes_to_use_direct_io = 10737418240, index_granularity_bytes = 954667, concurrent_part_removal_threshold = 0, allow_vertical_merges_from_compact_to_wide_parts = true, cache_populated_by_fetch = false, marks_compress_block_size = 40483, primary_key_compress_block_size = 62390. (CANNOT_SCHEDULE_TASK),. (ASYNC_LOAD_CANCELED)
(query: SYSTEM START MOVES)
2024-11-05 19:12:17,008 Command returned 184, retrying
2024-11-05 19:12:17,223 Command returned 184, retrying
2024-11-05 19:12:18,439 Command returned 184, retrying
2024-11-05 19:12:20,606 Command returned 184, retrying
2024-11-05 19:12:23,773 Command returned 184, retrying
2024-11-05 19:12:27,774 Running command: clickhouse client -q "SYSTEM START FETCHES" --max_untracked_memory=1Gi --memory_profiler_step=1Gi --max_memory_usage_for_user=0 --max_memory_usage_in_client=1000000000
Received exception from server (version 24.3.12):
Code: 696. DB::Exception: Received from localhost:9000. DB::Exception: Load job 'startup table test_3.t_02156_mt2' -> Code: 695. DB::Exception: Load job 'load table test_3.t_02156_mt2' failed: Code: 439. DB::Exception: Cannot schedule a task: cannot allocate thread (threads=6, jobs=6): Cannot attach table `test_3`.`t_02156_mt2` from metadata file /var/lib/clickhouse/store/aca/acaaf27a-4238-4240-8c2c-c8a24dd64228/t_02156_mt2.sql from query ATTACH TABLE test_3.t_02156_mt2 UUID 'c3069277-240f-45c2-8ab2-e0b8455c2acb' (`k` UInt32, `v` String) ENGINE = MergeTree ORDER BY k SETTINGS min_bytes_for_wide_part = 0, index_granularity = 12146, ratio_of_defaults_for_sparse_serialization = 1., replace_long_file_name_to_hash = false, max_file_name_length = 0, min_bytes_for_full_part_storage = 536870912, compact_parts_max_bytes_to_buffer = 144942780, compact_parts_max_granules_to_buffer = 115, compact_parts_merge_max_bytes_to_prefetch_part = 20595776, merge_max_block_size = 2226, old_parts_lifetime = 30., prefer_fetch_merged_part_size_threshold = 1, vertical_merge_algorithm_min_rows_to_activate = 1000000, vertical_merge_algorithm_min_columns_to_activate = 100, min_merge_bytes_to_use_direct_io = 10737418240, index_granularity_bytes = 954667, concurrent_part_removal_threshold = 0, allow_vertical_merges_from_compact_to_wide_parts = true, cache_populated_by_fetch = false, marks_compress_block_size = 40483, primary_key_compress_block_size = 62390. (CANNOT_SCHEDULE_TASK),. (ASYNC_LOAD_CANCELED)
(query: SYSTEM START FETCHES)
2024-11-05 19:12:27,939 Command returned 184, retrying
2024-11-05 19:12:28,104 Command returned 184, retrying
2024-11-05 19:12:29,269 Command returned 184, retrying
2024-11-05 19:12:31,486 Command returned 184, retrying
2024-11-05 19:12:34,651 Command returned 184, retrying
2024-11-05 19:12:38,655 Running command: clickhouse client -q "SYSTEM START REPLICATED SENDS" --max_untracked_memory=1Gi --memory_profiler_step=1Gi --max_memory_usage_for_user=0 --max_memory_usage_in_client=1000000000
Received exception from server (version 24.3.12):
Code: 696. DB::Exception: Received from localhost:9000. DB::Exception: Load job 'startup table test_3.t_02156_mt2' -> Code: 695. DB::Exception: Load job 'load table test_3.t_02156_mt2' failed: Code: 439. DB::Exception: Cannot schedule a task: cannot allocate thread (threads=6, jobs=6): Cannot attach table `test_3`.`t_02156_mt2` from metadata file /var/lib/clickhouse/store/aca/acaaf27a-4238-4240-8c2c-c8a24dd64228/t_02156_mt2.sql from query ATTACH TABLE test_3.t_02156_mt2 UUID 'c3069277-240f-45c2-8ab2-e0b8455c2acb' (`k` UInt32, `v` String) ENGINE = MergeTree ORDER BY k SETTINGS min_bytes_for_wide_part = 0, index_granularity = 12146, ratio_of_defaults_for_sparse_serialization = 1., replace_long_file_name_to_hash = false, max_file_name_length = 0, min_bytes_for_full_part_storage = 536870912, compact_parts_max_bytes_to_buffer = 144942780, compact_parts_max_granules_to_buffer = 115, compact_parts_merge_max_bytes_to_prefetch_part = 20595776, merge_max_block_size = 2226, old_parts_lifetime = 30., prefer_fetch_merged_part_size_threshold = 1, vertical_merge_algorithm_min_rows_to_activate = 1000000, vertical_merge_algorithm_min_columns_to_activate = 100, min_merge_bytes_to_use_direct_io = 10737418240, index_granularity_bytes = 954667, concurrent_part_removal_threshold = 0, allow_vertical_merges_from_compact_to_wide_parts = true, cache_populated_by_fetch = false, marks_compress_block_size = 40483, primary_key_compress_block_size = 62390. (CANNOT_SCHEDULE_TASK),. (ASYNC_LOAD_CANCELED)
(query: SYSTEM START REPLICATED SENDS)
2024-11-05 19:12:38,819 Command returned 184, retrying
2024-11-05 19:12:38,984 Command returned 184, retrying
2024-11-05 19:12:40,150 Command returned 184, retrying
2024-11-05 19:12:42,317 Command returned 184, retrying
2024-11-05 19:12:45,483 Command returned 184, retrying
2024-11-05 19:12:49,487 Running command: clickhouse client -q "SYSTEM START REPLICATION QUEUES" --max_untracked_memory=1Gi --memory_profiler_step=1Gi --max_memory_usage_for_user=0 --max_memory_usage_in_client=1000000000
Received exception from server (version 24.3.12):
Code: 696. DB::Exception: Received from localhost:9000. DB::Exception: Load job 'startup table test_3.t_02156_mt2' -> Code: 695. DB::Exception: Load job 'load table test_3.t_02156_mt2' failed: Code: 439. DB::Exception: Cannot schedule a task: cannot allocate thread (threads=6, jobs=6): Cannot attach table `test_3`.`t_02156_mt2` from metadata file /var/lib/clickhouse/store/aca/acaaf27a-4238-4240-8c2c-c8a24dd64228/t_02156_mt2.sql from query ATTACH TABLE test_3.t_02156_mt2 UUID 'c3069277-240f-45c2-8ab2-e0b8455c2acb' (`k` UInt32, `v` String) ENGINE = MergeTree ORDER BY k SETTINGS min_bytes_for_wide_part = 0, index_granularity = 12146, ratio_of_defaults_for_sparse_serialization = 1., replace_long_file_name_to_hash = false, max_file_name_length = 0, min_bytes_for_full_part_storage = 536870912, compact_parts_max_bytes_to_buffer = 144942780, compact_parts_max_granules_to_buffer = 115, compact_parts_merge_max_bytes_to_prefetch_part = 20595776, merge_max_block_size = 2226, old_parts_lifetime = 30., prefer_fetch_merged_part_size_threshold = 1, vertical_merge_algorithm_min_rows_to_activate = 1000000, vertical_merge_algorithm_min_columns_to_activate = 100, min_merge_bytes_to_use_direct_io = 10737418240, index_granularity_bytes = 954667, concurrent_part_removal_threshold = 0, allow_vertical_merges_from_compact_to_wide_parts = true, cache_populated_by_fetch = false, marks_compress_block_size = 40483, primary_key_compress_block_size = 62390. (CANNOT_SCHEDULE_TASK),. (ASYNC_LOAD_CANCELED)
(query: SYSTEM START REPLICATION QUEUES)
2024-11-05 19:12:49,652 Command returned 184, retrying
2024-11-05 19:12:49,816 Command returned 184, retrying
2024-11-05 19:12:50,982 Command returned 184, retrying
2024-11-05 19:12:53,149 Command returned 184, retrying
2024-11-05 19:12:56,315 Command returned 184, retrying
2024-11-05 19:13:00,319 Running command: clickhouse client -q "SYSTEM DROP MARK CACHE" --max_untracked_memory=1Gi --memory_profiler_step=1Gi --max_memory_usage_for_user=0 --max_memory_usage_in_client=1000000000
2024-11-05 19:13:00,484 Running command: clickhouse client -q "KILL QUERY WHERE upper(query) LIKE 'WATCH %'" --max_untracked_memory=1Gi --memory_profiler_step=1Gi --max_memory_usage_for_user=0 --max_memory_usage_in_client=1000000000
2024-11-05 19:13:00,649 Running command: clickhouse client -q "KILL QUERY WHERE query LIKE 'insert into tableB select %'" --max_untracked_memory=1Gi --memory_profiler_step=1Gi --max_memory_usage_for_user=0 --max_memory_usage_in_client=1000000000
2024-11-05 19:13:00,813 Running command: clickhouse client -q "KILL QUERY WHERE query LIKE 'SELECT URL, uniq(SearchPhrase) AS u FROM test.hits GROUP BY URL ORDER BY u %'" --max_untracked_memory=1Gi --memory_profiler_step=1Gi --max_memory_usage_for_user=0 --max_memory_usage_in_client=1000000000
2024-11-05 19:13:00,978 Running command: clickhouse client -q "KILL QUERY WHERE query LIKE 'SELECT (SELECT number FROM system.numbers WHERE number = 1000000000000)%'" --max_untracked_memory=1Gi --memory_profiler_step=1Gi --max_memory_usage_for_user=0 --max_memory_usage_in_client=1000000000
Received exception from server (version 24.3.12):
Code: 696. DB::Exception: Received from localhost:9000. DB::Exception: Load job 'startup table test_3.t_02156_mt2' -> Code: 695. DB::Exception: Load job 'load table test_3.t_02156_mt2' failed: Code: 439. DB::Exception: Cannot schedule a task: cannot allocate thread (threads=6, jobs=6): Cannot attach table `test_3`.`t_02156_mt2` from metadata file /var/lib/clickhouse/store/aca/acaaf27a-4238-4240-8c2c-c8a24dd64228/t_02156_mt2.sql from query ATTACH TABLE test_3.t_02156_mt2 UUID 'c3069277-240f-45c2-8ab2-e0b8455c2acb' (`k` UInt32, `v` String) ENGINE = MergeTree ORDER BY k SETTINGS min_bytes_for_wide_part = 0, index_granularity = 12146, ratio_of_defaults_for_sparse_serialization = 1., replace_long_file_name_to_hash = false, max_file_name_length = 0, min_bytes_for_full_part_storage = 536870912, compact_parts_max_bytes_to_buffer = 144942780, compact_parts_max_granules_to_buffer = 115, compact_parts_merge_max_bytes_to_prefetch_part = 20595776, merge_max_block_size = 2226, old_parts_lifetime = 30., prefer_fetch_merged_part_size_threshold = 1, vertical_merge_algorithm_min_rows_to_activate = 1000000, vertical_merge_algorithm_min_columns_to_activate = 100, min_merge_bytes_to_use_direct_io = 10737418240, index_granularity_bytes = 954667, concurrent_part_removal_threshold = 0, allow_vertical_merges_from_compact_to_wide_parts = true, cache_populated_by_fetch = false, marks_compress_block_size = 40483, primary_key_compress_block_size = 62390. (CANNOT_SCHEDULE_TASK),. (ASYNC_LOAD_CANCELED)
(query: DETACH DATABASE test_3)
2024-11-05 19:13:02,679 Checking if some queries hung
Using queries from '/usr/share/clickhouse-test/queries' directory
Connecting to ClickHouse server... OK
Connected to server 24.3.12.76.altinitystable @ 5b67ba5a4febfa774d6373929d551c6a697d7d92 HEAD
Running 1 stateless tests (MainProcess).
00001_select_1: [ OK ]
1 tests passed. 0 tests skipped. 0.20 s elapsed (MainProcess).
Won't run stateful tests because test data wasn't loaded.
Checking the hung queries: done
No queries hung.
All tests have finished.
Top patterns of log messages:
count count_% size size_% uniq_loggers uniq_threads levels background_% message_format_string
1. 34441 0.108 8.38 MiB 0.082 1 76 ['Information'] 0.04 WriteBufferFromS3 is not finalized in destructor. The file might not be written to S3. {}.
2. 19340 0.06 1.17 MiB 0.011 12 203 ['Trace'] 0.002 Access granted: {}{}
3. 10821 0.034 485.02 KiB 0.005 1 1 ['Trace'] 1 Processing requests batch, size: {}, bytes: {}
4. 10319 0.032 614.71 KiB 0.006 1 105 ['Debug'] 0.955 Key {} is removed from metadata
5. 9475 0.03 1.98 MiB 0.019 1 63 ['Debug'] 0 (from {}{}{}){}{} {} (stage: {})
6. 9443 0.029 1.42 MiB 0.014 3 62 ['Trace'] 1 {} Creating query context from {} context, user_id: {}, parent context user: {}
7. 8778 0.027 505.76 KiB 0.005 1 353 ['Debug'] 0.032 Submitting key {} for removal
8. 7839 0.024 221.19 KiB 0.002 1 62 ['Debug'] 0 Processed in {} sec.
9. 7569 0.024 3.17 MiB 0.031 1 108 ['Trace'] 0.008 Query {} to stage {}{}
10. 7280 0.023 3.27 MiB 0.032 1 108 ['Trace'] 0.008 Query {} from stage {} to stage {}{}
11. 5559 0.017 464.98 KiB 0.004 5 76 ['Debug'] 0.998 {} Authenticating user '{}' from {}
12. 5478 0.017 615.21 KiB 0.006 4 76 ['Debug'] 0.998 {} Authenticated with global context as user {}
13. 5461 0.017 479.97 KiB 0.005 4 76 ['Debug'] 0.998 {} Logout, user_id: {}
14. 5453 0.017 596.42 KiB 0.006 3 62 ['Debug'] 1 {} Creating session context with user_id: {}
15. 5292 0.017 458.43 KiB 0.004 48 44 ['Trace'] 0 Reading {} ranges in{}order from part {}, approx. {} rows starting from {}
16. 5064 0.016 575.81 KiB 0.006 1 18 ['Debug'] 0 Reading {} marks from part {}, total {} rows starting from the beginning of the part, column {}
17. 5055 0.016 479.73 KiB 0.005 1 21 ['Debug'] 0 Reading {} marks from part {}, total {} rows starting from the beginning of the part
18. 4401 0.014 144.60 KiB 0.001 1 62 ['Trace'] 1 TCP Request. Address: {}
19. 4400 0.014 415.93 KiB 0.004 1 62 ['Debug'] 1 Connected {} version {}.{}.{}, revision: {}{}{}.
20. 4335 0.014 114.30 KiB 0.001 1 62 ['Debug'] 1 Done processing connection.
21. 3764 0.012 261.38 KiB 0.003 242 616 ['Trace'] 0.857 Trying to reserve {} using storage policy from min volume index {}
22. 3291 0.01 391.27 KiB 0.004 232 192 ['Trace'] 0.714 Renaming temporary part {} to {} with tid {}.
23. 3178 0.01 153.54 KiB 0.001 1 190 ['Trace'] 0.67 filled checksums {}
24. 3029 0.009 240.75 KiB 0.002 1 49 ['Debug'] 0 Read {} rows, {} in {} sec., {} rows/sec., {}/sec.
25. 3000 0.009 14.71 MiB 0.145 52 90 ['Error'] 0.426 Cannot schedule a task: {} (threads={}, jobs={})
26. 2879 0.009 149.88 KiB 0.001 1 76 ['Debug'] 0.335 Peak memory usage{}: {}.
27. 2634 0.008 1.64 MiB 0.016 1 271 ['Debug'] 0.968 Objects with paths [{}] were removed from S3
28. 2477 0.008 24.78 MiB 0.243 3 16 ['Error'] 0.288 Load job '{}' -> {}
29. 2395 0.007 322.76 KiB 0.003 1 151 ['Information'] 0 Sorting and writing part of data into temporary file {}
30. 2285 0.007 3.70 MiB 0.036 1 190 ['Debug'] 0.992 metadata and objects were removed for [{}], only metadata were removed for [{}].
31. 2264 0.007 69.36 KiB 0.001 1 309 ['Trace'] 0 Aggregation method: {}
32. 2218 0.007 384.26 KiB 0.004 1 141 ['Information'] 0 Done writing part of data into temporary file {}, compressed {}, uncompressed {}
33. 2181 0.007 126.18 KiB 0.001 16 16 ['Trace'] 1 Flushing system log, {} entries to flush up to offset {}
34. 2181 0.007 78.17 KiB 0.001 16 16 ['Trace'] 1 Flushed system log up to offset {}
35. 2090 0.007 52.44 KiB 0.001 147 77 ['Debug'] 0.01 Key condition: {}
36. 2035 0.006 184.18 KiB 0.002 1 291 ['Trace'] 0 Aggregated. {} to {} rows (from {}) in {} sec. ({:.3f} rows/sec., {}/sec.)
37. 1995 0.006 230.90 KiB 0.002 144 77 ['Debug'] 0.011 Selected {}/{} parts by partition key, {} parts by primary key, {}/{} marks by primary key, {} marks to read from {} ranges
38. 1858 0.006 96.17 KiB 0.001 123 77 ['Trace'] 0.011 Spreading mark ranges among streams (default reading)
39. 1852 0.006 199.94 KiB 0.002 1 50 ['Trace'] 0.069 PREWHERE condition was split into {} steps: {}
40. 1845 0.006 70.22 KiB 0.001 121 77 ['Debug'] 0.011 Reading approx. {} rows with {} streams
41. 1740 0.005 85.89 KiB 0.001 1 56 ['Trace'] 0 Merging partially aggregated blocks (bucket = {}).
42. 1740 0.005 258.75 KiB 0.002 1 56 ['Debug'] 0 Merged partially aggregated blocks for bucket #{}. Got {} rows, {} from {} source rows in {} sec. ({:.3f} rows/sec., {}/sec.)
43. 1638 0.005 405.30 KiB 0.004 1 138 ['Debug'] 0 Written part in {:.3f} sec., {} rows, {} uncompressed, {} compressed, {:.3f} uncompressed bytes per row, {:.3f} compressed bytes per row, compression rate: {:.3f} ({:.3f} rows/sec., {}/sec. uncompressed, {}/sec. compressed)
44. 1638 0.005 220.75 KiB 0.002 1 138 ['Debug'] 0 Writing part of aggregation data into temporary file {}
45. 1638 0.005 73.75 KiB 0.001 1 138 ['Debug'] 0 Max size of temporary block: {} rows, {}.
46. 1631 0.005 50.17 KiB 0 1 75 ['Debug'] 0.013 min_marks_for_concurrent_read={}
47. 1605 0.005 234.02 KiB 0.002 1 4 ['Trace'] 1 Creating part at path {}
48. 1598 0.005 104.80 KiB 0.001 277 16 ['Trace'] 1 Executing log entry to mutate part {} to {}
49. 1588 0.005 310.50 KiB 0.003 274 16 ['Trace'] 1 Mutating part {} with mutation commands from {} mutations ({}): {}
50. 1514 0.005 287.57 KiB 0.003 2 40 ['Trace'] 1 HTTP Request for {}. Method: {}, Address: {}, User-Agent: {}{}, Content Type: {}, Transfer Encoding: {}, X-Forwarded-For: {}
51. 1507 0.005 726.32 KiB 0.007 2 40 ['Trace'] 1 Request URI: {}
52. 1456 0.005 46.60 KiB 0 25 69 ['Debug'] 0.005 MinMax index condition: {}
53. 1408 0.004 28.88 KiB 0 2 40 ['Debug'] 0.277 Done processing query
54. 1365 0.004 166.96 KiB 0.002 1 2 ['Trace'] 1 MemoryTracking: was {}, peak {}, free memory in arenas {}, will set to {} (RSS), difference: {}
55. 1339 0.004 431.50 KiB 0.004 1125 319 ['Information'] 0.004 Setting download as failed: {}
56. 1298 0.004 7.62 MiB 0.075 5 17 ['Error'] 0.995 Cannot parse string {} as {}: syntax error {}
57. 1183 0.004 12.71 KiB 0 1 289 ['Trace'] 0 Aggregating
58. 1180 0.004 84.09 KiB 0.001 1 38 ['Debug'] 0.033 Schedule load job '{}' into {}
59. 1173 0.004 25.69 KiB 0 167 473 ['Trace'] 1 Execution took {} ms.
60. 1113 0.003 75.96 KiB 0.001 1 210 ['Debug'] 1 Execute load job '{}' in {}
61. 1095 0.003 90.08 KiB 0.001 1 258 ['Trace'] 0 An entry for key={} found in cache: sum_of_sizes={}, median_size={}
62. 1093 0.003 75.90 KiB 0.001 1 209 ['Debug'] 1 Finish load job '{}' with status {}
63. 1090 0.003 37.03 KiB 0 1 225 ['Debug'] 0.571 Spawn loader worker #{} in {}
64. 1069 0.003 24.86 KiB 0 1 266 ['Debug'] 1 Stop worker in {}
65. 1017 0.003 54.18 KiB 0.001 45 440 ['Debug'] 0.981 Selected {} parts from {} to {}
66. 986 0.003 83.14 KiB 0.001 63 432 ['Trace'] 1 Scheduling next merge selecting task after {}ms, current attempt status: {}
67. 983 0.003 80.63 KiB 0.001 1 22 ['Debug'] 0 Merging {} parts: from {} to {} into {} with storage {}
68. 983 0.003 33.20 KiB 0 1 22 ['Debug'] 0 Selected MergeAlgorithm: {}
69. 966 0.003 51.67 KiB 0 72 16 ['Debug'] 1 Zero copy lock taken, will mutate part {}
70. 962 0.003 166.93 KiB 0.002 1 266 ['Debug'] 0.499 Recursively remove path {}: metadata and objects were removed for [{}], only metadata were removed for [{}].
71. 914 0.003 30.32 KiB 0 1 235 ['Debug'] 0.508 Change current priority: {} -> {}
72. 906 0.003 69.90 KiB 0.001 1 48 ['Trace'] 0 Query span trace_id for opentelemetry log: {}
73. 904 0.003 7.95 KiB 0 2 38 ['Trace'] 0.002 No tables
74. 839 0.003 66.42 KiB 0.001 1 115 ['Debug'] 0.514 Prioritize load job '{}': {} -> {}
75. 693 0.002 62.64 KiB 0.001 46 315 ['Trace'] 0.951 Insert entry {} to queue with type {}
76. 693 0.002 15.57 KiB 0 1 210 ['Trace'] 0 Merging aggregated data
77. 690 0.002 16.73 KiB 0 1 51 ['Trace'] 0 {} -> {}
78. 648 0.002 84.88 KiB 0.001 1 21 ['Debug'] 0 Merge sorted {} rows, containing {} columns ({} merged, {} gathered) in {} sec., {} rows/sec., {}/sec.
79. 604 0.002 51.78 KiB 0 36 74 ['Trace'] 0.988 Part {} local references is zero, will check blobs can be removed in zookeeper
80. 598 0.002 139.03 KiB 0.001 35 70 ['Trace'] 0.998 Removing zookeeper lock {} for part {} (files to keep: [{}])
81. 572 0.002 79.39 KiB 0.001 34 341 ['Trace'] 1 Checked {} partitions, found {} partitions with parts that may be merged: [{}] (max_total_size_to_merge={}, merge_with_ttl_allowed={})
82. 566 0.002 64.67 KiB 0.001 166 244 ['Trace'] 0 Bypassing cache because file segment state is `PARTIALLY_DOWNLOADED_NO_CONTINUATION` and downloaded part already used
83. 550 0.002 44.58 KiB 0 1 16 ['Information'] 0 Write-through cache is stopped as cache limit is reached and nothing can be evicted
84. 550 0.002 219.18 KiB 0.002 1 16 ['Debug'] 0 Failed to reserve space in cache (size: {}, file segment info: {}
85. 503 0.002 65.29 KiB 0.001 156 156 ['Debug'] 0.99 Removing {} parts from filesystem (serially): Parts: [{}]
86. 500 0.002 81.15 KiB 0.001 13 41 ['Trace'] 0.946 List of all grants including implicit: {}
87. 500 0.002 38.09 KiB 0 13 41 ['Trace'] 0.946 Settings: readonly = {}, allow_ddl = {}, allow_introspection_functions = {}
88. 500 0.002 29.35 KiB 0 13 41 ['Trace'] 0.946 List of all grants: {}
89. 488 0.002 28.12 KiB 0 45 311 ['Debug'] 0.953 Pulling {} entries to queue: {} - {}
90. 487 0.002 12.37 KiB 0 45 311 ['Debug'] 0.955 Pulled {} entries to queue.
91. 483 0.002 60.70 KiB 0.001 1 315 ['Information'] 1 Have {} tables in drop queue ({} of them are in use), will try drop {}
92. 482 0.002 35.77 KiB 0 1 40 ['Debug'] 0 Waiting for table {} to be finally dropped
93. 480 0.001 93.99 KiB 0.001 1 312 ['Information'] 1 Removing metadata {} of dropped table {}
94. 475 0.001 32.67 KiB 0 22 172 ['Trace'] 1 Adding mutation {} for {} partitions (data versions: {})
95. 465 0.001 30.70 KiB 0 40 21 ['Trace'] 0 Merged {} parts: [{}, {}] -> {}
96. 462 0.001 13.43 KiB 0 21 221 ['Trace'] 0 Found {} range in {} steps
97. 462 0.001 14.08 KiB 0 21 221 ['Trace'] 0 Found (RIGHT) boundary mark: {}
98. 462 0.001 13.59 KiB 0 21 221 ['Trace'] 0 Found (LEFT) boundary mark: {}
99. 462 0.001 31.76 KiB 0 21 221 ['Trace'] 0 Running binary search on index range for part {} ({} marks)
100. 456 0.001 37.34 KiB 0 452 38 ['Information'] 0.009 Metadata processed, database {} has {} tables and {} dictionaries in total.
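Note: the ranking above is built from the server's own system.text_log, which stores the static message_format_string alongside each rendered message. A minimal sketch of an equivalent query (table and column names are real; the harness's exact aggregation, including the ratio columns, is not shown here, so treat this as an approximation):
clickhouse-client --query "
    SELECT
        count() AS c,
        formatReadableSize(sum(length(message))) AS total_size,
        any(level) AS level,
        message_format_string
    FROM system.text_log
    GROUP BY message_format_string
    ORDER BY c DESC
    LIMIT 100
"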
Top messages without format string (fmt::runtime):
count pattern runtime_message line
1. 66 CodeDBExceptionReceivedfromDBExc Code: 439. DB::Exception: Received from 127.0.0.1:9000. DB::Exception: Cannot schedule a task: fault injected (threads=50, jobs=174): While executing MergeTreeSelect(pool: PrefetchedReadPool, algorithm: Thread). Stack trace:
0. std::exception::capture() @ ('/executeQuery.cpp',218)
2. 58 DBExceptionThereisnouserinvalids DB::Exception: There is no user `invalid_session_log_test_xml_user` in user directories ('',0)
3. 48 Connectiontomysqlfailedtimes Connection to mysql failed 1 times ('',0)
4. 30 DBExceptionsessionlogtestuserdad DB::Exception: session_log_test_user_9d22a9d1bea00670d49152e3bee5af9a_plaintext_password_no_profiles_no_roles: Authentication failed: password is incorrect, or there is no user with such name. ('',0)
5. 30 Connectiontosystemasuserinvalids Connection to system@127.0.0.1:9004 as user invalid_session_log_test_xml_user failed: mysqlxx::ConnectionFailed: There is no user `invalid_session_log_test_xml_user` in user directories ((nullptr):9004) ('',0)
6. 25 autoDBStorageReplicatedMergeTree auto DB::StorageReplicatedMergeTree::processQueueEntry(ReplicatedMergeTreeQueue::SelectedEntryPtr)::(anonymous class)::operator()(LogEntryPtr &) const: Code: 999. Coordination::Exception: Session expired. (KEEPER_EXCEPTION), Stack trace (when copying this ('/Exception.cpp',222)
7. 22 Exceptionwhileexecutingbackgroun Exception while executing background task {9c75c126-38ac-4cfb-b0a5-06ac3b4f7478::all_14_14_0_21}: Code: 999. Coordination::Exception: Session expired. (KEEPER_EXCEPTION), Stack trace (when copying this message, always include the lines below):
0. std::exc ('/Exception.cpp',222)
8. 22 virtualboolDBReplicatedMergeMuta virtual bool DB::ReplicatedMergeMutateTaskBase::executeStep(): Code: 999. Coordination::Exception: Session expired. (KEEPER_EXCEPTION), Stack trace (when copying this message, always include the lines below):
0. std::exception::capture() @ 0x00000000181e1 ('/Exception.cpp',222)
9. 18 Connectiontosystemasusersessionl Connection to system@127.0.0.1:9004 as user session_log_test_user_9d22a9d1bea00670d49152e3bee5af9a_plaintext_password_no_profiles_no_roles failed: mysqlxx::ConnectionFailed: session_log_test_user_9d22a9d1bea00670d49152e3bee5af9a_plaintext_password_no_profi ('',0)
10. 15 voidDBReplicatedMergeTreeCleanup void DB::ReplicatedMergeTreeCleanupThread::run(): Code: 999. Coordination::Exception: Session expired. (KEEPER_EXCEPTION), Stack trace (when copying this message, always include the lines below):
0. std::exception::capture() @ 0x00000000181e1cb5
1. ./buil ('/Exception.cpp',222)
11. 15 voidDBStorageReplicatedMergeTree void DB::StorageReplicatedMergeTree::mutationsFinalizingTask(): Code: 999. Coordination::Exception: Session expired. (KEEPER_EXCEPTION), Stack trace (when copying this message, always include the lines below):
0. std::exception::capture() @ 0x00000000181e ('/Exception.cpp',222)
12. 10 CodeDBExceptionReceivedfromlocal Code: 241. DB::Exception: Received from localhost:9000. DB::Exception: Memory limit (for query) exceeded: would use 20.94 MiB (attempt to allocate chunk of 1049600 bytes), maximum: 20.00 MiB.: While executing AggregatingTransform: while pushing to view tes ('/executeQuery.cpp',218)
13. 8 CodeDBExceptionSyntaxerrorMultis Code: 62. DB::Exception: Syntax error (Multi-statements are not allowed): failed at position 9 (end of query): ; S. . (SYNTAX_ERROR) (version 24.3.12.76.altinitystable (altinity build)) (from [::ffff:127.0.0.1]:43174) (comment: 00366_multi_statements.sh) ( ('/executeQuery.cpp',218)
14. 4 CodeDBExceptionSyntaxerrorfailed Code: 62. DB::Exception: Syntax error: failed at position 32 ('DROP'): DROP COLUMN c. Expected one of: ALTER command, token, OpeningRoundBracket: In scope SELECT formatQuery('ALTER TABLE a (DROP COLUMN b), DROP COLUMN c'). (SYNTAX_ERROR) (version 24.3.12.7 ('/executeQuery.cpp',218)
15. 2 CodeDBExceptionEmptyquerySYNTAXE Code: 62. DB::Exception: Empty query. (SYNTAX_ERROR) (version 24.3.12.76.altinitystable (altinity build)) (from [::ffff:127.0.0.1]:34868) (in query: ), Stack trace (when copying this message, always include the lines below):
0. std::exception::capture() @ ('/executeQuery.cpp',218)
16. 2 CannotflushCodeDBExceptionReceiv Cannot flush: Code: 241. DB::Exception: Received from localhost:9000. DB::Exception: Memory limit (for query) exceeded: would use 20.94 MiB (attempt to allocate chunk of 1049600 bytes), maximum: 20.00 MiB.: While executing AggregatingTransform: while pushi ('/Exception.cpp',222)
17. 2 CodeDBExceptionGoterrorfromlocal Code: 439. DB::Exception: Got error from localhost:9000. DB::Exception: Cannot schedule a task: fault injected (threads=0, jobs=0). Stack trace:
0. std::exception::capture() @ 0x00000000181e1cb5
1. ./build_docker/./base/poco/Foundation/src/Exception.cpp:2 ('/executeQuery.cpp',218)
18. 2 PocoExceptionCodeecodeNetExcepti Poco::Exception. Code: 1000, e.code() = 107, Net Exception: Socket is not connected, Stack trace (when copying this message, always include the lines below):
0. std::exception::capture() @ 0x00000000181e1cb5
1. ./build_docker/./base/poco/Foundation/src/Ex ('/Exception.cpp',222)
19. 1 Forkedachildprocesstowatch Forked a child process to watch ('',0)
20. 1 createsnapshotidxlogtermdoneusel create snapshot idx 10096 log_term 3 done: 5467 us elapsed ('/LoggerWrapper.h',43)
21. 1 snapshotidxlogtermcreatedcompact snapshot idx 10096 log_term 3 created, compact the log store if needed ('/LoggerWrapper.h',43)
22. 1 invalidelectiontimeoutupperbound invalid election timeout upper bound detected, adjusted to 0 ('/LoggerWrapper.h',43)
23. 1 statemachinecommitindexprecommit state machine commit index 96, precommit index 96, last log index 96 ('/LoggerWrapper.h',43)
24. 1 newconfigurationlogidxprevlogidx new configuration: log idx 97, prev log idx 14
peer 1, DC ID 0, localhost:9234, voting member, 1
my id: 1, leader: 1, term: 3 ('/LoggerWrapper.h',43)
25. 1 PRIORITYdecaytargetmine [PRIORITY] decay, target 1 -> 1, mine 1 ('/LoggerWrapper.h',43)
26. 1 createsnapshotidxlogterm create snapshot idx 10096 log_term 3 ('/LoggerWrapper.h',43)
27. 1 Electiontimeoutinitiateleaderele Election timeout, initiate leader election ('/LoggerWrapper.h',43)
28. 1 startingup starting up ('',0)
29. 1 CodeCoordinationExceptionSession Code: 999. Coordination::Exception: Session expired (fault injected on recv). (KEEPER_EXCEPTION), Stack trace (when copying this message, always include the lines below):
0. std::exception::capture() @ 0x00000000181e1cb5
1. ./build_docker/./base/poco/Foun ('/Exception.cpp',222)
30. 1 newelectiontimeoutrange new election timeout range: 0 - 0 ('/LoggerWrapper.h',43)
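Note: entries in this section were logged through fmt::runtime, so message_format_string is empty and the harness has to synthesize a grouping key; judging by the 'pattern' column, it is a letters-only prefix of the message. A hedged sketch (the stripping rule and the 32-character cut-off are assumptions inferred from the output):
clickhouse-client --query "
    SELECT
        count() AS c,
        substring(replaceRegexpAll(message, '[^A-Za-z]', ''), 1, 32) AS pattern,
        any(message) AS runtime_message
    FROM system.text_log
    WHERE message_format_string = ''
    GROUP BY pattern
    ORDER BY c DESC
"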
Top messages not matching their format strings:
message_format_string count() any_message
1. 124 Connection to system@127.0.0.1:9004 as user invalid_session_log_test_xml_user failed: mysqlxx::ConnectionFailed: There is no user `invalid_session_log_test_xml_user` in user directories ((nullptr):9004)
2. Not executing log entry {} of type {} for part {} because source parts size ({}) is greater than the current maximum ({}). 112 Not executing log entry queue-0000000001 of type MUTATE_PART for part all_0_0_0_1 because source parts size (207.00 B) is greater than the current maximum (0.00 B). (skipped 13 similar messages)
3. {} is in use (by merge/mutation/INSERT) (consider increasing temporary_directories_lifetime setting) 99 /var/lib/clickhouse/disks/s3_disk/store/b90/b908c04c-4d3b-46cc-9cd4-e8bbbdad8033/tmp_merge_201403_1_6_1/ is in use (by merge/mutation/INSERT) (consider increasing temporary_directories_lifetime setting) (skipped 1 similar messages)
4. Illegal UTF-8 sequence, while processing '{}' 12 Code: 36. DB::Exception: Illegal UTF-8 sequence, while processing '�': while executing 'FUNCTION stringJaccardIndexUTF8(materialize('hello'_String) :: 3, materialize('�'_String) :: 1) -> stringJaccardIndexUTF8(materialize('hello'_String), materialize('�'_String)) Float64 : 2'. (BAD_ARGUMENTS) (version 24.3.12.76.altinitystable (altinity build)) (from [::1]:45572) (comment: 02884_string_distance_function.sql) (in query: SELECT stringJaccardIndexUTF8(materialize('hello'), materialize('\xC2\x01'));), Stack trace (when copying this message, always include the lines below):
0. std::exception::capture() @ 0x00000000181e1cb5
1. ./build_docker/./base/poco/Foundation/src/Exception.cpp:28: Poco::Exception::Exception(String const&, int) @ 0x0000000036986805
2. ./build_docker/./src/Common/Exception.cpp:96: DB::Exception::Exception(DB::Exception::MessageMasked&&, int, bool) @ 0x000000002467696b
3. DB::Exception::Exception(int, FormatStringHelperImpl::type>, StringRef&&) @ 0x00000000193055bb
4. DB::ByteJaccardIndexImpl::process(char const*, unsigned long, char const*, unsigned long) @ 0x000000001930932b
5. DB::FunctionStringDistanceImpl>::vectorVector(DB::PODArray, 63ul, 64ul> const&, DB::PODArray, 63ul, 64ul> const&, DB::PODArray, 63ul, 64ul> const&, DB::PODArray, 63ul, 64ul> const&, DB::PODArray, 63ul, 64ul>&) @ 0x0000000019308204
6. DB::FunctionsStringSimilarity>, DB::NameJaccardIndexUTF8>::executeImpl(std::vector> const&, std::shared_ptr const&, unsigned long) const @ 0x0000000019307082
7. DB::FunctionToExecutableFunctionAdaptor::executeImpl(std::vector> const&, std::shared_ptr const&, unsigned long) const @ 0x000000001878c001
8. ./contrib/boost/boost/smart_ptr/intrusive_ptr.hpp:117: DB::IExecutableFunction::executeWithoutLowCardinalityColumns(std::vector> const&, std::shared_ptr const&, unsigned long, bool) const @ 0x000000002e842008
9. ./contrib/boost/boost/smart_ptr/intrusive_ptr.hpp:117: DB::IExecutableFunction::executeWithoutSparseColumns(std::vector> const&, std::shared_ptr const&, unsigned long, bool) const @ 0x000000002e843272
10. ./build_docker/./src/Functions/IFunction.cpp:0: DB::IExecutableFunction::execute(std::vector> const&, std::shared_ptr const&, unsigned long, bool) const @ 0x000000002e845197
11. ./contrib/boost/boost/smart_ptr/intrusive_ptr.hpp:117: DB::executeAction(DB::ExpressionActions::Action const&, DB::(anonymous namespace)::ExecutionContext&, bool, bool) @ 0x000000002fdbeea9
12. ./build_docker/./src/Interpreters/ExpressionActions.cpp:0: DB::ExpressionActions::execute(DB::Block&, unsigned long&, bool, bool) const @ 0x000000002fdbc82a
13. ./build_docker/./src/Processors/Transforms/ExpressionTransform.cpp:0: DB::ExpressionTransform::transform(DB::Chunk&) @ 0x0000000033b55aa1
14. ./contrib/llvm-project/libcxx/include/__utility/swap.h:35: DB::ISimpleTransform::transform(DB::Chunk&, DB::Chunk&) @ 0x0000000028fec54b
15. ./build_docker/./src/Processors/ISimpleTransform.cpp:99: DB::ISimpleTransform::work() @ 0x000000003353dd0d
16. ./build_docker/./src/Processors/Executors/ExecutionThreadContext.cpp:50: DB::ExecutionThreadContext::executeTask() @ 0x000000003356efb0
17. ./build_docker/./src/Processors/Executors/PipelineExecutor.cpp:273: DB::PipelineExecutor::executeSingleThread(unsigned long) @ 0x000000003355d6fb
18. ./contrib/llvm-project/libcxx/include/__memory/shared_ptr.h:701: DB::PipelineExecutor::executeImpl(unsigned long, bool) @ 0x000000003355b4d7
19. ./contrib/llvm-project/libcxx/include/__memory/unique_ptr.h:274: DB::PipelineExecutor::execute(unsigned long, bool) @ 0x000000003355b0ea
20. ./build_docker/./src/Processors/Executors/PullingAsyncPipelineExecutor.cpp:0: void std::__function::__policy_invoker::__call_impl::ThreadFromGlobalPoolImpl(DB::PullingAsyncPipelineExecutor::pull(DB::Chunk&, unsigned long)::$_0&&)::'lambda'(), void ()>>(std::__function::__policy_storage const*) @ 0x0000000033574771
21. ./base/base/../base/wide_integer_impl.h:810: ThreadPoolImpl::worker(std::__list_iterator) @ 0x0000000024783038
22. ./contrib/llvm-project/libcxx/include/__memory/unique_ptr.h:302: void* std::__thread_proxy[abi:v15000]>, void ThreadPoolImpl::scheduleImpl(std::function, Priority, std::optional, bool)::'lambda0'()>>(void*) @ 0x000000002478972a
23. ? @ 0x00007f8ff1fccac3
24. ? @ 0x00007f8ff205e850
5. Query {} to stage {}{} 12 Query SELECT _CAST('FFFE80BB', 'String') AS `hex('����')` FROM system.one AS __table1 to stage Complete
6. Query {} from stage {} to stage {}{} 12 Query SELECT _CAST('FFFE80BB', 'String') AS `hex('����')` FROM system.one AS __table1 from stage FetchColumns to stage Complete
7. (from {}{}{}){}{} {} (stage: {}) 1 (from [::1]:56012) (comment: 01957_heredoc_more.sql) SELECT hex($$����$$); (stage: Complete)
Top short messages:
c message_format_string substr(any(message), 1, 120) min_length_without_exception_boilerplate
1. 12 Illegal UTF-8 sequence, while processing '{}' Code: 36. DB::Exception: Illegal UTF-8 sequence, while processing '�': while executing 'FUNCTION stringJaccardIndexUTF8 -1
2. 7 {} ThreadFuzzer is enabled. Application will run slowly and unstable. 27
3. 3 Found {} on {} Found snapshot_96.bin.zstd on LocalSnapshotDisk 18
4. 2 {}:{}: '{}' src/Processors/Transforms/MergeJoinTransform.cpp:836: ' UInt64(size = 1) Int64(size = 1) UInt64(size = 1)' 80
5. 2 Creating {}: {} Creating table test_dqehi2jn.tbl2: CREATE TABLE IF NOT EXISTS test_dqehi2jn.tbl2 UUID 'c3160413-18b4-48b0-bb72-da92f42c1 201
6. 2 Substitution {} is not set Code: 456. DB::Exception: Substitution `n` is not set. (UNKNOWN_QUERY_PARAMETER) (version 24.3.12.76.altinitystable (alt 29
7. 1 Froze {} parts Froze 1 parts -13
Top messages by level:
(0.009368997985665433,'Cannot schedule a task: {} (threads={}, jobs={})') Error
(0.00027794694024140784,'Client has gone away.') Warning
(0.10755921987476773,'WriteBufferFromS3 is not finalized in destructor. The file might not be written to S3. {}.') Information
(0.03222623007136054,'Key {} is removed from metadata') Debug
(0.06039880701425649,'Access granted: {}{}') Trace
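Note: each row above pairs a per-level frequency share with that level's most common format string. A rough equivalent over system.text_log (the share computation is omitted and the pairing rule is an assumption):
clickhouse-client --query "
    SELECT
        level,
        topK(1)(message_format_string) AS top_format_string
    FROM system.text_log
    GROUP BY level
"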
2024-11-05 19:13:05,552 Stress test finished
+ echo -e 'Test script exit code\tOK\t\N\t'
+ grep -Fa OK
+ rg -Fa 'No queries hung' /test_output/test_results.tsv
No queries hung OK \N
+ stop_server
+ local max_tries=90
+ local check_hang=true
+ local pid
++ cat /var/run/clickhouse-server/clickhouse-server.pid
+ pid=2728
+ clickhouse stop --max-tries 90 --do-not-kill
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 2728.
The process with pid = 2728 is running.
Sent terminate signal to process with pid 2728.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 2728.
The process with pid = 2728 is running.
[... 6 more identical 'Waiting for server to stop' rounds for pid 2728 ...]
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 2728.
The process with pid = 2728 does not exist.
Server stopped
+ return
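Note: stop_server, as exercised here, resolves the pid from the pidfile and delegates to `clickhouse stop`, which sends SIGTERM and polls for exit; --do-not-kill means it never escalates to SIGKILL. A sketch reconstructed from the trace (the check_hang handling is not visible in this run):
stop_server()
{
    local max_tries=90
    local check_hang=true
    local pid
    pid="$(cat /var/run/clickhouse-server/clickhouse-server.pid)"
    # send SIGTERM and wait up to max_tries poll rounds; never SIGKILL
    clickhouse stop --max-tries "$max_tries" --do-not-kill
}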
+ mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.stress.log
+ unset THREAD_FUZZER_CPU_TIME_PERIOD_US THREAD_FUZZER_EXPLICIT_MEMORY_EXCEPTION_PROBABILITY THREAD_FUZZER_EXPLICIT_SLEEP_PROBABILITY THREAD_FUZZER_SLEEP_PROBABILITY THREAD_FUZZER_SLEEP_TIME_US_MAX THREAD_FUZZER_pthread_mutex_lock_AFTER_MIGRATE_PROBABILITY THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_PROBABILITY THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US_MAX THREAD_FUZZER_pthread_mutex_lock_BEFORE_MIGRATE_PROBABILITY THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_PROBABILITY THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US_MAX THREAD_FUZZER_pthread_mutex_unlock_AFTER_MIGRATE_PROBABILITY THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_PROBABILITY THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US_MAX THREAD_FUZZER_pthread_mutex_unlock_BEFORE_MIGRATE_PROBABILITY THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_PROBABILITY THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US_MAX THREAD_POOL_FAULT_INJECTION
+ start_server
+ counter=0
+ max_attempt=120
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 0 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
127.0.0.1 - - [05/Nov/2024:18:13:13 +0000] "PUT /devstoreaccount1/cont?restype=container HTTP/1.1" 409 -
127.0.0.1 - - [05/Nov/2024:18:13:13 +0000] "PUT /devstoreaccount1/cont/kodrlrqyleqywshhygadbshjzuqjhuvf?blockid=vnrftwbwfoxjfyysnhiremkuoojgdcmklhjfalklqdbjzikflneiadamcmhjnxyw&comp=block HTTP/1.1" 201 -
127.0.0.1 - - [05/Nov/2024:18:13:13 +0000] "PUT /devstoreaccount1/cont/kodrlrqyleqywshhygadbshjzuqjhuvf?comp=blocklist HTTP/1.1" 201 -
127.0.0.1 - - [05/Nov/2024:18:13:13 +0000] "GET /devstoreaccount1/cont/kodrlrqyleqywshhygadbshjzuqjhuvf HTTP/1.1" 206 4
127.0.0.1 - - [05/Nov/2024:18:13:13 +0000] "GET /devstoreaccount1/cont/kodrlrqyleqywshhygadbshjzuqjhuvf HTTP/1.1" 206 2
127.0.0.1 - - [05/Nov/2024:18:13:13 +0000] "DELETE /devstoreaccount1/cont/kodrlrqyleqywshhygadbshjzuqjhuvf HTTP/1.1" 202 -
+ counter=1
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ '[' 1 -gt 120 ']'
+ clickhouse start --user root
+ sleep 0.5
+ counter=2
+ clickhouse-client --query 'SELECT 1'
1
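Note: start_server loops a liveness probe against port 9000, re-issuing `clickhouse start` until the server answers; the two NETWORK_ERROR lines above are the expected failures of the first probes. A sketch matching the trace (the failure message is an assumption):
start_server()
{
    local counter=0 max_attempt=120
    # probe with a trivial query; (re)start and back off until it succeeds
    until clickhouse-client --query 'SELECT 1' >/dev/null 2>&1
    do
        if [ "$counter" -gt "$max_attempt" ]
        then
            echo "Cannot start clickhouse-server" >&2
            return 1
        fi
        clickhouse start --user root
        sleep 0.5
        counter=$((counter + 1))
    done
}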
+ attach_gdb_to_clickhouse
++ kill -l SIGRTMIN
+ RTMIN=34
+ echo '
set follow-fork-mode parent
handle SIGHUP nostop noprint pass
handle SIGINT nostop noprint pass
handle SIGQUIT nostop noprint pass
handle SIGPIPE nostop noprint pass
handle SIGTERM nostop noprint pass
handle SIGUSR1 nostop noprint pass
handle SIGUSR2 nostop noprint pass
handle SIG34 nostop noprint pass
info signals
continue
backtrace full
thread apply all backtrace full
info registers
disassemble /s
up
disassemble /s
up
disassemble /s
p "done"
detach
quit
'
+ sleep 5
+ ts '%Y-%m-%d %H:%M:%S'
++ cat /var/run/clickhouse-server/clickhouse-server.pid
+ gdb -batch -command script.gdb -p 33262
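Note: the helper writes the script shown above to script.gdb and attaches gdb in batch mode; `continue` keeps the server running, so the `backtrace full` commands only fire if a signal actually stops the process. A sketch of the attach step (the background dispatch and the gdb.log destination are assumptions, inferred from the later `rg ... /test_output/gdb.log` check):
attach_gdb_to_clickhouse()
{
    local pid
    pid="$(cat /var/run/clickhouse-server/clickhouse-server.pid)"
    # timestamp gdb's output and keep it for the post-run signal check
    gdb -batch -command script.gdb -p "$pid" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log &
}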
+ run_with_retry 60 clickhouse-client --query 'SELECT '\''Connected to clickhouse-server after attaching gdb'\'''
+ [[ hxB =~ e ]]
+ set_e=false
+ set +e
+ local total_retries=60
+ shift
+ local retry=0
+ '[' 0 -ge 60 ']'
+ clickhouse-client --query 'SELECT '\''Connected to clickhouse-server after attaching gdb'\'''
Connected to clickhouse-server after attaching gdb
+ false
+ return
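Note: run_with_retry temporarily disables errexit (the `[[ hxB =~ e ]]` / `set +e` lines above), runs the command up to N times, and restores the previous shell state. A sketch consistent with the trace (the inter-attempt delay is an assumption; the first attempt succeeded here, so no sleep appears above):
run_with_retry()
{
    local set_e=false
    [[ $- =~ e ]] && { set_e=true; set +e; }   # remember and drop errexit
    local total_retries="$1"
    shift
    local retry=0
    while [ "$retry" -lt "$total_retries" ]
    do
        if "$@"
        then
            [ "$set_e" = true ] && set -e
            return 0
        fi
        retry=$((retry + 1))
        sleep 5   # assumed back-off between attempts
    done
    [ "$set_e" = true ] && set -e
    echo "Command '$*' failed after $total_retries retries" >&2
    return 1
}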
+ check_server_start
+ clickhouse-client --query 'SELECT '\''Server successfully started'\'', '\''OK'\'', NULL, '\'''\'''
+ '[' -s /test_output/application_errors.txt ']'
+ rm /test_output/application_errors.txt
rm: cannot remove '/test_output/application_errors.txt': No such file or directory
+ stop_server
+ local max_tries=90
+ local check_hang=true
+ local pid
++ cat /var/run/clickhouse-server/clickhouse-server.pid
+ pid=33262
+ clickhouse stop --max-tries 90 --do-not-kill
script.gdb:13: Error in sourced command file:
No stack.
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 33262.
The process with pid = 33262 is running.
Sent terminate signal to process with pid 33262.
Waiting for server to stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 33262.
The process with pid = 33262 is running.
[... 3 more identical 'Waiting for server to stop' rounds for pid 33262 ...]
Waiting for server to stop
Now there is no clickhouse-server process.
Server stopped
+ return
+ '[' -f /var/log/clickhouse-server/clickhouse-server.log ']'
+ '[' -f /var/log/clickhouse-server/stderr.log ']'
+ mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.final.log
+ check_logs_for_critical_errors
+ sed -n '/WARNING:.*anitizer/,/^$/p' /var/log/clickhouse-server/stderr.log
+ rg -Fav -e 'ASan doesn'\''t fully support makecontext/swapcontext functions' -e DB::Exception /test_output/tmp
+ echo -e 'No sanitizer asserts\tOK\t\N\t'
+ rm -f /test_output/tmp
+ rg -Fa ' Application: Child process was terminated by signal 9' /var/log/clickhouse-server/clickhouse-server.err.log /var/log/clickhouse-server/clickhouse-server.final.log /var/log/clickhouse-server/clickhouse-server.initial.log /var/log/clickhouse-server/clickhouse-server.stress.log
+ echo -e 'No OOM messages in clickhouse-server.log\tOK\t\N\t'
+ rg -Fa 'Code: 49. DB::Exception: ' /var/log/clickhouse-server/clickhouse-server.err.log /var/log/clickhouse-server/clickhouse-server.final.log /var/log/clickhouse-server/clickhouse-server.initial.log /var/log/clickhouse-server/clickhouse-server.stress.log
+ echo -e 'No logical errors\tOK\t\N\t'
+ '[' -s /test_output/logical_errors.txt ']'
+ rm /test_output/logical_errors.txt
+ rg --text 'Code: 499.*The specified key does not exist' /var/log/clickhouse-server/clickhouse-server.err.log /var/log/clickhouse-server/clickhouse-server.final.log /var/log/clickhouse-server/clickhouse-server.initial.log /var/log/clickhouse-server/clickhouse-server.stress.log
+ grep -v a.myext
+ echo -e 'No lost s3 keys\tOK\t\N\t'
+ grep SharedMergeTreePartCheckThread
+ rg -Fa 'it is lost forever' /var/log/clickhouse-server/clickhouse-server.err.log /var/log/clickhouse-server/clickhouse-server.final.log /var/log/clickhouse-server/clickhouse-server.initial.log /var/log/clickhouse-server/clickhouse-server.stress.log
+ echo -e 'No SharedMergeTree lost forever in clickhouse-server.log\tOK\t\N\t'
+ '[' -s /test_output/no_such_key_errors.txt ']'
+ rm /test_output/no_such_key_errors.txt
+ rg -Fa '########################################' /var/log/clickhouse-server/clickhouse-server.err.log /var/log/clickhouse-server/clickhouse-server.final.log /var/log/clickhouse-server/clickhouse-server.initial.log /var/log/clickhouse-server/clickhouse-server.stress.log
+ echo -e 'Not crashed\tOK\t\N\t'
+ rg -Fa ' <Fatal> ' /var/log/clickhouse-server/clickhouse-server.err.log /var/log/clickhouse-server/clickhouse-server.final.log /var/log/clickhouse-server/clickhouse-server.initial.log /var/log/clickhouse-server/clickhouse-server.stress.log
+ echo -e 'No fatal messages in clickhouse-server.log\tOK\t\N\t'
+ '[' -s /test_output/fatal_messages.txt ']'
+ rm /test_output/fatal_messages.txt
+ rg -Faz '########################################' /test_output/gdb.log /test_output/stress_run_logs.tar.zst /test_output/test_results.tsv
+ rg -Fa ' received signal ' /test_output/gdb.log
+ dmesg -T
+ grep -q -F -e 'Out of memory: Killed process' -e 'oom_reaper: reaped process' -e oom-kill:constraint=CONSTRAINT_NONE /test_output/dmesg.log
+ echo -e 'No OOM in dmesg\tOK\t\N\t'
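Note: every check in check_logs_for_critical_errors follows the same shape: grep the four server logs for a signature, append an OK/FAIL row to test_results.tsv, and keep the matching lines as an artifact only on failure. A sketch of one check (the FAIL wording and artifact handling are assumptions; the OK line matches the trace):
check_logical_errors()
{
    local logs="/var/log/clickhouse-server/clickhouse-server.err.log
                /var/log/clickhouse-server/clickhouse-server.final.log
                /var/log/clickhouse-server/clickhouse-server.initial.log
                /var/log/clickhouse-server/clickhouse-server.stress.log"
    # rg exits 0 when it finds at least one match
    if rg -Fa 'Code: 49. DB::Exception: ' $logs > /test_output/logical_errors.txt
    then
        echo -e 'Logical errors\tFAIL\t\N\t' >> /test_output/test_results.tsv
    else
        echo -e 'No logical errors\tOK\t\N\t' >> /test_output/test_results.tsv
        rm -f /test_output/logical_errors.txt
    fi
}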
+ tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination
tar: Removing leading `/' from member names
tar: Removing leading `/' from hard link targets
+ collect_query_and_trace_logs
+ for table in query_log trace_log
+ clickhouse-local --config-file=/etc/clickhouse-server/config.xml --only-system-tables -q 'select * from system.query_log format TSVWithNamesAndTypes'
+ zstd --threads=0
Processing configuration file '/etc/clickhouse-server/config.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/CORS.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/asynchronous_metrics_update_period_s.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/azure_storage_conf.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/backoff_failed_mutation.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/backups.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/blob_storage_log.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/block_number.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/cannot_allocate_thread_injection.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/clusters.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/compressed_marks_and_index.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/core.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/custom_disks_base_path.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/custom_settings_prefixes.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/database_atomic.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/disable_s3_env_credentials.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/display_name.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/enable_access_control_improvements.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/enable_async_load_databases.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/enable_keeper_map.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/enable_zero_copy_replication.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/encryption.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/filesystem_cache_log.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/filesystem_caches_path.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/forbidden_headers.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/graphite.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/graphite_alternative.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/handlers.yaml'.
Merging configuration file '/etc/clickhouse-server/config.d/keeper_port.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/legacy_geobase.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/listen.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/logger_trace.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/logging_no_rotate.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/lost_forever_check.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/macros.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/max_concurrent_queries.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/max_num_to_warn.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/max_server_memory_usage.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/merge_tree.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/merge_tree_old_dirs_cleanup.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/merge_tree_settings.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/named_collection.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/nlp.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/prometheus.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/query_masking_rules.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/query_masking_rules.xml_original.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/secure_ports.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/session_log.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/ssl_certs.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/storage_conf.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/storage_conf_02944.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/storage_conf_02961.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/storage_conf_02963.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/system_unfreeze.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/tcp_with_proxy.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/test_cluster_with_incorrect_pw.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/text_log.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/top_level_domains_lists.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/top_level_domains_path.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/transactions.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/validate_tcp_client_information.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/zookeeper_fault_injection.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/zookeeper_log.xml'.
Merging configuration file '/etc/clickhouse-server/config.d/zookeeper_write.xml'.
Logging trace to /var/log/clickhouse-server/clickhouse-server.log
Logging errors to /var/log/clickhouse-server/clickhouse-server.err.log
127.0.0.1 - - [05/Nov/2024:18:14:20 +0000] "PUT /devstoreaccount1/cont?restype=container HTTP/1.1" 409 -
+ for table in query_log trace_log
+ clickhouse-local --config-file=/etc/clickhouse-server/config.xml --only-system-tables -q 'select * from system.trace_log format TSVWithNamesAndTypes'
+ zstd --threads=0
[... identical configuration processing, merging, and logging output as for query_log above ...]
127.0.0.1 - - [05/Nov/2024:18:14:22 +0000] "PUT /devstoreaccount1/cont?restype=container HTTP/1.1" 409 -
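Note: collect_query_and_trace_logs dumps each system table through clickhouse-local (hence the repeated configuration processing above) and compresses the TSV stream with zstd. A sketch matching the trace (the output file name is an assumption):
collect_query_and_trace_logs()
{
    for table in query_log trace_log
    do
        clickhouse-local --config-file=/etc/clickhouse-server/config.xml --only-system-tables \
            -q "select * from system.${table} format TSVWithNamesAndTypes" \
            | zstd --threads=0 > "/test_output/${table}.tsv.zst"
    done
}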
+ mv /var/log/clickhouse-server/stderr.log /test_output/
+ clickhouse-local --structure 'test String, res String, time Nullable(Float32), desc String' -q 'SELECT '\''failure'\'', test FROM table WHERE res != '\''OK'\'' order by
(test like '\''%Sanitizer%'\'') DESC,
(test like '\''%Killed by signal%'\'') DESC,
(test like '\''%gdb.log%'\'') DESC,
(test ilike '\''%possible deadlock%'\'') DESC,
(test like '\''%start%'\'') DESC,
(test like '\''%dmesg%'\'') DESC,
(test like '\''%OOM%'\'') DESC,
(test like '\''%Signal 9%'\'') DESC,
(test like '\''%Fatal message%'\'') DESC,
rowNumberInAllBlocks()
LIMIT 1'
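Note: with the shell quoting removed, the failure-selection query above reads as follows; it surfaces at most one non-OK row from test_results.tsv, ordered so that sanitizer reports and crashes outrank ordinary failures:
SELECT 'failure', test
FROM table
WHERE res != 'OK'
ORDER BY
    (test LIKE '%Sanitizer%') DESC,
    (test LIKE '%Killed by signal%') DESC,
    (test LIKE '%gdb.log%') DESC,
    (test ILIKE '%possible deadlock%') DESC,
    (test LIKE '%start%') DESC,
    (test LIKE '%dmesg%') DESC,
    (test LIKE '%OOM%') DESC,
    (test LIKE '%Signal 9%') DESC,
    (test LIKE '%Fatal message%') DESC,
    rowNumberInAllBlocks()
LIMIT 1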
+ '[' -s /test_output/check_status.tsv ']'
+ echo -e 'success\tNo errors found'
+ rg 'OOM in dmesg|Signal 9' /test_output/check_status.tsv
+ collect_core_dumps
+ read -r core
+ find . -type f -maxdepth 1 -name 'core.*'