Console Output

Skipping 2,634 KB..
Git Commit Branch: release-7.5
UTC Build Time:    2024-04-28 08:20:54
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Enable Features:   pprof-fp jemalloc mem-profiling portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Profile:           dist_release
table mark.finish_mark_3 not exists for 55-th check, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 12-th time, retry later
table consistent_replicate_storage_file.check1 not exists for 7-th check, retry later
+ set +x
+ run_sql 'USE TEST;Create table t1(a int primary key, b int);insert into t1 values(1,2);insert into t1 values(2,3);'
+ check_table_exists test.t1 127.0.0.1 3306
table test.t1 not exists for 1-th check, retry later
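The trace above uses two of the suite's shell helpers: run_sql executes a statement against the given endpoint, and check_table_exists polls until the table becomes visible, emitting the "not exists for N-th check, retry later" lines that recur throughout this log. A minimal sketch of that retry pattern, assuming a mysql client on PATH; the function name, loop bound, sleep, and credentials are illustrative, not the helper's actual source:

check_table_exists_sketch() {
    local table=$1 host=$2 port=$3
    for i in $(seq 1 60); do
        # DESC fails until the table has been created/replicated on this endpoint
        if mysql -h"$host" -P"$port" -uroot -e "DESC $table" >/dev/null 2>&1; then
            echo "table $table exists"
            return 0
        fi
        echo "table $table not exists for $i-th check, retry later"
        sleep 2
    done
    return 1
}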
table sequence_test.t1 exists
check table exists success
check diff failed 1-th time, retry later
Starting Upstream TiDB...
Release Version: v7.5.1-45-gbf84e231e6
Edition: Community
Git Commit Hash: bf84e231e6ef26891d0cb524d938345f43aa047c
Git Branch: release-7.5
UTC Build Time: 2024-04-29 02:05:15
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Starting Downstream TiDB...
Release Version: v7.5.1-45-gbf84e231e6
Edition: Community
Git Commit Hash: bf84e231e6ef26891d0cb524d938345f43aa047c
Git Branch: release-7.5
UTC Build Time: 2024-04-29 02:05:15
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Verifying Upstream TiDB is started...
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
table mark.finish_mark_3 not exists for 56-th check, retry later
table consistent_replicate_storage_file.check1 not exists for 8-th check, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 13-th time, retry later
table test.t1 exists
+ sleep 5
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
check diff failed 2-th time, retry later
table consistent_replicate_storage_file.check1 not exists for 9-th check, retry later
table mark.finish_mark_3 not exists for 57-th check, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 14-th time, retry later
check diff failed 3-th time, retry later
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
table consistent_replicate_storage_file.check1 not exists for 10-th check, retry later
table mark.finish_mark_3 not exists for 58-th check, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 15-th time, retry later
check diff failed 4-th time, retry later
<<< Run all test success >>>
[Pipeline] }
Cache not saved (ws/jenkins-pingcap-tiflow-release-7.5-pull_cdc_integration_mysql_test-340/tiflow-cdc already exists)
[Pipeline] // cache
[Pipeline] }
[Pipeline] // dir
[Pipeline] }
[Pipeline] // withCredentials
[Pipeline] }
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63ca4e881540004	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:ap-tiflow-release-7-5-pull-cdc-integration-mysql-test-340-hjh9s, pid:6636, start at 2024-04-29 17:40:26.32879603 +0800 CST m=+5.502894374	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240429-17:42:26.337 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240429-17:40:26.325 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240429-17:30:26.325 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
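Note that the GC metadata above is internally consistent: tikv_gc_safe_point (17:30:26.325) sits exactly tikv_gc_life_time (10m0s) behind tikv_gc_last_run_time (17:40:26.325), matching safe point = last GC run time - life time.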
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
table consistent_replicate_storage_file.check1 not exists for 11-th check, retry later
[Pipeline] // timeout
[Pipeline] }
table mark.finish_mark_3 not exists for 59-th check, retry later
[Pipeline] // stage
[Pipeline] }
+ kill_tikv
++ ps aux
++ grep tikv-server
++ grep /tmp/tidb_cdc_test/synced_status
+ info='jenkins    17680 24.2  0.4 3801524 1684960 ?     Sl   17:40   0:05 tikv-server --pd 127.0.0.1:2379 -A 127.0.0.1:20160 --status-addr 127.0.0.1:20181 --log-file /tmp/tidb_cdc_test/synced_status/tikv1.log --log-level debug -C /tmp/tidb_cdc_test/synced_status/tikv-config.toml -s /tmp/tidb_cdc_test/synced_status/tikv1
jenkins    17681 17.4  0.4 3770288 1610264 ?     Sl   17:40   0:04 tikv-server --pd 127.0.0.1:2379 -A 127.0.0.1:20161 --status-addr 127.0.0.1:20182 --log-file /tmp/tidb_cdc_test/synced_status/tikv2.log --log-level debug -C /tmp/tidb_cdc_test/synced_status/tikv-config.toml -s /tmp/tidb_cdc_test/synced_status/tikv2
jenkins    17682 18.2  0.4 3779508 1633224 ?     Sl   17:40   0:04 tikv-server --pd 127.0.0.1:2379 -A 127.0.0.1:20162 --status-addr 127.0.0.1:20183 --log-file /tmp/tidb_cdc_test/synced_status/tikv3.log --log-level debug -C /tmp/tidb_cdc_test/synced_status/tikv-config.toml -s /tmp/tidb_cdc_test/synced_status/tikv3
jenkins    17684 23.7  0.4 3813808 1677196 ?     Sl   17:40   0:05 tikv-server --pd 127.0.0.1:2479 -A 127.0.0.1:21160 --status-addr 127.0.0.1:21180 --log-file /tmp/tidb_cdc_test/synced_status/tikv_down.log --log-level debug -C /tmp/tidb_cdc_test/synced_status/tikv-config.toml -s /tmp/tidb_cdc_test/synced_status/tikv_down'
++ ps aux
++ grep tikv-server
++ grep /tmp/tidb_cdc_test/synced_status
++ awk '{print $2}'
++ xargs kill -9
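kill_tikv above is a standard ps pipeline: narrow ps aux down to the tikv-server processes that belong to this test's working directory, extract the PID column, and SIGKILL them. As a single line (the two greps also happen to filter out each other's own process entries, since neither grep's command line contains both patterns):

ps aux | grep tikv-server | grep /tmp/tidb_cdc_test/synced_status \
    | awk '{print $2}' | xargs kill -9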
++ curl -X GET http://127.0.0.1:8300/api/v2/changefeeds/test-1/synced
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100   243  100   243    0     0   2399      0 --:--:-- --:--:-- --:--:--  2405
+ synced_status='{"synced":false,"sink_checkpoint_ts":"2024-04-29 17:40:25.992","puller_resolved_ts":"1970-01-01 08:00:00.000","last_synced_ts":"2024-04-29 17:40:19.891","now_ts":"2024-04-29 17:40:27.000","info":"The data syncing is not finished, please wait"}'
++ echo '{"synced":false,"sink_checkpoint_ts":"2024-04-29' '17:40:25.992","puller_resolved_ts":"1970-01-01' '08:00:00.000","last_synced_ts":"2024-04-29' '17:40:19.891","now_ts":"2024-04-29' '17:40:27.000","info":"The' data syncing is not finished, please 'wait"}'
++ jq .synced
+ status=false
+ '[' false '!=' false ']'
++ echo '{"synced":false,"sink_checkpoint_ts":"2024-04-29' '17:40:25.992","puller_resolved_ts":"1970-01-01' '08:00:00.000","last_synced_ts":"2024-04-29' '17:40:19.891","now_ts":"2024-04-29' '17:40:27.000","info":"The' data syncing is not finished, please 'wait"}'
++ jq -r .info
+ info='The data syncing is not finished, please wait'
+ target_message='The data syncing is not finished, please wait'
+ '[' 'The data syncing is not finished, please wait' '!=' 'The data syncing is not finished, please wait' ']'
+ sleep 130
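This block is the synced-status assertion of the synced_status case: right after the TiKV nodes are killed, GET /api/v2/changefeeds/test-1/synced must report synced=false together with the "data syncing is not finished" message, and the script then sleeps before re-checking. Condensed, the same check reads:

synced_status=$(curl -s -X GET http://127.0.0.1:8300/api/v2/changefeeds/test-1/synced)
status=$(echo "$synced_status" | jq .synced)   # expected: false right after the kill
info=$(echo "$synced_status" | jq -r .info)
if [ "$status" != false ]; then exit 1; fi
if [ "$info" != 'The data syncing is not finished, please wait' ]; then exit 1; fi
sleep 130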
[Pipeline] // container
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 16-th time, retry later
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // podTemplate
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63ca4e881540004	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:ap-tiflow-release-7-5-pull-cdc-integration-mysql-test-340-hjh9s, pid:6636, start at 2024-04-29 17:40:26.32879603 +0800 CST m=+5.502894374	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240429-17:42:26.337 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240429-17:40:26.325 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240429-17:30:26.325 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
Verifying Downstream TiDB is started...
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63ca4e882040013	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:ap-tiflow-release-7-5-pull-cdc-integration-mysql-test-340-hjh9s, pid:6713, start at 2024-04-29 17:40:26.399734536 +0800 CST m=+5.506110312	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240429-17:42:26.408 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240429-17:40:26.369 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240429-17:30:26.369 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
Starting Upstream TiFlash...
TiFlash
Release Version: v7.5.1-12-g9002cc34d
Edition:         Community
Git Commit Hash: 9002cc34d3b593a718b6c5260ba18f30a45ab314
Git Branch:      HEAD
UTC Build Time:  2024-04-18 07:24:48
Enable Features: jemalloc sm4(GmSSL) avx2 avx512 unwind thinlto
Profile:         RELWITHDEBINFO

Raft Proxy
Git Commit Hash:   521fd9dbc55e58646045d88f91c3c35db50b5981
Git Commit Branch: HEAD
UTC Build Time:    2024-04-18 07:28:40
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Storage Engine:    tiflash
Prometheus Prefix: tiflash_proxy_
Profile:           release
Enable Features:    portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Verifying Upstream TiFlash is started...
Logging trace to /tmp/tidb_cdc_test/force_replicate_table/tiflash/log/server.log
Logging errors to /tmp/tidb_cdc_test/force_replicate_table/tiflash/log/error.log
arg matches is ArgMatches { args: {"data-dir": MatchedArg { occurs: 1, indices: [6], vals: ["/tmp/tidb_cdc_test/force_replicate_table/tiflash/db/proxy"] }, "addr": MatchedArg { occurs: 1, indices: [20], vals: ["127.0.0.1:9000"] }, "advertise-addr": MatchedArg { occurs: 1, indices: [4], vals: ["127.0.0.1:9000"] }, "engine-label": MatchedArg { occurs: 1, indices: [14], vals: ["tiflash"] }, "engine-version": MatchedArg { occurs: 1, indices: [12], vals: ["v7.5.1-12-g9002cc34d"] }, "log-file": MatchedArg { occurs: 1, indices: [18], vals: ["/tmp/tidb_cdc_test/force_replicate_table/tiflash/log/proxy.log"] }, "engine-addr": MatchedArg { occurs: 1, indices: [2], vals: ["127.0.0.1:9500"] }, "config": MatchedArg { occurs: 1, indices: [8], vals: ["/tmp/tidb_cdc_test/force_replicate_table/tiflash-proxy.toml"] }, "pd-endpoints": MatchedArg { occurs: 1, indices: [16], vals: ["127.0.0.1:2379"] }, "engine-git-hash": MatchedArg { occurs: 1, indices: [10], vals: ["9002cc34d3b593a718b6c5260ba18f30a45ab314"] }}, subcommand: None, usage: Some("USAGE:\n    TiFlash Proxy [FLAGS] [OPTIONS] --engine-git-hash <engine-git-hash> --engine-label <engine-label> --engine-version <engine-version>") }
check diff failed 5-th time, retry later
table mark.finish_mark_3 not exists for 60-th check, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 17-th time, retry later
table consistent_replicate_storage_file.check1 not exists for 12-th check, retry later
check diff failed 6-th time, retry later
table mark.finish_mark_3 not exists for 61-th check, retry later
[Mon Apr 29 17:40:31 CST 2024] <<<<<< START cdc server in force_replicate_table case >>>>>>
+ [[ '' == \t\r\u\e ]]
+ set +e
+ get_info_fail_msg='failed to get info:'
+ etcd_info_msg='etcd info'
+ '[' -z '' ']'
+ curl_status_cmd='curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info'
+ GO_FAILPOINTS=
+ [[ no != \n\o ]]
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.force_replicate_table.80388040.out server --log-file /tmp/tidb_cdc_test/force_replicate_table/cdc.log --log-level debug --data-dir /tmp/tidb_cdc_test/force_replicate_table/cdc_data --cluster-id default
+ (( i = 0 ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connection refused
* Failed connect to 127.0.0.1:8300; Connection refused
* Closing connection 0
+ res=
+ echo ''
+ grep -q 'failed to get info:'
+ echo ''
+ grep -q 'etcd info'
+ '[' 0 -eq 50 ']'
+ sleep 3
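This is the standard cdc-server readiness loop that recurs throughout this log: poll /debug/info up to 50 times, fail if the response contains "failed to get info:", and stop waiting once it contains "etcd info". A compact sketch, inferred from the trace rather than copied from the suite's source:

for ((i = 0; i <= 50; i++)); do
    res=$(curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info)
    if echo "$res" | grep -q 'failed to get info:'; then exit 1; fi   # server came up unhealthy
    if echo "$res" | grep -q 'etcd info'; then break; fi              # server is ready
    if [ "$i" -eq 50 ]; then exit 1; fi                               # out of retries
    sleep 3
done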
table consistent_replicate_storage_file.check1 not exists for 13-th check, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 18-th time, retry later
check diff failed 7-th time, retry later
table consistent_replicate_storage_file.check1 not exists for 14-th check, retry later
table mark.finish_mark_3 not exists for 62-th check, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 19-th time, retry later
+ (( i++ ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connected to 127.0.0.1 (127.0.0.1) port 8300 (#0)
> GET /debug/info HTTP/1.1
> User-Agent: curl/7.29.0
> Host: 127.0.0.1:8300
> Accept: */*
> 
< HTTP/1.1 200 OK
< Date: Mon, 29 Apr 2024 09:40:34 GMT
< Content-Length: 613
< Content-Type: text/plain; charset=utf-8
< 
{ [data not shown]
* Connection #0 to host 127.0.0.1 left intact
+ res='

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/bde5fe5c-7d46-496a-a55e-58fbf8939bc1
	{"id":"bde5fe5c-7d46-496a-a55e-58fbf8939bc1","address":"127.0.0.1:8300","version":"v7.5.1-21-g88db1a842"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f2939fcc2f2
	bde5fe5c-7d46-496a-a55e-58fbf8939bc1

/tidb/cdc/default/default/upstream/7363221570257451618
	{"id":7363221570257451618,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/bde5fe5c-7d46-496a-a55e-58fbf8939bc1
	{"id":"bde5fe5c-7d46-496a-a55e-58fbf8939bc1","address":"127.0.0.1:8300","version":"v7.5.1-21-g88db1a842"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f2939fcc2f2
	bde5fe5c-7d46-496a-a55e-58fbf8939bc1

/tidb/cdc/default/default/upstream/7363221570257451618
	{"id":7363221570257451618,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'failed to get info:'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/bde5fe5c-7d46-496a-a55e-58fbf8939bc1
	{"id":"bde5fe5c-7d46-496a-a55e-58fbf8939bc1","address":"127.0.0.1:8300","version":"v7.5.1-21-g88db1a842"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f2939fcc2f2
	bde5fe5c-7d46-496a-a55e-58fbf8939bc1

/tidb/cdc/default/default/upstream/7363221570257451618
	{"id":7363221570257451618,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'etcd info'
+ break
+ set +x
Create changefeed successfully!
ID: 5eb02fda-1c32-41b8-9a09-1d5c4d63983b
Info: {"upstream_id":7363221570257451618,"namespace":"default","id":"5eb02fda-1c32-41b8-9a09-1d5c4d63983b","sink_uri":"mysql://normal:xxxxx@127.0.0.1:3306/?safe-mode=true","create_time":"2024-04-29T17:40:34.576262732+08:00","start_ts":449415382636953601,"config":{"memory_quota":1073741824,"case_sensitive":false,"force_replicate":true,"ignore_ineligible_table":false,"check_gc_safe_point":true,"enable_sync_point":false,"bdr_mode":false,"sync_point_interval":600000000000,"sync_point_retention":86400000000000,"filter":{"rules":["*.*"]},"mounter":{"worker_num":16},"sink":{"csv":{"delimiter":",","quote":"\"","null":"\\N","include_commit_ts":false,"binary_encoding_method":"base64"},"encoder_concurrency":32,"terminator":"\r\n","date_separator":"day","enable_partition_separator":true,"enable_kafka_sink_v2":false,"only_output_updated_columns":false,"delete_only_output_handle_key_columns":false,"advance_timeout":150,"send_bootstrap_interval_in_sec":120,"send_bootstrap_in_msg_count":10000,"send_bootstrap_to_all_partition":true},"consistent":{"level":"none","max_log_size":64,"flush_interval":2000,"meta_flush_interval":200,"encoding_worker_num":16,"flush_worker_num":8,"use_file_backend":false,"memory_usage":{"memory_quota_percentage":50,"event_cache_percentage":0}},"scheduler":{"enable_table_across_nodes":false,"region_threshold":100000,"write_key_threshold":0},"integrity":{"integrity_check_level":"none","corruption_handle_level":"warn"},"changefeed_error_stuck_duration":1800000000000,"sql_mode":"ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION","synced_status":{"synced_check_interval":300,"checkpoint_interval":15}},"state":"normal","creator_version":"v7.5.1-21-g88db1a842","resolved_ts":449415382636953601,"checkpoint_ts":449415382636953601,"checkpoint_time":"2024-04-29 17:40:31.275"}
check diff failed 8-th time, retry later
table consistent_replicate_storage_file.check1 not exists for 15-th check, retry later
table mark.finish_mark_3 not exists for 63-th check, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 20-th time, retry later
table force_replicate_table.t0 exists
table force_replicate_table.t1 not exists for 1-th check, retry later
table consistent_replicate_storage_file.check1 not exists for 16-th check, retry later
table mark.finish_mark_3 exists
table mark.finish_mark not exists for 1-th check, retry later
check diff failed 9-th time, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 21-th time, retry later
table force_replicate_table.t1 exists
table force_replicate_table.t2 not exists for 1-th check, retry later
table consistent_replicate_storage_file.check1 not exists for 17-th check, retry later
table mark.finish_mark exists
check diff failed 10-th time, retry later
check diff successfully
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 22-th time, retry later
table force_replicate_table.t2 exists
table force_replicate_table.t3 not exists for 1-th check, retry later
wait process cdc.test exit for 1-th time...
wait process cdc.test exit for 2-th time...
check diff failed 11-th time, retry later
cdc.test: no process found
wait process cdc.test exit for 3-th time...
process cdc.test already exit
[Mon Apr 29 17:40:41 CST 2024] <<<<<< run test case default_value success! >>>>>>
table consistent_replicate_storage_file.check1 not exists for 18-th check, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 23-th time, retry later
table force_replicate_table.t3 exists
table force_replicate_table.t4 not exists for 1-th check, retry later
table consistent_replicate_storage_file.check1 not exists for 19-th check, retry later
check diff failed 12-th time, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 24-th time, retry later
table force_replicate_table.t4 exists
table force_replicate_table.t5 not exists for 1-th check, retry later
table consistent_replicate_storage_file.check1 not exists for 20-th check, retry later
check diff failed 13-th time, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 25-th time, retry later
table force_replicate_table.t5 exists
table force_replicate_table.t6 not exists for 1-th check, retry later
check diff failed 14-th time, retry later
table consistent_replicate_storage_file.check1 not exists for 21-th check, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 26-th time, retry later
table force_replicate_table.t6 not exists for 2-th check, retry later
table consistent_replicate_storage_file.check1 not exists for 22-th check, retry later
check diff failed 15-th time, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 27-th time, retry later
table force_replicate_table.t6 exists
check_data_subset force_replicate_table.t0 127.0.0.1 4000 127.0.0.1 3306
table consistent_replicate_storage_file.check1 not exists for 23-th check, retry later
run task successfully
check_data_subset force_replicate_table.t1 127.0.0.1 4000 127.0.0.1 3306
check diff failed 16-th time, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 28-th time, retry later
pass test: get status
pass test: health
pass test: changefeed apis
pass test: delete changefeed apis
pass test: capture apis
pass test: processor apis
pass test: owner apis
pass test: set log level
+ cleanup_process cdc.test
wait process cdc.test exit for 1-th time...
wait process cdc.test exit for 2-th time...
cdc.test: no process found
wait process cdc.test exit for 3-th time...
process cdc.test already exit
++ date
+ echo '[Mon Apr 29 17:40:51 CST 2024] <<<<<< run test case api_v2 success! >>>>>>'
[Mon Apr 29 17:40:51 CST 2024] <<<<<< run test case api_v2 success! >>>>>>
+ stop_tidb_cluster
run task successfully
check_data_subset force_replicate_table.t2 127.0.0.1 4000 127.0.0.1 3306
run task successfully
check_data_subset force_replicate_table.t3 127.0.0.1 4000 127.0.0.1 3306
check diff failed 17-th time, retry later
run task successfully
check_data_subset force_replicate_table.t4 127.0.0.1 4000 127.0.0.1 3306
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 29-th time, retry later
table consistent_replicate_storage_file.check1 exists
table consistent_replicate_storage_file.t2 not exists for 1-th check, retry later
run task successfully
check_data_subset force_replicate_table.t5 127.0.0.1 4000 127.0.0.1 3306
run task successfully
check_data_subset force_replicate_table.t6 127.0.0.1 4000 127.0.0.1 3306
id=19,a=NULL doesn't exist in downstream table force_replicate_table.t6
run task failed 1-th time, retry later
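check_data_subset asserts that every row of the upstream table (127.0.0.1:4000) is also present downstream (127.0.0.1:3306); for force-replicated tables without a unique key, this subset relation is the property the test settles for. A miss like id=19,a=NULL above is treated as transient replication lag, so the task is retried; it eventually succeeds further down in this log.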
check diff failed 18-th time, retry later
table consistent_replicate_storage_file.t2 not exists for 2-th check, retry later
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_mysql_test/tiflow/tests/integration_tests/simple/run.sh using Sink-Type: mysql... <<=================
The 1 times to try to start tidb cluster...
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 30-th time, retry later
start tidb cluster in /tmp/tidb_cdc_test/simple
Starting Upstream PD...
Release Version: v7.5.1-5-g584533652
Edition: Community
Git Commit Hash: 58453365285465cd90bc4472cff2bad7ce4d764b
Git Branch: release-7.5
UTC Build Time:  2024-04-03 10:04:14
Starting Downstream PD...
Release Version: v7.5.1-5-g584533652
Edition: Community
Git Commit Hash: 58453365285465cd90bc4472cff2bad7ce4d764b
Git Branch: release-7.5
UTC Build Time:  2024-04-03 10:04:14
Verifying upstream PD is started...
check_data_subset force_replicate_table.t6 127.0.0.1 4000 127.0.0.1 3306
id=7,a=NULL doesn't exist in downstream table force_replicate_table.t6
run task failed 2-th time, retry later
table consistent_replicate_storage_file.t2 not exists for 3-th check, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 31-th time, retry later
check diff failed 19-th time, retry later
table consistent_replicate_storage_file.t2 not exists for 4-th check, retry later
check diff failed 20-th time, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 32-th time, retry later
table consistent_replicate_storage_file.t2 not exists for 5-th check, retry later
Verifying downstream PD is started...
Starting Upstream TiKV...
TiKV 
Release Version:   7.5.2
Edition:           Community
Git Commit Hash:   3478895c2a700e4824bb41940260b6b28013275e
Git Commit Branch: release-7.5
UTC Build Time:    2024-04-28 08:20:54
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Enable Features:   pprof-fp jemalloc mem-profiling portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Profile:           dist_release
Starting Downstream TiKV...
TiKV 
Release Version:   7.5.2
Edition:           Community
Git Commit Hash:   3478895c2a700e4824bb41940260b6b28013275e
Git Commit Branch: release-7.5
UTC Build Time:    2024-04-28 08:20:54
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Enable Features:   pprof-fp jemalloc mem-profiling portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Profile:           dist_release
check_data_subset force_replicate_table.t6 127.0.0.1 4000 127.0.0.1 3306
check diff failed 21-th time, retry later
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_mysql_test/tiflow/tests/integration_tests/kafka_simple_basic/run.sh using Sink-Type: mysql... <<=================
[Mon Apr 29 17:41:00 CST 2024] <<<<<< run test case kafka_simple_basic success! >>>>>>
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 33-th time, retry later
run task successfully
wait process cdc.test exit for 1-th time...
Starting Upstream TiDB...
Release Version: v7.5.1-45-gbf84e231e6
Edition: Community
Git Commit Hash: bf84e231e6ef26891d0cb524d938345f43aa047c
Git Branch: release-7.5
UTC Build Time: 2024-04-29 02:05:15
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Starting Downstream TiDB...
Release Version: v7.5.1-45-gbf84e231e6
Edition: Community
Git Commit Hash: bf84e231e6ef26891d0cb524d938345f43aa047c
Git Branch: release-7.5
UTC Build Time: 2024-04-29 02:05:15
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Verifying Upstream TiDB is started...
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
table consistent_replicate_storage_file.t2 not exists for 6-th check, retry later
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_mysql_test/tiflow/tests/integration_tests/kafka_simple_basic_avro/run.sh using Sink-Type: mysql... <<=================
[Mon Apr 29 17:41:03 CST 2024] <<<<<< run test case kafka_simple_basic_avro success! >>>>>>
wait process cdc.test exit for 2-th time...
cdc.test: no process found
wait process cdc.test exit for 3-th time...
process cdc.test already exit
[Mon Apr 29 17:41:03 CST 2024] <<<<<< run test case force_replicate_table success! >>>>>>
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 34-th time, retry later
check diff failed 22-th time, retry later
table consistent_replicate_storage_file.t2 exists
check diff successfully
check diff failed 23-th time, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 35-th time, retry later
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
wait process cdc.test exit for 1-th time...
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_mysql_test/tiflow/tests/integration_tests/kafka_simple_handle_key_only/run.sh using Sink-Type: mysql... <<=================
[Mon Apr 29 17:41:06 CST 2024] <<<<<< run test case kafka_simple_handle_key_only success! >>>>>>
wait process cdc.test exit for 2-th time...
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
wait process cdc.test exit for 3-th time...
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 36-th time, retry later
cdc.test: no process found
wait process cdc.test exit for 4-th time...
process cdc.test already exit
[Mon Apr 29 17:41:08 CST 2024] <<<<<< START cdc server in consistent_replicate_storage_file case >>>>>>
+ [[ '' == \t\r\u\e ]]
+ set +e
+ get_info_fail_msg='failed to get info:'
+ etcd_info_msg='etcd info'
+ '[' -z '' ']'
+ curl_status_cmd='curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info'
+ [[ no != \n\o ]]
+ GO_FAILPOINTS='github.com/pingcap/tiflow/cdc/sink/dmlsink/txn/mysql/MySQLSinkHangLongTime=return(true)'
+ (( i = 0 ))
+ (( i <= 50 ))
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.consistent_replicate_storage_file.1128511287.out server --log-file /tmp/tidb_cdc_test/consistent_replicate_storage_file/cdc.log --log-level debug --data-dir /tmp/tidb_cdc_test/consistent_replicate_storage_file/cdc_data --cluster-id default
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connection refused
* Failed connect to 127.0.0.1:8300; Connection refused
* Closing connection 0
+ res=
+ echo ''
+ grep -q 'failed to get info:'
+ echo ''
+ grep -q 'etcd info'
+ '[' 0 -eq 50 ']'
+ sleep 3
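Unlike the earlier server starts, this one injects a failpoint before launching cdc.test: GO_FAILPOINTS follows the pingcap/failpoint convention of comma-separated <failpoint-path>=<action> pairs, and return(true) arms MySQLSinkHangLongTime so the MySQL sink hangs, presumably to let the consistent_replicate_storage_file case exercise recovery from its redo log. Only this one process's environment is affected:

# pingcap/failpoint activation syntax: <import/path/FailpointName>=<action>
export GO_FAILPOINTS='github.com/pingcap/tiflow/cdc/sink/dmlsink/txn/mysql/MySQLSinkHangLongTime=return(true)'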
check diff failed 24-th time, retry later
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_mysql_test/tiflow/tests/integration_tests/kafka_simple_handle_key_only_avro/run.sh using Sink-Type: mysql... <<=================
[Mon Apr 29 17:41:09 CST 2024] <<<<<< run test case kafka_simple_handle_key_only_avro success! >>>>>>
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63ca4eb11cc0013	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:ap-tiflow-release-7-5-pull-cdc-integration-mysql-test-340-5p904, pid:20346, start at 2024-04-29 17:41:08.37146199 +0800 CST m=+5.374411381	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240429-17:43:08.379 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240429-17:41:08.339 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240429-17:31:08.339 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63ca4eb11cc0013	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:ap-tiflow-release-7-5-pull-cdc-integration-mysql-test-340-5p904, pid:20346, start at 2024-04-29 17:41:08.37146199 +0800 CST m=+5.374411381	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240429-17:43:08.379 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240429-17:41:08.339 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240429-17:31:08.339 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
Verifying Downstream TiDB is started...
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63ca4eb11c80013	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:ap-tiflow-release-7-5-pull-cdc-integration-mysql-test-340-5p904, pid:20426, start at 2024-04-29 17:41:08.362154345 +0800 CST m=+5.294116439	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240429-17:43:08.369 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240429-17:41:08.338 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240429-17:31:08.338 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
Starting Upstream TiFlash...
TiFlash
Release Version: v7.5.1-12-g9002cc34d
Edition:         Community
Git Commit Hash: 9002cc34d3b593a718b6c5260ba18f30a45ab314
Git Branch:      HEAD
UTC Build Time:  2024-04-18 07:24:48
Enable Features: jemalloc sm4(GmSSL) avx2 avx512 unwind thinlto
Profile:         RELWITHDEBINFO

Raft Proxy
Git Commit Hash:   521fd9dbc55e58646045d88f91c3c35db50b5981
Git Commit Branch: HEAD
UTC Build Time:    2024-04-18 07:28:40
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Storage Engine:    tiflash
Prometheus Prefix: tiflash_proxy_
Profile:           release
Enable Features:    portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Verifying Upstream TiFlash is started...
Logging trace to /tmp/tidb_cdc_test/simple/tiflash/log/server.log
Logging errors to /tmp/tidb_cdc_test/simple/tiflash/log/error.log
arg matches is ArgMatches { args: {"pd-endpoints": MatchedArg { occurs: 1, indices: [16], vals: ["127.0.0.1:2379"] }, "addr": MatchedArg { occurs: 1, indices: [20], vals: ["127.0.0.1:9000"] }, "engine-git-hash": MatchedArg { occurs: 1, indices: [10], vals: ["9002cc34d3b593a718b6c5260ba18f30a45ab314"] }, "data-dir": MatchedArg { occurs: 1, indices: [6], vals: ["/tmp/tidb_cdc_test/simple/tiflash/db/proxy"] }, "advertise-addr": MatchedArg { occurs: 1, indices: [4], vals: ["127.0.0.1:9000"] }, "log-file": MatchedArg { occurs: 1, indices: [18], vals: ["/tmp/tidb_cdc_test/simple/tiflash/log/proxy.log"] }, "engine-label": MatchedArg { occurs: 1, indices: [14], vals: ["tiflash"] }, "config": MatchedArg { occurs: 1, indices: [8], vals: ["/tmp/tidb_cdc_test/simple/tiflash-proxy.toml"] }, "engine-addr": MatchedArg { occurs: 1, indices: [2], vals: ["127.0.0.1:9500"] }, "engine-version": MatchedArg { occurs: 1, indices: [12], vals: ["v7.5.1-12-g9002cc34d"] }}, subcommand: None, usage: Some("USAGE:\n    TiFlash Proxy [FLAGS] [OPTIONS] --engine-git-hash <engine-git-hash> --engine-label <engine-label> --engine-version <engine-version>") }
check diff failed 25-th time, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 37-th time, retry later
+ (( i++ ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connected to 127.0.0.1 (127.0.0.1) port 8300 (#0)
> GET /debug/info HTTP/1.1
> User-Agent: curl/7.29.0
> Host: 127.0.0.1:8300
> Accept: */*
> 
< HTTP/1.1 200 OK
< Date: Mon, 29 Apr 2024 09:41:11 GMT
< Content-Type: text/plain; charset=utf-8
< Transfer-Encoding: chunked
< 
{ [data not shown]
* Connection #0 to host 127.0.0.1 left intact
+ res='

*** owner info ***:



*** processors info ***:

changefeedID: default/5aa2918f-e47a-4194-b365-70d4fd6cebdf
{UpstreamID:7363221430226611654 Namespace:default ID:5aa2918f-e47a-4194-b365-70d4fd6cebdf SinkURI:mysql://normal:123456@127.0.0.1:3306/ CreateTime:2024-04-29 17:40:01.407430316 +0800 CST StartTs:449415374775255041 TargetTs:0 AdminJobType:noop Engine:unified SortDir: Config:0xc002d01b90 State:normal Error:<nil> Warning:<nil> CreatorVersion:v7.5.1-21-g88db1a842 Epoch:449415374801469443}
{CheckpointTs:449415392784547843 MinTableBarrierTs:449415392784547847 AdminJobType:noop}
span: {table_id:110,start_key:7480000000000000ff6e5f720000000000fa,end_key:7480000000000000ff6e5f730000000000fa}, resolvedTs: 449415392784547843, checkpointTs: 449415392784547843, state: Replicating
span: {table_id:111,start_key:7480000000000000ff6f5f720000000000fa,end_key:7480000000000000ff6f5f730000000000fa}, resolvedTs: 449415392784547847, checkpointTs: 449415392784547843, state: Replicating
span: {table_id:114,start_key:7480000000000000ff725f720000000000fa,end_key:7480000000000000ff725f730000000000fa}, resolvedTs: 449415392784547844, checkpointTs: 449415392784547843, state: Replicating
span: {table_id:113,start_key:7480000000000000ff715f720000000000fa,end_key:7480000000000000ff715f730000000000fa}, resolvedTs: 449415392784547847, checkpointTs: 449415392784547843, state: Replicating
span: {table_id:104,start_key:7480000000000000ff685f720000000000fa,end_key:7480000000000000ff685f730000000000fa}, resolvedTs: 449415392784547843, checkpointTs: 449415392784547843, state: Replicating
span: {table_id:116,start_key:7480000000000000ff745f720000000000fa,end_key:7480000000000000ff745f730000000000fa}, resolvedTs: 449415392784547847, checkpointTs: 449415392784547843, state: Replicating
span: {table_id:106,start_key:7480000000000000ff6a5f720000000000fa,end_key:7480000000000000ff6a5f730000000000fa}, resolvedTs: 449415392784547847, checkpointTs: 449415392784547843, state: Replicating
span: {table_id:109,start_key:7480000000000000ff6d5f720000000000fa,end_key:7480000000000000ff6d5f730000000000fa}, resolvedTs: 449415392784547844, checkpointTs: 449415392784547843, state: Replicating



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/818dcd1c-6aae-487d-89bc-195419e91634
	{"id":"818dcd1c-6aae-487d-89bc-195419e91634","address":"127.0.0.1:8300","version":"v7.5.1-21-g88db1a842"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f293971bf30
	818dcd1c-6aae-487d-89bc-195419e91634

/tidb/cdc/default/default/changefeed/info/5aa2918f-e47a-4194-b365-70d4fd6cebdf
	{"upstream-id":7363221430226611654,"namespace":"default","changefeed-id":"5aa2918f-e47a-4194-b365-70d4fd6cebdf","sink-uri":"mysql://normal:123456@127.0.0.1:3306/","create-time":"2024-04-29T17:40:01.407430316+08:00","start-ts":449415374775255041,"target-ts":0,"admin-job-type":0,"sort-engine":"","sort-dir":"","config":{"memory-quota":1073741824,"case-sensitive":false,"force-replicate":false,"check-gc-safe-point":true,"enable-sync-point":false,"ignore-ineligible-table":false,"bdr-mode":false,"sync-point-interval":600000000000,"sync-point-retention":86400000000000,"filter":{"rules":["*.*"],"ignore-txn-start-ts":null,"event-filters":null},"mounter":{"worker-num":16},"sink":{"csv":{"delimiter":",","quote":"\"","null":"\\N","include-commit-ts":false,"binary-encoding-method":"base64"},"encoder-concurrency":32,"terminator":"\r\n","date-separator":"day","enable-partition-separator":true,"enable-kafka-sink-v2":false,"only-output-updated-columns":false,"delete-only-output-handle-key-columns":false,"advance-timeout-in-sec":150,"send-bootstrap-interval-in-sec":120,"send-bootstrap-in-msg-count":10000,"send-bootstrap-to-all-partition":true},"consistent":{"level":"eventual","max-log-size":64,"flush-interval":2000,"meta-flush-interval":200,"encoding-worker-num":16,"flush-worker-num":8,"storage":"file:///tmp/tidb_cdc_test/consistent_replicate_storage_file/redo","use-file-backend":false,"compression":"lz4","memory-usage":{"memory-quota-percentage":50,"event-cache-percentage":0}},"scheduler":{"enable-table-across-nodes":false,"region-threshold":100000,"write-key-threshold":0,"region-per-span":0},"integrity":{"integrity-check-level":"none","corruption-handle-level":"warn"},"changefeed-error-stuck-duration":1800000000000,"sql-mode":"ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION","synced-status":{"synced-check-interval":300,"checkpoint-interval":15}},"state":"normal","error":null,"warning":null,"creator-version":"v7.5.1-21-g88db1a842","epoch":449415374801469443}

/tidb/cdc/default/default/changefeed/status/5aa2918f-e47a-4194-b365-70d4fd6cebdf
	{"checkpoint-ts":449415392784547843,"min-table-barrier-ts":449415392784547847,"admin-job-type":0}

/tidb/cdc/default/default/task/position/818dcd1c-6aae-487d-89bc-195419e91634/5aa2918f-e47a-4194-b365-70d4fd6cebdf
	{"checkpoint-ts":0,"resolved-ts":0,"count":0,"error":null,"warning":null}

/tidb/cdc/default/default/upstream/7363221430226611654
	{"id":7363221430226611654,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'failed to get info:'
+ echo '

*** owner info ***:



*** processors info ***:

changefeedID: default/5aa2918f-e47a-4194-b365-70d4fd6cebdf
{UpstreamID:7363221430226611654 Namespace:default ID:5aa2918f-e47a-4194-b365-70d4fd6cebdf SinkURI:mysql://normal:123456@127.0.0.1:3306/ CreateTime:2024-04-29 17:40:01.407430316 +0800 CST StartTs:449415374775255041 TargetTs:0 AdminJobType:noop Engine:unified SortDir: Config:0xc002d01b90 State:normal Error:<nil> Warning:<nil> CreatorVersion:v7.5.1-21-g88db1a842 Epoch:449415374801469443}
{CheckpointTs:449415392784547843 MinTableBarrierTs:449415392784547847 AdminJobType:noop}
span: {table_id:110,start_key:7480000000000000ff6e5f720000000000fa,end_key:7480000000000000ff6e5f730000000000fa}, resolvedTs: 449415392784547843, checkpointTs: 449415392784547843, state: Replicating
span: {table_id:111,start_key:7480000000000000ff6f5f720000000000fa,end_key:7480000000000000ff6f5f730000000000fa}, resolvedTs: 449415392784547847, checkpointTs: 449415392784547843, state: Replicating
span: {table_id:114,start_key:7480000000000000ff725f720000000000fa,end_key:7480000000000000ff725f730000000000fa}, resolvedTs: 449415392784547844, checkpointTs: 449415392784547843, state: Replicating
span: {table_id:113,start_key:7480000000000000ff715f720000000000fa,end_key:7480000000000000ff715f730000000000fa}, resolvedTs: 449415392784547847, checkpointTs: 449415392784547843, state: Replicating
span: {table_id:104,start_key:7480000000000000ff685f720000000000fa,end_key:7480000000000000ff685f730000000000fa}, resolvedTs: 449415392784547843, checkpointTs: 449415392784547843, state: Replicating
span: {table_id:116,start_key:7480000000000000ff745f720000000000fa,end_key:7480000000000000ff745f730000000000fa}, resolvedTs: 449415392784547847, checkpointTs: 449415392784547843, state: Replicating
span: {table_id:106,start_key:7480000000000000ff6a5f720000000000fa,end_key:7480000000000000ff6a5f730000000000fa}, resolvedTs: 449415392784547847, checkpointTs: 449415392784547843, state: Replicating
span: {table_id:109,start_key:7480000000000000ff6d5f720000000000fa,end_key:7480000000000000ff6d5f730000000000fa}, resolvedTs: 449415392784547844, checkpointTs: 449415392784547843, state: Replicating



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/818dcd1c-6aae-487d-89bc-195419e91634
	{"id":"818dcd1c-6aae-487d-89bc-195419e91634","address":"127.0.0.1:8300","version":"v7.5.1-21-g88db1a842"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f293971bf30
	818dcd1c-6aae-487d-89bc-195419e91634

/tidb/cdc/default/default/changefeed/info/5aa2918f-e47a-4194-b365-70d4fd6cebdf
	{"upstream-id":7363221430226611654,"namespace":"default","changefeed-id":"5aa2918f-e47a-4194-b365-70d4fd6cebdf","sink-uri":"mysql://normal:123456@127.0.0.1:3306/","create-time":"2024-04-29T17:40:01.407430316+08:00","start-ts":449415374775255041,"target-ts":0,"admin-job-type":0,"sort-engine":"","sort-dir":"","config":{"memory-quota":1073741824,"case-sensitive":false,"force-replicate":false,"check-gc-safe-point":true,"enable-sync-point":false,"ignore-ineligible-table":false,"bdr-mode":false,"sync-point-interval":600000000000,"sync-point-retention":86400000000000,"filter":{"rules":["*.*"],"ignore-txn-start-ts":null,"event-filters":null},"mounter":{"worker-num":16},"sink":{"csv":{"delimiter":",","quote":"\"","null":"\\N","include-commit-ts":false,"binary-encoding-method":"base64"},"encoder-concurrency":32,"terminator":"\r\n","date-separator":"day","enable-partition-separator":true,"enable-kafka-sink-v2":false,"only-output-updated-columns":false,"delete-only-output-handle-key-columns":false,"advance-timeout-in-sec":150,"send-bootstrap-interval-in-sec":120,"send-bootstrap-in-msg-count":10000,"send-bootstrap-to-all-partition":true},"consistent":{"level":"eventual","max-log-size":64,"flush-interval":2000,"meta-flush-interval":200,"encoding-worker-num":16,"flush-worker-num":8,"storage":"file:///tmp/tidb_cdc_test/consistent_replicate_storage_file/redo","use-file-backend":false,"compression":"lz4","memory-usage":{"memory-quota-percentage":50,"event-cache-percentage":0}},"scheduler":{"enable-table-across-nodes":false,"region-threshold":100000,"write-key-threshold":0,"region-per-span":0},"integrity":{"integrity-check-level":"none","corruption-handle-level":"warn"},"changefeed-error-stuck-duration":1800000000000,"sql-mode":"ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION","synced-status":{"synced-check-interval":300,"checkpoint-interval":15}},"state":"normal","error":null,"warning":null,"creator-version":"v7.5.1-21-g88db1a842","epoch":449415374801469443}

/tidb/cdc/default/default/changefeed/status/5aa2918f-e47a-4194-b365-70d4fd6cebdf
	{"checkpoint-ts":449415392784547843,"min-table-barrier-ts":449415392784547847,"admin-job-type":0}

/tidb/cdc/default/default/task/position/818dcd1c-6aae-487d-89bc-195419e91634/5aa2918f-e47a-4194-b365-70d4fd6cebdf
	{"checkpoint-ts":0,"resolved-ts":0,"count":0,"error":null,"warning":null}

/tidb/cdc/default/default/upstream/7363221430226611654
	{"id":7363221430226611654,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ echo '

*** owner info ***:



*** processors info ***:

changefeedID: default/5aa2918f-e47a-4194-b365-70d4fd6cebdf
{UpstreamID:7363221430226611654 Namespace:default ID:5aa2918f-e47a-4194-b365-70d4fd6cebdf SinkURI:mysql://normal:123456@127.0.0.1:3306/ CreateTime:2024-04-29 17:40:01.407430316 +0800 CST StartTs:449415374775255041 TargetTs:0 AdminJobType:noop Engine:unified SortDir: Config:0xc002d01b90 State:normal Error:<nil> Warning:<nil> CreatorVersion:v7.5.1-21-g88db1a842 Epoch:449415374801469443}
{CheckpointTs:449415392784547843 MinTableBarrierTs:449415392784547847 AdminJobType:noop}
span: {table_id:110,start_key:7480000000000000ff6e5f720000000000fa,end_key:7480000000000000ff6e5f730000000000fa}, resolvedTs: 449415392784547843, checkpointTs: 449415392784547843, state: Replicating
span: {table_id:111,start_key:7480000000000000ff6f5f720000000000fa,end_key:7480000000000000ff6f5f730000000000fa}, resolvedTs: 449415392784547847, checkpointTs: 449415392784547843, state: Replicating
span: {table_id:114,start_key:7480000000000000ff725f720000000000fa,end_key:7480000000000000ff725f730000000000fa}, resolvedTs: 449415392784547844, checkpointTs: 449415392784547843, state: Replicating
span: {table_id:113,start_key:7480000000000000ff715f720000000000fa,end_key:7480000000000000ff715f730000000000fa}, resolvedTs: 449415392784547847, checkpointTs: 449415392784547843, state: Replicating
span: {table_id:104,start_key:7480000000000000ff685f720000000000fa,end_key:7480000000000000ff685f730000000000fa}, resolvedTs: 449415392784547843, checkpointTs: 449415392784547843, state: Replicating
span: {table_id:116,start_key:7480000000000000ff745f720000000000fa,end_key:7480000000000000ff745f730000000000fa}, resolvedTs: 449415392784547847, checkpointTs: 449415392784547843, state: Replicating
span: {table_id:106,start_key:7480000000000000ff6a5f720000000000fa,end_key:7480000000000000ff6a5f730000000000fa}, resolvedTs: 449415392784547847, checkpointTs: 449415392784547843, state: Replicating
span: {table_id:109,start_key:7480000000000000ff6d5f720000000000fa,end_key:7480000000000000ff6d5f730000000000fa}, resolvedTs: 449415392784547844, checkpointTs: 449415392784547843, state: Replicating



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/818dcd1c-6aae-487d-89bc-195419e91634
	{"id":"818dcd1c-6aae-487d-89bc-195419e91634","address":"127.0.0.1:8300","version":"v7.5.1-21-g88db1a842"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f293971bf30
	818dcd1c-6aae-487d-89bc-195419e91634

/tidb/cdc/default/default/changefeed/info/5aa2918f-e47a-4194-b365-70d4fd6cebdf
	{"upstream-id":7363221430226611654,"namespace":"default","changefeed-id":"5aa2918f-e47a-4194-b365-70d4fd6cebdf","sink-uri":"mysql://normal:123456@127.0.0.1:3306/","create-time":"2024-04-29T17:40:01.407430316+08:00","start-ts":449415374775255041,"target-ts":0,"admin-job-type":0,"sort-engine":"","sort-dir":"","config":{"memory-quota":1073741824,"case-sensitive":false,"force-replicate":false,"check-gc-safe-point":true,"enable-sync-point":false,"ignore-ineligible-table":false,"bdr-mode":false,"sync-point-interval":600000000000,"sync-point-retention":86400000000000,"filter":{"rules":["*.*"],"ignore-txn-start-ts":null,"event-filters":null},"mounter":{"worker-num":16},"sink":{"csv":{"delimiter":",","quote":"\"","null":"\\N","include-commit-ts":false,"binary-encoding-method":"base64"},"encoder-concurrency":32,"terminator":"\r\n","date-separator":"day","enable-partition-separator":true,"enable-kafka-sink-v2":false,"only-output-updated-columns":false,"delete-only-output-handle-key-columns":false,"advance-timeout-in-sec":150,"send-bootstrap-interval-in-sec":120,"send-bootstrap-in-msg-count":10000,"send-bootstrap-to-all-partition":true},"consistent":{"level":"eventual","max-log-size":64,"flush-interval":2000,"meta-flush-interval":200,"encoding-worker-num":16,"flush-worker-num":8,"storage":"file:///tmp/tidb_cdc_test/consistent_replicate_storage_file/redo","use-file-backend":false,"compression":"lz4","memory-usage":{"memory-quota-percentage":50,"event-cache-percenta+ grep -q 'etcd info'
ge":0}},"scheduler":{"enable-table-across-nodes":false,"region-threshold":100000,"write-key-threshold":0,"region-per-span":0},"integrity":{"integrity-check-level":"none","corruption-handle-level":"warn"},"changefeed-error-stuck-duration":1800000000000,"sql-mode":"ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION","synced-status":{"synced-check-interval":300,"checkpoint-interval":15}},"state":"normal","error":null,"warning":null,"creator-version":"v7.5.1-21-g88db1a842","epoch":449415374801469443}

/tidb/cdc/default/default/changefeed/status/5aa2918f-e47a-4194-b365-70d4fd6cebdf
	{"checkpoint-ts":449415392784547843,"min-table-barrier-ts":449415392784547847,"admin-job-type":0}

/tidb/cdc/default/default/task/position/818dcd1c-6aae-487d-89bc-195419e91634/5aa2918f-e47a-4194-b365-70d4fd6cebdf
	{"checkpoint-ts":0,"resolved-ts":0,"count":0,"error":null,"warning":null}

/tidb/cdc/default/default/upstream/7363221430226611654
	{"id":7363221430226611654,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ break
+ set +x
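Worth noting in the changefeed info dumped above: the consistent section shows level "eventual" with storage file:///tmp/tidb_cdc_test/consistent_replicate_storage_file/redo, i.e. redo logging to a local-file backend is enabled, which is the mechanism the MySQLSinkHangLongTime failpoint set earlier appears intended to exercise.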
+ pd_host=127.0.0.1
+ pd_port=2379
++ run_cdc_cli tso query --pd=http://127.0.0.1:2379
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.simple.cli.21674.out cli tso query --pd=http://127.0.0.1:2379
check diff failed 26-th time, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 38-th time, retry later
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_mysql_test/tiflow/tests/integration_tests/kafka_simple_claim_check/run.sh using Sink-Type: mysql... <<=================
[Mon Apr 29 17:41:12 CST 2024] <<<<<< run test case kafka_simple_claim_check success! >>>>>>
+ set +x
+ tso='449415393388527625
PASS
coverage: 1.8% of statements in github.com/pingcap/tiflow/...'
+ echo 449415393388527625 PASS coverage: 1.8% of statements in github.com/pingcap/tiflow/...
+ awk -F ' ' '{print $1}'
+ set +x
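cli tso query prints the TSO followed by the test binary's PASS/coverage chatter, so the script echoes the output and keeps only the first whitespace-separated field. An equivalent one-liner (sketch):

tso=$(cdc.test cli tso query --pd=http://127.0.0.1:2379 | head -n1 | awk '{print $1}')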
<<< Run all test success >>>
[Pipeline] }
Cache not saved (ws/jenkins-pingcap-tiflow-release-7.5-pull_cdc_integration_mysql_test-340/tiflow-cdc already exists)
[Pipeline] // cache
[Pipeline] }
[Pipeline] // dir
[Pipeline] }
[Pipeline] // withCredentials
[Pipeline] }
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 39-th time, retry later
[Pipeline] // timeout
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // container
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
check diff failed 27-th time, retry later
[Mon Apr 29 17:41:14 CST 2024] <<<<<< START cdc server in simple case >>>>>>
+ [[ '' == \t\r\u\e ]]
+ set +e
+ get_info_fail_msg='failed to get info:'
+ etcd_info_msg='etcd info'
+ '[' -z '' ']'
+ curl_status_cmd='curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info'
+ [[ no != \n\o ]]
+ (( i = 0 ))
+ (( i <= 50 ))
+ GO_FAILPOINTS=
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.simple.2172621728.out server --log-file /tmp/tidb_cdc_test/simple/cdc.log --log-level debug --data-dir /tmp/tidb_cdc_test/simple/cdc_data --cluster-id default
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connection refused
* Failed connect to 127.0.0.1:8300; Connection refused
* Closing connection 0
+ res=
+ echo ''
+ grep -q 'failed to get info:'
+ echo ''
+ grep -q 'etcd info'
+ '[' 0 -eq 50 ']'
+ sleep 3
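Note: the trace above is the harness's readiness probe for the cdc server. It polls the /debug/info endpoint up to 50 times, treating 'failed to get info:' in the body as an error marker and 'etcd info' as the ready marker, and sleeps 3 seconds between attempts. A condensed sketch of the same pattern (the function name wait_ready is illustrative, not an actual harness helper):

    # Poll an HTTP endpoint until its body contains a ready marker.
    wait_ready() {
        local url=$1 marker=$2
        local i res
        for i in $(seq 0 50); do
            res=$(curl -vsL --max-time 20 "$url" 2>/dev/null)
            if echo "$res" | grep -q "$marker"; then
                return 0   # server is up and serving owner/processor/etcd metadata
            fi
            sleep 3
        done
        echo "server did not become ready: $url" >&2
        return 1
    }
    wait_ready http://127.0.0.1:8300/debug/info 'etcd info'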
[Pipeline] // podTemplate
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_mysql_test/tiflow/tests/integration_tests/kafka_simple_claim_check_avro/run.sh using Sink-Type: mysql... <<=================
[Mon Apr 29 17:41:15 CST 2024] <<<<<< run test case kafka_simple_claim_check_avro success! >>>>>>
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 40-th time, retry later
check diff failed 28-th time, retry later
+ (( i++ ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connected to 127.0.0.1 (127.0.0.1) port 8300 (#0)
> GET /debug/info HTTP/1.1
> User-Agent: curl/7.29.0
> Host: 127.0.0.1:8300
> Accept: */*
> 
< HTTP/1.1 200 OK
< Date: Mon, 29 Apr 2024 09:41:17 GMT
< Content-Length: 613
< Content-Type: text/plain; charset=utf-8
< 
{ [data not shown]
* Connection #0 to host 127.0.0.1 left intact
+ res='

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/9223d116-335e-4c3d-945b-135fa3a25a9e
	{"id":"9223d116-335e-4c3d-945b-135fa3a25a9e","address":"127.0.0.1:8300","version":"v7.5.1-21-g88db1a842"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f293a99a90b
	9223d116-335e-4c3d-945b-135fa3a25a9e

/tidb/cdc/default/default/upstream/7363221751058103275
	{"id":7363221751058103275,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/9223d116-335e-4c3d-945b-135fa3a25a9e
	{"id":"9223d116-335e-4c3d-945b-135fa3a25a9e","address":"127.0.0.1:8300","version":"v7.5.1-21-g88db1a842"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f293a99a90b
	9223d116-335e-4c3d-945b-135fa3a25a9e

/tidb/cdc/default/default/upstream/7363221751058103275
	{"id":7363221751058103275,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'failed to get info:'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/9223d116-335e-4c3d-945b-135fa3a25a9e
	{"id":"9223d116-335e-4c3d-945b-135fa3a25a9e","address":"127.0.0.1:8300","version":"v7.5.1-21-g88db1a842"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f293a99a90b
	9223d116-335e-4c3d-945b-135fa3a25a9e

/tidb/cdc/default/default/upstream/7363221751058103275
	{"id":7363221751058103275,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'etcd info'
+ break
+ set +x
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.simple.cli.21770.out cli changefeed create --start-ts=449415393388527625 --sink-uri=mysql+ssl://normal:123456@127.0.0.1:3306/
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 41-th time, retry later
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_mysql_test/tiflow/tests/integration_tests/canal_json_adapter_compatibility/run.sh using Sink-Type: mysql... <<=================
[Mon Apr 29 17:41:18 CST 2024] <<<<<< run test case canal_json_adapter_compatibility success! >>>>>>
Create changefeed successfully!
ID: f54da896-34ca-4792-8a23-1e9d7f4ef3aa
Info: {"upstream_id":7363221751058103275,"namespace":"default","id":"f54da896-34ca-4792-8a23-1e9d7f4ef3aa","sink_uri":"mysql+ssl://normal:xxxxx@127.0.0.1:3306/","create_time":"2024-04-29T17:41:18.407264028+08:00","start_ts":449415393388527625,"config":{"memory_quota":1073741824,"case_sensitive":false,"force_replicate":false,"ignore_ineligible_table":false,"check_gc_safe_point":true,"enable_sync_point":false,"bdr_mode":false,"sync_point_interval":600000000000,"sync_point_retention":86400000000000,"filter":{"rules":["*.*"]},"mounter":{"worker_num":16},"sink":{"csv":{"delimiter":",","quote":"\"","null":"\\N","include_commit_ts":false,"binary_encoding_method":"base64"},"encoder_concurrency":32,"terminator":"\r\n","date_separator":"day","enable_partition_separator":true,"enable_kafka_sink_v2":false,"only_output_updated_columns":false,"delete_only_output_handle_key_columns":false,"advance_timeout":150,"send_bootstrap_interval_in_sec":120,"send_bootstrap_in_msg_count":10000,"send_bootstrap_to_all_partition":true},"consistent":{"level":"none","max_log_size":64,"flush_interval":2000,"meta_flush_interval":200,"encoding_worker_num":16,"flush_worker_num":8,"use_file_backend":false,"memory_usage":{"memory_quota_percentage":50,"event_cache_percentage":0}},"scheduler":{"enable_table_across_nodes":false,"region_threshold":100000,"write_key_threshold":0},"integrity":{"integrity_check_level":"none","corruption_handle_level":"warn"},"changefeed_error_stuck_duration":1800000000000,"sql_mode":"ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION","synced_status":{"synced_check_interval":300,"checkpoint_interval":15}},"state":"normal","creator_version":"v7.5.1-21-g88db1a842","resolved_ts":449415393388527625,"checkpoint_ts":449415393388527625,"checkpoint_time":"2024-04-29 17:41:12.289"}
PASS
coverage: 2.4% of statements in github.com/pingcap/tiflow/...
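Note: the changefeed above is created with the TSO queried earlier as its --start-ts, which pins replication to begin exactly at that upstream snapshot. A minimal sketch of the two-step flow, assuming a plain cdc binary on PATH (the cdc.test coverage wrapper seen in the trace is test-build plumbing and is omitted here):

    # Query a start timestamp from PD, then create a changefeed pinned to it.
    start_ts=$(cdc cli tso query --pd=http://127.0.0.1:2379 | head -n1 | awk '{print $1}')
    cdc cli changefeed create \
        --start-ts="$start_ts" \
        --sink-uri="mysql+ssl://normal:123456@127.0.0.1:3306/"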
check diff failed 29-th time, retry later
+ set +x
succeed to verify meta placement rules
ERROR 1146 (42S02) at line 1: Table 'test.simple1' doesn't exist
check data failed 1-th time, retry later
check diff failed 30-th time, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 42-th time, retry later
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_mysql_test/tiflow/tests/integration_tests/canal_json_basic/run.sh using Sink-Type: mysql... <<=================
[Mon Apr 29 17:41:21 CST 2024] <<<<<< run test case canal_json_basic success! >>>>>>
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 2-th time, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 43-th time, retry later
check diff failed 31-th time, retry later
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_mysql_test/tiflow/tests/integration_tests/multi_topics/run.sh using Sink-Type: mysql... <<=================
[Mon Apr 29 17:41:24 CST 2024] <<<<<< run test case multi_topics success! >>>>>>
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 3-th time, retry later
check diff failed 32-th time, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 44-th time, retry later
check data successfully
wait process cdc.test exit for 1-th time...
check diff failed 33-th time, retry later
wait process cdc.test exit for 2-th time...
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_mysql_test/tiflow/tests/integration_tests/avro_basic/run.sh using Sink-Type: mysql... <<=================
[Mon Apr 29 17:41:26 CST 2024] <<<<<< run test case avro_basic success! >>>>>>
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 45-th time, retry later
cdc.test: no process found
wait process cdc.test exit for 3-th time...
process cdc.test already exit
[Mon Apr 29 17:41:27 CST 2024] <<<<<< run test case simple success! >>>>>>
check diff failed 34-th time, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 46-th time, retry later
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_mysql_test/tiflow/tests/integration_tests/canal_json_handle_key_only/run.sh using Sink-Type: mysql... <<=================
[Mon Apr 29 17:41:29 CST 2024] <<<<<< run test case canal_json_handle_key_only success! >>>>>>
check diff failed 35-th time, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 47-th time, retry later
check diff failed 36-th time, retry later
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_mysql_test/tiflow/tests/integration_tests/open_protocol_handle_key_only/run.sh using Sink-Type: mysql... <<=================
[Mon Apr 29 17:41:32 CST 2024] <<<<<< run test case open_protocol_handle_key_only success! >>>>>>
check_redo_resolved_ts 5aa2918f-e47a-4194-b365-70d4fd6cebdf 449415398905610251 file:///tmp/tidb_cdc_test/consistent_replicate_storage_file/redo /tmp/tidb_cdc_test/consistent_replicate_storage_file/cdc_data/redo/5aa2918f-e47a-4194-b365-70d4fd6cebdf/meta
global resolved ts 449415393099120664 not forward to 449415398905610251
run task failed 1-th time, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 48-th time, retry later
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 49-th time, retry later
check_redo_resolved_ts 5aa2918f-e47a-4194-b365-70d4fd6cebdf 449415398905610251 file:///tmp/tidb_cdc_test/consistent_replicate_storage_file/redo /tmp/tidb_cdc_test/consistent_replicate_storage_file/cdc_data/redo/5aa2918f-e47a-4194-b365-70d4fd6cebdf/meta
global resolved ts 449415393099120664 not forward to 449415398905610251
run task failed 2-th time, retry later
check diff failed 37-th time, retry later
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_mysql_test/tiflow/tests/integration_tests/canal_json_claim_check/run.sh using Sink-Type: mysql... <<=================
[Mon Apr 29 17:41:35 CST 2024] <<<<<< run test case canal_json_claim_check success! >>>>>>
TEST FAILED: OUTPUT DOES NOT CONTAIN 'id: 1'
____________________________________
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
check data failed 50-th time, retry later
check diff failed 38-th time, retry later
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_mysql_test/tiflow/tests/integration_tests/open_protocol_claim_check/run.sh using Sink-Type: mysql... <<=================
[Mon Apr 29 17:41:38 CST 2024] <<<<<< run test case open_protocol_claim_check success! >>>>>>
check data failed at last
check_redo_resolved_ts 5aa2918f-e47a-4194-b365-70d4fd6cebdf 449415398905610251 file:///tmp/tidb_cdc_test/consistent_replicate_storage_file/redo /tmp/tidb_cdc_test/consistent_replicate_storage_file/cdc_data/redo/5aa2918f-e47a-4194-b365-70d4fd6cebdf/meta
run task successfully
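Note: check_redo_resolved_ts passes only once the global resolved ts recorded in the redo meta has advanced past the target ts, so the harness re-runs it ("run task failed N-th time, retry later") until the changefeed catches up. A generic sketch of that retry wrapper (the name retry_task and the sleep interval are illustrative; the final arguments are the ones shown in the log):

    # Re-run a check until it succeeds or the attempt budget runs out.
    retry_task() {
        local max=$1; shift
        local n
        for (( n = 1; n <= max; n++ )); do
            if "$@"; then
                echo "run task successfully"
                return 0
            fi
            echo "run task failed $n-th time, retry later"
            sleep 2
        done
        return 1
    }
    retry_task 30 check_redo_resolved_ts "$changefeed_id" "$target_ts" "$storage" "$meta_dir"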
check diff failed 39-th time, retry later
wait process cdc.test exit for 1-th time...
wait process cdc.test exit for 2-th time...
wait process cdc.test exit for 3-th time...
cdc.test: no process found
wait process cdc.test exit for 4-th time...
process cdc.test already exit
[2024/04/29 17:41:41.877 +08:00] [INFO] [helper.go:54] ["init log"] [file=] [level=info]
[2024/04/29 17:41:41.878 +08:00] [INFO] [mem_quota.go:82] ["New memory quota"] [namespace=default] [changefeed=redo-applier] [total=1073741824]
[2024/04/29 17:41:41.878 +08:00] [INFO] [redo.go:163] ["apply redo log starts"] [checkpointTs=449415393256407053] [resolvedTs=449415400150794241]
[2024/04/29 17:41:41.881 +08:00] [INFO] [tz.go:35] ["Use the timezone of the TiCDC server machine"] [timezoneName=System] [timezone=Asia/Shanghai]
[2024/04/29 17:41:41.882 +08:00] [WARN] [config.go:380] ["Because time-zone is not specified, the timezone of the TiCDC server will be used. We recommend that you specify the time-zone explicitly. Please make sure that the timezone of the TiCDC server, sink-uri and the downstream database are consistent. If the downstream database does not load the timezone information, you can refer to https://dev.mysql.com/doc/refman/8.0/en/mysql-tzinfo-to-sql.html."] [timezone=Asia/Shanghai]
[2024/04/29 17:41:41.883 +08:00] [INFO] [file.go:114] ["succeed to download and sort redo logs"] [type=ddl] [duration=4.958776ms]
[2024/04/29 17:41:41.891 +08:00] [INFO] [db_helper.go:175] ["sink uri is configured"] [dsn="normal:******@tcp(127.0.0.1:3306)/?interpolateParams=true&multiStatements=true&allow_auto_random_explicit_insert=1&charset=utf8mb4&foreign_key_checks=0&maxAllowedPacket=0&readTimeout=2m&sql_mode=%22NO_ENGINE_SUBSTITUTION%2CONLY_FULL_GROUP_BY%2CNO_AUTO_VALUE_ON_ZERO%2CIGNORE_SPACE%2CALLOW_INVALID_DATES%22&tidb_enable_external_ts_read=%22OFF%22&tidb_placement_mode=%22ignore%22&tidb_txn_mode=optimistic&time_zone=%22Asia%2FShanghai%22&timeout=2m&transaction_isolation=%22READ-COMMITTED%22&writeTimeout=2m"]
[2024/04/29 17:41:41.895 +08:00] [INFO] [mysql.go:198] ["MySQL backends is created"] [changefeed=.] [workerCount=16] [forceReplicate=false]
[2024/04/29 17:41:41.895 +08:00] [INFO] [worker.go:105] ["Transaction dmlSink worker starts"] [changefeedID=.] [workerID=1]
[2024/04/29 17:41:41.895 +08:00] [INFO] [worker.go:105] ["Transaction dmlSink worker starts"] [changefeedID=.] [workerID=0]
[2024/04/29 17:41:41.895 +08:00] [INFO] [worker.go:105] ["Transaction dmlSink worker starts"] [changefeedID=.] [workerID=2]
[2024/04/29 17:41:41.895 +08:00] [INFO] [worker.go:105] ["Transaction dmlSink worker starts"] [changefeedID=.] [workerID=3]
[2024/04/29 17:41:41.895 +08:00] [INFO] [worker.go:105] ["Transaction dmlSink worker starts"] [changefeedID=.] [workerID=6]
[2024/04/29 17:41:41.895 +08:00] [INFO] [worker.go:105] ["Transaction dmlSink worker starts"] [changefeedID=.] [workerID=5]
[2024/04/29 17:41:41.895 +08:00] [INFO] [worker.go:105] ["Transaction dmlSink worker starts"] [changefeedID=.] [workerID=4]
[2024/04/29 17:41:41.895 +08:00] [INFO] [worker.go:105] ["Transaction dmlSink worker starts"] [changefeedID=.] [workerID=11]
[2024/04/29 17:41:41.895 +08:00] [INFO] [worker.go:105] ["Transaction dmlSink worker starts"] [changefeedID=.] [workerID=13]
[2024/04/29 17:41:41.895 +08:00] [INFO] [worker.go:105] ["Transaction dmlSink worker starts"] [changefeedID=.] [workerID=9]
[2024/04/29 17:41:41.895 +08:00] [INFO] [worker.go:105] ["Transaction dmlSink worker starts"] [changefeedID=.] [workerID=14]
[2024/04/29 17:41:41.895 +08:00] [INFO] [worker.go:105] ["Transaction dmlSink worker starts"] [changefeedID=.] [workerID=7]
[2024/04/29 17:41:41.895 +08:00] [INFO] [worker.go:105] ["Transaction dmlSink worker starts"] [changefeedID=.] [workerID=12]
[2024/04/29 17:41:41.895 +08:00] [INFO] [worker.go:105] ["Transaction dmlSink worker starts"] [changefeedID=.] [workerID=10]
[2024/04/29 17:41:41.895 +08:00] [INFO] [worker.go:105] ["Transaction dmlSink worker starts"] [changefeedID=.] [workerID=8]
[2024/04/29 17:41:41.895 +08:00] [INFO] [worker.go:105] ["Transaction dmlSink worker starts"] [changefeedID=.] [workerID=15]
[2024/04/29 17:41:41.896 +08:00] [INFO] [tz.go:35] ["Use the timezone of the TiCDC server machine"] [timezoneName=System] [timezone=Asia/Shanghai]
[2024/04/29 17:41:41.896 +08:00] [WARN] [config.go:380] ["Because time-zone is not specified, the timezone of the TiCDC server will be used. We recommend that you specify the time-zone explicitly. Please make sure that the timezone of the TiCDC server, sink-uri and the downstream database are consistent. If the downstream database does not load the timezone information, you can refer to https://dev.mysql.com/doc/refman/8.0/en/mysql-tzinfo-to-sql.html."] [timezone=Asia/Shanghai]
[2024/04/29 17:41:41.901 +08:00] [INFO] [db_helper.go:175] ["sink uri is configured"] [dsn="normal:******@tcp(127.0.0.1:3306)/?interpolateParams=true&multiStatements=true&allow_auto_random_explicit_insert=1&charset=utf8mb4&foreign_key_checks=0&maxAllowedPacket=0&readTimeout=2m&sql_mode=%22NO_ENGINE_SUBSTITUTION%2CONLY_FULL_GROUP_BY%2CNO_AUTO_VALUE_ON_ZERO%2CIGNORE_SPACE%2CALLOW_INVALID_DATES%22&tidb_enable_external_ts_read=%22OFF%22&tidb_placement_mode=%22ignore%22&tidb_txn_mode=optimistic&time_zone=%22Asia%2FShanghai%22&timeout=2m&transaction_isolation=%22READ-COMMITTED%22&writeTimeout=2m"]
[2024/04/29 17:41:41.903 +08:00] [INFO] [mysql_ddl_sink.go:99] ["MySQL DDL sink is created"] [namespace=] [changefeed=]
[2024/04/29 17:41:42.345 +08:00] [INFO] [file.go:114] ["succeed to download and sort redo logs"] [type=row] [duration=466.786189ms]
[2024/04/29 17:41:42.345 +08:00] [ERROR] [redo.go:268] ["ignore unsupported DDL"] [ddl="{\"StartTs\":449415393256407048,\"CommitTs\":449415393256407053,\"Query\":\"ALTER TABLE `consistent_replicate_storage_file`.`t1` EXCHANGE PARTITION `p3` WITH TABLE `consistent_replicate_storage_file`.`t2`\",\"TableInfo\":{\"SchemaID\":0,\"TableName\":{\"db-name\":\"consistent_replicate_storage_file\",\"tbl-name\":\"t1\",\"tbl-id\":108,\"is-partition\":true},\"Version\":0,\"RowColumnsOffset\":null,\"ColumnsFlag\":null,\"HandleIndexID\":0,\"IndexColumnsOffset\":null},\"PreTableInfo\":null,\"Type\":42,\"Done\":{},\"Charset\":\"\",\"Collate\":\"\",\"IsBootstrap\":false}"]
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_mysql_test/tiflow/tests/integration_tests/canal_json_storage_basic/run.sh using Sink-Type: mysql... <<=================
[Mon Apr 29 17:41:41 CST 2024] <<<<<< run test case canal_json_storage_basic success! >>>>>>
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_mysql_test/tiflow/tests/integration_tests/cdc_server_tips/run.sh using Sink-Type: mysql... <<=================
The 1 times to try to start tidb cluster...
check diff failed 40-th time, retry later
[2024/04/29 17:41:43.413 +08:00] [INFO] [table_sink_impl.go:257] ["Table sink stopped"] [namespace=default] [changefeed=redo-applier] [span={table_id:116,start_key:7480000000000000ff745f720000000000fa,end_key:7480000000000000ff745f730000000000fa}] [checkpointTs=449415400150794241]
[2024/04/29 17:41:43.413 +08:00] [INFO] [table_sink_impl.go:239] ["Stopping table sink"] [namespace=default] [changefeed=redo-applier] [span={table_id:116,start_key:7480000000000000ff745f720000000000fa,end_key:7480000000000000ff745f730000000000fa}] [checkpointTs=449415400150794241]
start tidb cluster in /tmp/tidb_cdc_test/cdc_server_tips
Starting Upstream PD...
Release Version: v7.5.1-5-g584533652
Edition: Community
Git Commit Hash: 58453365285465cd90bc4472cff2bad7ce4d764b
Git Branch: release-7.5
UTC Build Time:  2024-04-03 10:04:14
Starting Downstream PD...
Release Version: v7.5.1-5-g584533652
Edition: Community
Git Commit Hash: 58453365285465cd90bc4472cff2bad7ce4d764b
Git Branch: release-7.5
UTC Build Time:  2024-04-03 10:04:14
Verifying upstream PD is started...
[2024/04/29 17:41:44.317 +08:00] [INFO] [table_sink_impl.go:257] ["Table sink stopped"] [namespace=default] [changefeed=redo-applier] [span={table_id:118,start_key:7480000000000000ff765f720000000000fa,end_key:7480000000000000ff765f730000000000fa}] [checkpointTs=449415400150794241]
[2024/04/29 17:41:44.317 +08:00] [INFO] [table_sink_impl.go:239] ["Stopping table sink"] [namespace=default] [changefeed=redo-applier] [span={table_id:118,start_key:7480000000000000ff765f720000000000fa,end_key:7480000000000000ff765f730000000000fa}] [checkpointTs=449415400150794241]
[2024/04/29 17:41:44.418 +08:00] [INFO] [table_sink_impl.go:257] ["Table sink stopped"] [namespace=default] [changefeed=redo-applier] [span={table_id:113,start_key:7480000000000000ff715f720000000000fa,end_key:7480000000000000ff715f730000000000fa}] [checkpointTs=449415400150794241]
[2024/04/29 17:41:44.418 +08:00] [INFO] [table_sink_impl.go:239] ["Stopping table sink"] [namespace=default] [changefeed=redo-applier] [span={table_id:113,start_key:7480000000000000ff715f720000000000fa,end_key:7480000000000000ff715f730000000000fa}] [checkpointTs=449415400150794241]
[2024/04/29 17:41:44.519 +08:00] [INFO] [redo.go:219] ["apply redo log finishes"] [appliedLogCount=5008] [appliedDDLCount=0] [currentCheckpoint=449415400150794241]
[2024/04/29 17:41:44.519 +08:00] [INFO] [worker.go:120] ["Transaction dmlSink worker exits as canceled"] [changefeedID=.] [workerID=6]
[2024/04/29 17:41:44.519 +08:00] [INFO] [worker.go:120] ["Transaction dmlSink worker exits as canceled"] [changefeedID=.] [workerID=8]
[2024/04/29 17:41:44.519 +08:00] [INFO] [worker.go:120] ["Transaction dmlSink worker exits as canceled"] [changefeedID=.] [workerID=9]
[2024/04/29 17:41:44.519 +08:00] [INFO] [worker.go:120] ["Transaction dmlSink worker exits as canceled"] [changefeedID=.] [workerID=12]
[2024/04/29 17:41:44.519 +08:00] [INFO] [worker.go:120] ["Transaction dmlSink worker exits as canceled"] [changefeedID=.] [workerID=10]
[2024/04/29 17:41:44.519 +08:00] [INFO] [worker.go:120] ["Transaction dmlSink worker exits as canceled"] [changefeedID=.] [workerID=1]
[2024/04/29 17:41:44.519 +08:00] [INFO] [worker.go:120] ["Transaction dmlSink worker exits as canceled"] [changefeedID=.] [workerID=15]
[2024/04/29 17:41:44.519 +08:00] [INFO] [worker.go:120] ["Transaction dmlSink worker exits as canceled"] [changefeedID=.] [workerID=13]
[2024/04/29 17:41:44.519 +08:00] [INFO] [worker.go:120] ["Transaction dmlSink worker exits as canceled"] [changefeedID=.] [workerID=5]
[2024/04/29 17:41:44.519 +08:00] [INFO] [worker.go:120] ["Transaction dmlSink worker exits as canceled"] [changefeedID=.] [workerID=14]
[2024/04/29 17:41:44.519 +08:00] [INFO] [worker.go:120] ["Transaction dmlSink worker exits as canceled"] [changefeedID=.] [workerID=7]
[2024/04/29 17:41:44.519 +08:00] [INFO] [worker.go:120] ["Transaction dmlSink worker exits as canceled"] [changefeedID=.] [workerID=4]
[2024/04/29 17:41:44.519 +08:00] [INFO] [worker.go:120] ["Transaction dmlSink worker exits as canceled"] [changefeedID=.] [workerID=11]
[2024/04/29 17:41:44.519 +08:00] [INFO] [worker.go:120] ["Transaction dmlSink worker exits as canceled"] [changefeedID=.] [workerID=2]
[2024/04/29 17:41:44.519 +08:00] [INFO] [worker.go:120] ["Transaction dmlSink worker exits as canceled"] [changefeedID=.] [workerID=3]
[2024/04/29 17:41:44.519 +08:00] [INFO] [worker.go:120] ["Transaction dmlSink worker exits as canceled"] [changefeedID=.] [workerID=0]
Apply redo log successfully
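Note: "Apply redo log successfully" is the recovery half of the consistent-replication test. After the changefeed stops, an applier downloads and sorts the redo logs from redo storage (the "succeed to download and sort redo logs" lines above), then replays rows from the checkpoint ts up to the resolved ts into the downstream, skipping DDLs it cannot replay such as the EXCHANGE PARTITION logged earlier. Run standalone, the step looks roughly like the following sketch (storage and sink URIs copied from this test; --tmp-dir is a scratch-directory assumption):

    # Replay redo logs into the downstream MySQL to reach a consistent state.
    cdc redo apply \
        --tmp-dir="/tmp/cdc_redo_apply" \
        --storage="file:///tmp/tidb_cdc_test/consistent_replicate_storage_file/redo" \
        --sink-uri="mysql://normal:123456@127.0.0.1:3306/"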
check diff failed 41-th time, retry later
check diff successfully
[Mon Apr 29 17:41:45 CST 2024] <<<<<< run test case consistent_replicate_storage_file success! >>>>>>
count(*) 5000
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_mysql_test/tiflow/tests/integration_tests/canal_json_storage_partition_table/run.sh using Sink-Type: mysql... <<=================
[Mon Apr 29 17:41:45 CST 2024] <<<<<< run test case canal_json_storage_partition_table success! >>>>>>
Verifying downstream PD is started...
check diff failed 42-th time, retry later
Starting Upstream TiKV...
TiKV 
Release Version:   7.5.2
Edition:           Community
Git Commit Hash:   3478895c2a700e4824bb41940260b6b28013275e
Git Commit Branch: release-7.5
UTC Build Time:    2024-04-28 08:20:54
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Enable Features:   pprof-fp jemalloc mem-profiling portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Profile:           dist_release
Starting Downstream TiKV...
TiKV 
Release Version:   7.5.2
Edition:           Community
Git Commit Hash:   3478895c2a700e4824bb41940260b6b28013275e
Git Commit Branch: release-7.5
UTC Build Time:    2024-04-28 08:20:54
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Enable Features:   pprof-fp jemalloc mem-profiling portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Profile:           dist_release
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_mysql_test/tiflow/tests/integration_tests/multi_tables_ddl/run.sh using Sink-Type: mysql... <<=================
* About to connect() to 127.0.0.1 port 24927 (#0)
*   Trying 127.0.0.1...
* Connection refused
* Failed connect to 127.0.0.1:24927; Connection refused
* Closing connection 0

 You are running an older version of MinIO released 3 years ago 
 Update: Run `mc admin update` 


Attempting encryption of all config, IAM users and policies on MinIO backend
check diff failed 43-th time, retry later
Endpoint:  http://127.0.0.1:24927

Object API (Amazon S3 compatible):
   Go:         https://docs.min.io/docs/golang-client-quickstart-guide
   Java:       https://docs.min.io/docs/java-client-quickstart-guide
   Python:     https://docs.min.io/docs/python-client-quickstart-guide
   JavaScript: https://docs.min.io/docs/javascript-client-quickstart-guide
   .NET:       https://docs.min.io/docs/dotnet-client-quickstart-guide
Starting Upstream TiDB...
Release Version: v7.5.1-45-gbf84e231e6
Edition: Community
Git Commit Hash: bf84e231e6ef26891d0cb524d938345f43aa047c
Git Branch: release-7.5
UTC Build Time: 2024-04-29 02:05:15
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Starting Downstream TiDB...
Release Version: v7.5.1-45-gbf84e231e6
Edition: Community
Git Commit Hash: bf84e231e6ef26891d0cb524d938345f43aa047c
Git Branch: release-7.5
UTC Build Time: 2024-04-29 02:05:15
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Verifying Upstream TiDB is started...
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
[Pipeline] }
Cache not saved (inner-step execution failed)
[Pipeline] // cache
[Pipeline] }
[Pipeline] // dir
Post stage
[Pipeline] sh
* About to connect() to 127.0.0.1 port 24927 (#0)
*   Trying 127.0.0.1...
* Connected to 127.0.0.1 (127.0.0.1) port 24927 (#0)
> GET / HTTP/1.1
> User-Agent: curl/7.29.0
> Host: 127.0.0.1:24927
> Accept: */*
> 
< HTTP/1.1 403 Forbidden
< Accept-Ranges: bytes
< Content-Length: 226
< Content-Security-Policy: block-all-mixed-content
< Content-Type: application/xml
< Server: MinIO/RELEASE.2020-07-27T18-37-02Z
< Vary: Origin
< X-Amz-Request-Id: 17CAB6E6B7BEC961
< X-Xss-Protection: 1; mode=block
< Date: Mon, 29 Apr 2024 09:41:50 GMT
< 
{ [data not shown]
* Connection #0 to host 127.0.0.1 left intact
Bucket 's3://logbucket/' created
The 1 times to try to start tidb cluster...
+ ls /tmp/tidb_cdc_test/
cov.ddl_puller_lag.51105112.out
cov.ddl_puller_lag.cli.5062.out
cov.ddl_puller_lag.cli.5156.out
cov.foreign_key.26102612.out
cov.foreign_key.cli.2581.out
cov.foreign_key.cli.2657.out
ddl_puller_lag
foreign_key
sql_res.ddl_puller_lag.txt
sql_res.foreign_key.txt
++ find /tmp/tidb_cdc_test/ -type f -name '*.log'
+ tar -cvzf log-G04.tar.gz /tmp/tidb_cdc_test/ddl_puller_lag/tikv2.log /tmp/tidb_cdc_test/ddl_puller_lag/tiflash/log/proxy.log /tmp/tidb_cdc_test/ddl_puller_lag/tiflash/log/error.log /tmp/tidb_cdc_test/ddl_puller_lag/tiflash/log/server.log /tmp/tidb_cdc_test/ddl_puller_lag/tiflash/db/proxy/db/000005.log /tmp/tidb_cdc_test/ddl_puller_lag/tikv_down.log /tmp/tidb_cdc_test/ddl_puller_lag/tikv3/db/000005.log /tmp/tidb_cdc_test/ddl_puller_lag/stdout.log /tmp/tidb_cdc_test/ddl_puller_lag/tikv1.log /tmp/tidb_cdc_test/ddl_puller_lag/tidb_down.log /tmp/tidb_cdc_test/ddl_puller_lag/tikv2/db/000005.log /tmp/tidb_cdc_test/ddl_puller_lag/tidb_other.log /tmp/tidb_cdc_test/ddl_puller_lag/cdc.log /tmp/tidb_cdc_test/ddl_puller_lag/tidb-slow.log /tmp/tidb_cdc_test/ddl_puller_lag/down_pd.log /tmp/tidb_cdc_test/ddl_puller_lag/tikv1/db/000005.log /tmp/tidb_cdc_test/ddl_puller_lag/down_pd/region-meta/000001.log /tmp/tidb_cdc_test/ddl_puller_lag/down_pd/hot-region/000001.log /tmp/tidb_cdc_test/ddl_puller_lag/pd1.log /tmp/tidb_cdc_test/ddl_puller_lag/tikv_down/db/000005.log /tmp/tidb_cdc_test/ddl_puller_lag/cdc_data/tmp/sorter/0002/000002.log /tmp/tidb_cdc_test/ddl_puller_lag/cdc_data/tmp/sorter/0005/000002.log /tmp/tidb_cdc_test/ddl_puller_lag/cdc_data/tmp/sorter/0006/000002.log /tmp/tidb_cdc_test/ddl_puller_lag/cdc_data/tmp/sorter/0003/000002.log /tmp/tidb_cdc_test/ddl_puller_lag/cdc_data/tmp/sorter/0007/000002.log /tmp/tidb_cdc_test/ddl_puller_lag/cdc_data/tmp/sorter/0000/000002.log /tmp/tidb_cdc_test/ddl_puller_lag/cdc_data/tmp/sorter/0004/000002.log /tmp/tidb_cdc_test/ddl_puller_lag/cdc_data/tmp/sorter/0001/000002.log /tmp/tidb_cdc_test/ddl_puller_lag/pd1/region-meta/000001.log /tmp/tidb_cdc_test/ddl_puller_lag/pd1/hot-region/000001.log /tmp/tidb_cdc_test/ddl_puller_lag/tidb.log /tmp/tidb_cdc_test/ddl_puller_lag/tikv3.log /tmp/tidb_cdc_test/foreign_key/tikv2.log /tmp/tidb_cdc_test/foreign_key/tikv_down.log /tmp/tidb_cdc_test/foreign_key/stdout.log /tmp/tidb_cdc_test/foreign_key/tikv1.log /tmp/tidb_cdc_test/foreign_key/tidb_down.log /tmp/tidb_cdc_test/foreign_key/tidb_other.log /tmp/tidb_cdc_test/foreign_key/cdc.log /tmp/tidb_cdc_test/foreign_key/sync_diff_inspector.log /tmp/tidb_cdc_test/foreign_key/tidb-slow.log /tmp/tidb_cdc_test/foreign_key/down_pd.log /tmp/tidb_cdc_test/foreign_key/pd1.log /tmp/tidb_cdc_test/foreign_key/tidb.log /tmp/tidb_cdc_test/foreign_key/tikv3.log
tar: Removing leading `/' from member names
/tmp/tidb_cdc_test/ddl_puller_lag/tikv2.log
check diff failed 44-th time, retry later
/tmp/tidb_cdc_test/ddl_puller_lag/tiflash/log/proxy.log
/tmp/tidb_cdc_test/ddl_puller_lag/tiflash/log/error.log
/tmp/tidb_cdc_test/ddl_puller_lag/tiflash/log/server.log
/tmp/tidb_cdc_test/ddl_puller_lag/tiflash/db/proxy/db/000005.log
/tmp/tidb_cdc_test/ddl_puller_lag/tikv_down.log
/tmp/tidb_cdc_test/ddl_puller_lag/tikv3/db/000005.log
/tmp/tidb_cdc_test/ddl_puller_lag/stdout.log
/tmp/tidb_cdc_test/ddl_puller_lag/tikv1.log
/tmp/tidb_cdc_test/ddl_puller_lag/tidb_down.log
/tmp/tidb_cdc_test/ddl_puller_lag/tikv2/db/000005.log
/tmp/tidb_cdc_test/ddl_puller_lag/tidb_other.log
/tmp/tidb_cdc_test/ddl_puller_lag/cdc.log
/tmp/tidb_cdc_test/ddl_puller_lag/tidb-slow.log
/tmp/tidb_cdc_test/ddl_puller_lag/down_pd.log
/tmp/tidb_cdc_test/ddl_puller_lag/tikv1/db/000005.log
/tmp/tidb_cdc_test/ddl_puller_lag/down_pd/region-meta/000001.log
/tmp/tidb_cdc_test/ddl_puller_lag/down_pd/hot-region/000001.log
/tmp/tidb_cdc_test/ddl_puller_lag/pd1.log
/tmp/tidb_cdc_test/ddl_puller_lag/tikv_down/db/000005.log
/tmp/tidb_cdc_test/ddl_puller_lag/cdc_data/tmp/sorter/0002/000002.log
/tmp/tidb_cdc_test/ddl_puller_lag/cdc_data/tmp/sorter/0005/000002.log
/tmp/tidb_cdc_test/ddl_puller_lag/cdc_data/tmp/sorter/0006/000002.log
/tmp/tidb_cdc_test/ddl_puller_lag/cdc_data/tmp/sorter/0003/000002.log
/tmp/tidb_cdc_test/ddl_puller_lag/cdc_data/tmp/sorter/0007/000002.log
/tmp/tidb_cdc_test/ddl_puller_lag/cdc_data/tmp/sorter/0000/000002.log
/tmp/tidb_cdc_test/ddl_puller_lag/cdc_data/tmp/sorter/0004/000002.log
/tmp/tidb_cdc_test/ddl_puller_lag/cdc_data/tmp/sorter/0001/000002.log
/tmp/tidb_cdc_test/ddl_puller_lag/pd1/region-meta/000001.log
/tmp/tidb_cdc_test/ddl_puller_lag/pd1/hot-region/000001.log
/tmp/tidb_cdc_test/ddl_puller_lag/tidb.log
/tmp/tidb_cdc_test/ddl_puller_lag/tikv3.log
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
/tmp/tidb_cdc_test/foreign_key/tikv2.log
/tmp/tidb_cdc_test/foreign_key/tikv_down.log
/tmp/tidb_cdc_test/foreign_key/stdout.log
/tmp/tidb_cdc_test/foreign_key/tikv1.log
/tmp/tidb_cdc_test/foreign_key/tidb_down.log
/tmp/tidb_cdc_test/foreign_key/tidb_other.log
/tmp/tidb_cdc_test/foreign_key/cdc.log
/tmp/tidb_cdc_test/foreign_key/sync_diff_inspector.log
/tmp/tidb_cdc_test/foreign_key/tidb-slow.log
/tmp/tidb_cdc_test/foreign_key/down_pd.log
/tmp/tidb_cdc_test/foreign_key/pd1.log
/tmp/tidb_cdc_test/foreign_key/tidb.log
/tmp/tidb_cdc_test/foreign_key/tikv3.log
+ ls -alh log-G04.tar.gz
-rw-r--r-- 1 jenkins jenkins 7.6M Apr 29 17:41 log-G04.tar.gz
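Note: the post-stage collection above is a find-then-tar pattern: every *.log file under the test workspace is bundled into one per-group artifact before Jenkins archives it. The long trace reduces to this sketch (using GNU tar's -T - to read the file list from stdin and avoid word-splitting):

    # Bundle all test logs into one gzipped artifact for the archive step.
    find /tmp/tidb_cdc_test/ -type f -name '*.log' \
        | tar -czvf log-G04.tar.gz -T -
    ls -alh log-G04.tar.gz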
[Pipeline] archiveArtifacts
Archiving artifacts
check diff failed 45-th time, retry later
Recording fingerprints
[Pipeline] }
[Pipeline] // withCredentials
[Pipeline] }
[Pipeline] // timeout
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // container
[Pipeline] }
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
start tidb cluster in /tmp/tidb_cdc_test/multi_tables_ddl
Starting Upstream PD...
Release Version: v7.5.1-5-g584533652
Edition: Community
Git Commit Hash: 58453365285465cd90bc4472cff2bad7ce4d764b
Git Branch: release-7.5
UTC Build Time:  2024-04-03 10:04:14
Starting Downstream PD...
Release Version: v7.5.1-5-g584533652
Edition: Community
Git Commit Hash: 58453365285465cd90bc4472cff2bad7ce4d764b
Git Branch: release-7.5
UTC Build Time:  2024-04-03 10:04:14
Verifying upstream PD is started...
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // podTemplate
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
Failed in branch Matrix - TEST_GROUP = 'G04'
Sending interrupt signal to process
Killing processes
kill finished with exit code 0
Sending interrupt signal to process
Killing processes
kill finished with exit code 0
Sending interrupt signal to process
Killing processes
Exiting on signal: TERMINATED
script returned exit code 143
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_mysql_test/tiflow/tests/integration_tests/consistent_replicate_storage_file_large_value/run.sh using Sink-Type: mysql... <<=================
The 1 times to try to start tidb cluster...
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)

script returned exit code 143
kill finished with exit code 0
Sending interrupt signal to process
Killing processes
check diff failed 46-th time, retry later
kill finished with exit code 0
Sending interrupt signal to process
Killing processes
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
script returned exit code 143
kill finished with exit code 0
{"level":"warn","ts":1714383715.763123,"caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc002403a40/127.0.0.1:2379","attempt":0,"error":"rpc error: code = Unavailable desc = error reading from server: EOF"}
script returned exit code 143
[Pipeline] }
Cache not saved (inner-step execution failed)
[Pipeline] }
Cache not saved (inner-step execution failed)
[Pipeline] }
Cache not saved (inner-step execution failed)
[Pipeline] }
Cache not saved (inner-step execution failed)
[Pipeline] // cache
[Pipeline] // cache
[Pipeline] // cache
[Pipeline] }
[Pipeline] }
[Pipeline] }
[Pipeline] // cache
[Pipeline] }
[Pipeline] // dir
[Pipeline] // dir
[Pipeline] // dir
[Pipeline] }
[Pipeline] }
[Pipeline] }
[Pipeline] // dir
[Pipeline] }
[Pipeline] // withCredentials
[Pipeline] // withCredentials
[Pipeline] // withCredentials
[Pipeline] }
[Pipeline] }
[Pipeline] }
[Pipeline] // withCredentials
[Pipeline] }
[Pipeline] // timeout
[Pipeline] // timeout
[Pipeline] // timeout
[Pipeline] }
[Pipeline] }
[Pipeline] }
[Pipeline] // timeout
[Pipeline] }
[Pipeline] // stage
[Pipeline] // stage
[Pipeline] // stage
[Pipeline] }
[Pipeline] }
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // container
[Pipeline] // container
[Pipeline] // container
[Pipeline] }
[Pipeline] }
[Pipeline] }
[Pipeline] // container
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] // withEnv
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] }
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] // node
[Pipeline] // node
[Pipeline] }
[Pipeline] }
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // podTemplate
[Pipeline] // podTemplate
[Pipeline] // podTemplate
[Pipeline] }
[Pipeline] }
[Pipeline] }
[Pipeline] // podTemplate
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] // withEnv
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] }
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // stage
[Pipeline] // stage
[Pipeline] // stage
[Pipeline] }
Failed in branch Matrix - TEST_GROUP = 'G01'
[Pipeline] }
Failed in branch Matrix - TEST_GROUP = 'G02'
[Pipeline] }
Failed in branch Matrix - TEST_GROUP = 'G10'
[Pipeline] // stage
[Pipeline] }
Failed in branch Matrix - TEST_GROUP = 'G18'
++ stop_tidb_cluster
script returned exit code 143
[Pipeline] }
Cache not saved (inner-step execution failed)
[Pipeline] // cache
[Pipeline] }
[Pipeline] // dir
[Pipeline] }
[Pipeline] // withCredentials
[Pipeline] }
[Pipeline] // timeout
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // container
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // podTemplate
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
Failed in branch Matrix - TEST_GROUP = 'G09'
[Pipeline] // parallel
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // timeout
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // container
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // podTemplate
[Pipeline] End of Pipeline
ERROR: script returned exit code 1
Finished: FAILURE