[Skipping 1,622 KB of console output]
Logging errors to /tmp/tidb_cdc_test/changefeed_auto_stop/tiflash/log/error.log
arg matches is ArgMatches { args: {"config": MatchedArg { occurs: 1, indices: [8], vals: ["/tmp/tidb_cdc_test/changefeed_auto_stop/tiflash-proxy.toml"] }, "data-dir": MatchedArg { occurs: 1, indices: [6], vals: ["/tmp/tidb_cdc_test/changefeed_auto_stop/tiflash/db/proxy"] }, "engine-label": MatchedArg { occurs: 1, indices: [14], vals: ["tiflash"] }, "advertise-addr": MatchedArg { occurs: 1, indices: [4], vals: ["127.0.0.1:9000"] }, "engine-addr": MatchedArg { occurs: 1, indices: [2], vals: ["127.0.0.1:9500"] }, "pd-endpoints": MatchedArg { occurs: 1, indices: [16], vals: ["127.0.0.1:2379"] }, "log-file": MatchedArg { occurs: 1, indices: [18], vals: ["/tmp/tidb_cdc_test/changefeed_auto_stop/tiflash/log/proxy.log"] }, "addr": MatchedArg { occurs: 1, indices: [20], vals: ["127.0.0.1:9000"] }, "engine-version": MatchedArg { occurs: 1, indices: [12], vals: ["v8.2.0-alpha-16-g8e170090f"] }, "engine-git-hash": MatchedArg { occurs: 1, indices: [10], vals: ["8e170090fad91c94bef8d908e21c195c1d145b02"] }}, subcommand: None, usage: Some("USAGE:\n    TiFlash Proxy [FLAGS] [OPTIONS] --engine-git-hash <engine-git-hash> --engine-label <engine-label> --engine-version <engine-version>") }
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/pull_cdc_integration_pulsar_test/tiflow/tests/integration_tests/generate_column/run.sh using Sink-Type: pulsar... <<=================
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
***************** properties *****************
"workload"="core"
"mysql.port"="4000"
"readallfields"="true"
"scanproportion"="0"
"insertproportion"="0"
"recordcount"="20"
"threadcount"="4"
"readproportion"="0"
"mysql.user"="root"
"dotransactions"="false"
"operationcount"="0"
"mysql.db"="changefeed_auto_stop_1"
"updateproportion"="0"
"mysql.host"="127.0.0.1"
"requestdistribution"="uniform"
**********************************************
Run finished, takes 7.906037ms
INSERT - Takes(s): 0.0, Count: 20, OPS: 4497.3, Avg(us): 1507, Min(us): 858, Max(us): 3361, 95th(us): 4000, 99th(us): 4000
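The properties block above is a go-ycsb "core" workload aimed at the upstream TiDB; given the INSERT-only stats, this was a load phase. A rough reconstruction of the invocation (binary name and exact flag spelling are assumptions, the property values are taken from the dump above):
# hypothetical go-ycsb load reproducing the printed properties
go-ycsb load mysql -p workload=core \
    -p mysql.host=127.0.0.1 -p mysql.port=4000 -p mysql.user=root \
    -p mysql.db=changefeed_auto_stop_1 \
    -p recordcount=20 -p operationcount=0 -p threadcount=4 \
    -p readproportion=0 -p updateproportion=0 -p scanproportion=0 -p insertproportion=0 \
    -p dotransactions=false -p readallfields=true -p requestdistribution=uniform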
***************** properties *****************
"threadcount"="4"
"operationcount"="0"
"insertproportion"="0"
"mysql.host"="127.0.0.1"
"mysql.user"="root"
"readproportion"="0"
"mysql.port"="4000"
"recordcount"="20"
"mysql.db"="changefeed_auto_stop_2"
"readallfields"="true"
"requestdistribution"="uniform"
"scanproportion"="0"
"dotransactions"="false"
"workload"="core"
"updateproportion"="0"
**********************************************
Run finished, takes 9.156311ms
INSERT - Takes(s): 0.0, Count: 20, OPS: 3643.0, Avg(us): 1640, Min(us): 856, Max(us): 3988, 95th(us): 4000, 99th(us): 4000
***************** properties *****************
"workload"="core"
"scanproportion"="0"
"mysql.port"="4000"
"operationcount"="0"
"threadcount"="4"
"recordcount"="20"
"insertproportion"="0"
"mysql.host"="127.0.0.1"
"mysql.db"="changefeed_auto_stop_3"
"requestdistribution"="uniform"
"mysql.user"="root"
"dotransactions"="false"
"readproportion"="0"
"readallfields"="true"
"updateproportion"="0"
**********************************************
Run finished, takes 7.928869ms
INSERT - Takes(s): 0.0, Count: 20, OPS: 4317.7, Avg(us): 1491, Min(us): 847, Max(us): 3317, 95th(us): 4000, 99th(us): 4000
***************** properties *****************
"insertproportion"="0"
"dotransactions"="false"
"recordcount"="20"
"readallfields"="true"
"scanproportion"="0"
"readproportion"="0"
"mysql.host"="127.0.0.1"
"operationcount"="0"
"mysql.user"="root"
"requestdistribution"="uniform"
"mysql.db"="changefeed_auto_stop_4"
"mysql.port"="4000"
"threadcount"="4"
"updateproportion"="0"
"workload"="core"
**********************************************
Run finished, takes 8.717981ms
INSERT - Takes(s): 0.0, Count: 17, OPS: 3272.5, Avg(us): 1234, Min(us): 905, Max(us): 3367, 95th(us): 4000, 99th(us): 4000
[Sat May  4 17:57:25 CST 2024] <<<<<< START cdc server in changefeed_auto_stop case >>>>>>
+ [[ '' == \t\r\u\e ]]
+ set +e
+ get_info_fail_msg='failed to get info:'
+ etcd_info_msg='etcd info'
+ '[' -z '' ']'
+ curl_status_cmd='curl -vsL --max-time 20 http://127.0.0.1:8301/debug/info --user ticdc:ticdc_secret -vsL'
+ GO_FAILPOINTS=
+ [[ no != \n\o ]]
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.changefeed_auto_stop.1390913911.out server --log-file /tmp/tidb_cdc_test/changefeed_auto_stop/cdc1.log --log-level debug --data-dir /tmp/tidb_cdc_test/changefeed_auto_stop/cdc_data1 --cluster-id default --addr 127.0.0.1:8301 --pd http://127.0.0.1:2379
+ (( i = 0 ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8301/debug/info --user ticdc:ticdc_secret -vsL
* About to connect() to 127.0.0.1 port 8301 (#0)
*   Trying 127.0.0.1...
* Connection refused
* Failed connect to 127.0.0.1:8301; Connection refused
* Closing connection 0
+ res=
+ echo ''
+ grep -q 'failed to get info:'
+ echo ''
+ grep -q 'etcd info'
+ '[' 0 -eq 50 ']'
+ sleep 3
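The `+` lines above are bash -x traces of a readiness probe; reconstructed as a loop it has roughly this shape (a sketch, not the actual helper script):
# probe /debug/info until the cdc server reports etcd info, up to 50 tries
i=0
while (( i <= 50 )); do
    res=$(curl -vsL --max-time 20 http://127.0.0.1:8301/debug/info --user ticdc:ticdc_secret)
    # ready once the response mentions 'etcd info' and carries no fetch failure
    if ! echo "$res" | grep -q 'failed to get info:' && echo "$res" | grep -q 'etcd info'; then
        break
    fi
    if (( i == 50 )); then
        echo 'failed to start cdc server'
        exit 1
    fi
    sleep 3
    (( i++ ))
done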
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	196	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63d0c25d0540013	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:pingcap-tiflow-pull-cdc-integration-pulsar-test-1528-bws0-3bjck, pid:9130, start at 2024-05-04 17:57:24.146870863 +0800 CST m=+5.089129637	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240504-17:59:24.153 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240504-17:57:24.117 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240504-17:47:24.117 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
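The VARIABLE_NAME / VARIABLE_VALUE / COMMENT dump above is TiDB's bootstrap and GC metadata; assuming the standard mysql.tidb system table, the same rows can be queried directly:
# reproduce the bootstrap/GC metadata dump by hand
mysql -h 127.0.0.1 -P 4000 -u root \
    -e 'SELECT VARIABLE_NAME, VARIABLE_VALUE, COMMENT FROM mysql.tidb'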
Verifying Downstream TiDB is started...
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	196	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63d0c25d2b80014	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:pingcap-tiflow-pull-cdc-integration-pulsar-test-1528-bws0-3bjck, pid:9219, start at 2024-05-04 17:57:24.293540339 +0800 CST m=+5.179378340	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240504-17:59:24.301 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240504-17:57:24.270 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240504-17:47:24.270 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
Starting Upstream TiFlash...
TiFlash
Release Version: v8.2.0-alpha-16-g8e170090f
Edition:         Community
Git Commit Hash: 8e170090fad91c94bef8d908e21c195c1d145b02
Git Branch:      HEAD
UTC Build Time:  2024-04-30 02:34:21
Enable Features: jemalloc sm4(GmSSL) avx2 avx512 unwind thinlto
Profile:         RELWITHDEBINFO
Compiler:        clang++ 13.0.0

Raft Proxy
Git Commit Hash:   7dc50b4eb06124e31f03adb06c20ff7ab61c5f79
Git Commit Branch: HEAD
UTC Build Time:    2024-04-30 02:38:45
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Storage Engine:    tiflash
Prometheus Prefix: tiflash_proxy_
Profile:           release
Enable Features:   external-jemalloc portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure openssl-vendored
Verifying Upstream TiFlash is started...
Logging trace to /tmp/tidb_cdc_test/sink_hang/tiflash/log/server.log
Logging errors to /tmp/tidb_cdc_test/sink_hang/tiflash/log/error.log
arg matches is ArgMatches { args: {"log-file": MatchedArg { occurs: 1, indices: [18], vals: ["/tmp/tidb_cdc_test/sink_hang/tiflash/log/proxy.log"] }, "engine-addr": MatchedArg { occurs: 1, indices: [2], vals: ["127.0.0.1:9500"] }, "config": MatchedArg { occurs: 1, indices: [8], vals: ["/tmp/tidb_cdc_test/sink_hang/tiflash-proxy.toml"] }, "pd-endpoints": MatchedArg { occurs: 1, indices: [16], vals: ["127.0.0.1:2379"] }, "engine-version": MatchedArg { occurs: 1, indices: [12], vals: ["v8.2.0-alpha-16-g8e170090f"] }, "advertise-addr": MatchedArg { occurs: 1, indices: [4], vals: ["127.0.0.1:9000"] }, "data-dir": MatchedArg { occurs: 1, indices: [6], vals: ["/tmp/tidb_cdc_test/sink_hang/tiflash/db/proxy"] }, "engine-label": MatchedArg { occurs: 1, indices: [14], vals: ["tiflash"] }, "engine-git-hash": MatchedArg { occurs: 1, indices: [10], vals: ["8e170090fad91c94bef8d908e21c195c1d145b02"] }, "addr": MatchedArg { occurs: 1, indices: [20], vals: ["127.0.0.1:9000"] }}, subcommand: None, usage: Some("USAGE:\n    TiFlash Proxy [FLAGS] [OPTIONS] --engine-git-hash <engine-git-hash> --engine-label <engine-label> --engine-version <engine-version>") }
<<< Run all test success >>>
[Pipeline] }
Cache not saved (ws/jenkins-pingcap-tiflow-pull_cdc_integration_pulsar_test-1528/tiflow-cdc already exists)
[Pipeline] // cache
[Pipeline] }
[Pipeline] // dir
[Pipeline] }
[Pipeline] // timeout
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // container
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // podTemplate
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Sat May  4 17:57:27 CST 2024] <<<<<< START cdc server in sink_hang case >>>>>>
+ [[ '' == \t\r\u\e ]]
+ set +e
+ get_info_fail_msg='failed to get info:'
+ etcd_info_msg='etcd info'
+ '[' -z '' ']'
+ curl_status_cmd='curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info --user ticdc:ticdc_secret -vsL'
+ [[ no != \n\o ]]
+ GO_FAILPOINTS='github.com/pingcap/tiflow/cdc/sink/dmlsink/txn/mysql/MySQLSinkExecDMLError=2*return(true)'
+ (( i = 0 ))
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.sink_hang.1054010542.out server --log-file /tmp/tidb_cdc_test/sink_hang/cdc.log --log-level debug --data-dir /tmp/tidb_cdc_test/sink_hang/cdc_data --cluster-id default --addr 127.0.0.1:8300 --pd http://127.0.0.1:2379
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info --user ticdc:ticdc_secret -vsL
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connection refused
* Failed connect to 127.0.0.1:8300; Connection refused
* Closing connection 0
+ res=
+ echo ''
+ grep -q 'failed to get info:'
+ echo ''
+ grep -q 'etcd info'
+ '[' 0 -eq 50 ']'
+ sleep 3
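The GO_FAILPOINTS value above uses the pingcap/failpoint term grammar: `count*return(value)` makes the failpoint fire that many times and then stay off, while `sleep(ms)` injects a delay on each hit; multiple failpoints are ';'-separated. For example (paths taken from the traces in this log):
# fail the first two DML executions in the MySQL sink, then recover
export GO_FAILPOINTS='github.com/pingcap/tiflow/cdc/sink/dmlsink/txn/mysql/MySQLSinkExecDMLError=2*return(true)'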
+ (( i++ ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8301/debug/info --user ticdc:ticdc_secret -vsL
* About to connect() to 127.0.0.1 port 8301 (#0)
*   Trying 127.0.0.1...
* Connected to 127.0.0.1 (127.0.0.1) port 8301 (#0)
* Server auth using Basic with user 'ticdc'
> GET /debug/info HTTP/1.1
> Authorization: Basic dGljZGM6dGljZGNfc2VjcmV0
> User-Agent: curl/7.29.0
> Host: 127.0.0.1:8301
> Accept: */*
> 
< HTTP/1.1 200 OK
< Date: Sat, 04 May 2024 09:57:28 GMT
< Content-Length: 816
< Content-Type: text/plain; charset=utf-8
< 
{ [data not shown]
* Connection #0 to host 127.0.0.1 left intact
+ res='

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/f40fb1ce-1195-4636-b599-d9b9b07fc07e
	{"id":"f40fb1ce-1195-4636-b599-d9b9b07fc07e","address":"127.0.0.1:8301","version":"v8.2.0-alpha-84-gecb3fedc8","git-hash":"ecb3fedc801aec53272bf16e67f5d34b8da910c1","deploy-path":"/home/jenkins/agent/workspace/pingcap/tiflow/pull_cdc_integration_pulsar_test/tiflow/bin/cdc.test","start-timestamp":1714816645}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f4309400632
	f40fb1ce-1195-4636-b599-d9b9b07fc07e

/tidb/cdc/default/default/upstream/7365081349785656684
	{"id":7365081349785656684,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/f40fb1ce-1195-4636-b599-d9b9b07fc07e
	{"id":"f40fb1ce-1195-4636-b599-d9b9b07fc07e","address":"127.0.0.1:8301","version":"v8.2.0-alpha-84-gecb3fedc8","git-hash":"ecb3fedc801aec53272bf16e67f5d34b8da910c1","deploy-path":"/home/jenkins/agent/workspace/pingcap/tiflow/pull_cdc_integration_pulsar_test/tiflow/bin/cdc.test","start-timestamp":1714816645}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f4309400632
	f40fb1ce-1195-4636-b599-d9b9b07fc07e

/tidb/cdc/default/default/upstream/7365081349785656684
	{"id":7365081349785656684,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'failed to get info:'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/f40fb1ce-1195-4636-b599-d9b9b07fc07e
	{"id":"f40fb1ce-1195-4636-b599-d9b9b07fc07e","address":"127.0.0.1:8301","version":"v8.2.0-alpha-84-gecb3fedc8","git-hash":"ecb3fedc801aec53272bf16e67f5d34b8da910c1","deploy-path":"/home/jenkins/agent/workspace/pingcap/tiflow/pull_cdc_integration_pulsar_test/tiflow/bin/cdc.test","start-timestamp":1714816645}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f4309400632
	f40fb1ce-1195-4636-b599-d9b9b07fc07e

/tidb/cdc/default/default/upstream/7365081349785656684
	{"id":7365081349785656684,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'etcd info'
+ break
+ set +x
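The Basic Authorization header in the exchanges above is just the base64 of the --user credentials:
echo 'dGljZGM6dGljZGNfc2VjcmV0' | base64 -d    # prints: ticdc:ticdc_secret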
[Sat May  4 17:57:28 CST 2024] <<<<<< START cdc server in changefeed_auto_stop case >>>>>>
+ [[ '' == \t\r\u\e ]]
+ set +e
+ get_info_fail_msg='failed to get info:'
+ etcd_info_msg='etcd info'
+ '[' -z '' ']'
+ curl_status_cmd='curl -vsL --max-time 20 http://127.0.0.1:8302/debug/info --user ticdc:ticdc_secret -vsL'
+ GO_FAILPOINTS='github.com/pingcap/tiflow/cdc/processor/pipeline/ProcessorSyncResolvedError=1*return(true);github.com/pingcap/tiflow/cdc/processor/ProcessorUpdatePositionDelaying=sleep(1000)'
+ [[ no != \n\o ]]
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.changefeed_auto_stop.1396913971.out server --log-file /tmp/tidb_cdc_test/changefeed_auto_stop/cdc2.log --log-level debug --data-dir /tmp/tidb_cdc_test/changefeed_auto_stop/cdc_data2 --cluster-id default --addr 127.0.0.1:8302 --pd http://127.0.0.1:2379
+ (( i = 0 ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8302/debug/info --user ticdc:ticdc_secret -vsL
* About to connect() to 127.0.0.1 port 8302 (#0)
*   Trying 127.0.0.1...
* Connection refused
* Failed connect to 127.0.0.1:8302; Connection refused
* Closing connection 0
+ res=
+ echo ''
+ grep -q 'failed to get info:'
+ echo ''
+ grep -q 'etcd info'
+ '[' 0 -eq 50 ']'
+ sleep 3
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/pull_cdc_integration_pulsar_test/tiflow/tests/integration_tests/savepoint/run.sh using Sink-Type: pulsar... <<=================
The 1st attempt to start the tidb cluster...
+ (( i++ ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info --user ticdc:ticdc_secret -vsL
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connected to 127.0.0.1 (127.0.0.1) port 8300 (#0)
* Server auth using Basic with user 'ticdc'
> GET /debug/info HTTP/1.1
> Authorization: Basic dGljZGM6dGljZGNfc2VjcmV0
> User-Agent: curl/7.29.0
> Host: 127.0.0.1:8300
> Accept: */*
> 
< HTTP/1.1 200 OK
< Date: Sat, 04 May 2024 09:57:30 GMT
< Content-Length: 816
< Content-Type: text/plain; charset=utf-8
< 
{ [data not shown]
* Connection #0 to host 127.0.0.1 left intact
+ res='

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/2c1cb25d-713f-4e7b-82f4-794a7acee4f5
	{"id":"2c1cb25d-713f-4e7b-82f4-794a7acee4f5","address":"127.0.0.1:8300","version":"v8.2.0-alpha-84-gecb3fedc8","git-hash":"ecb3fedc801aec53272bf16e67f5d34b8da910c1","deploy-path":"/home/jenkins/agent/workspace/pingcap/tiflow/pull_cdc_integration_pulsar_test/tiflow/bin/cdc.test","start-timestamp":1714816647}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f43094a77c6
	2c1cb25d-713f-4e7b-82f4-794a7acee4f5

/tidb/cdc/default/default/upstream/7365081367515714604
	{"id":7365081367515714604,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/2c1cb25d-713f-4e7b-82f4-794a7acee4f5
	{"id":"2c1cb25d-713f-4e7b-82f4-794a7acee4f5","address":"127.0.0.1:8300","version":"v8.2.0-alpha-84-gecb3fedc8","git-hash":"ecb3fedc801aec53272bf16e67f5d34b8da910c1","deploy-path":"/home/jenkins/agent/workspace/pingcap/tiflow/pull_cdc_integration_pulsar_test/tiflow/bin/cdc.test","start-timestamp":1714816647}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f43094a77c6
	2c1cb25d-713f-4e7b-82f4-794a7acee4f5

/tidb/cdc/default/default/upstream/7365081367515714604
	{"id":7365081367515714604,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'failed to get info:'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/2c1cb25d-713f-4e7b-82f4-794a7acee4f5
	{"id":"2c1cb25d-713f-4e7b-82f4-794a7acee4f5","address":"127.0.0.1:8300","version":"v8.2.0-alpha-84-gecb3fedc8","git-hash":"ecb3fedc801aec53272bf16e67f5d34b8da910c1","deploy-path":"/home/jenkins/agent/workspace/pingcap/tiflow/pull_cdc_integration_pulsar_test/tiflow/bin/cdc.test","start-timestamp":1714816647}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f43094a77c6
	2c1cb25d-713f-4e7b-82f4-794a7acee4f5

/tidb/cdc/default/default/upstream/7365081367515714604
+ grep -q 'etcd info'
	{"id":7365081367515714604,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ break
+ set +x
[Sat May  4 17:57:30 CST 2024] <<<<<< START Pulsar consumer in sink_hang case >>>>>>
check_changefeed_status 127.0.0.1:8300 a0362928-e9f5-4e7a-891a-7382b8a09ab4 normal last_error null
+ endpoint=127.0.0.1:8300
+ changefeed_id=a0362928-e9f5-4e7a-891a-7382b8a09ab4
+ expected_state=normal
+ field=last_error
+ error_pattern=null
++ curl 127.0.0.1:8300/api/v2/changefeeds/a0362928-e9f5-4e7a-891a-7382b8a09ab4/status
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100    86  100    86    0     0    665      0 --:--:-- --:--:-- --:--:--   666
+ info='{"state":"normal","resolved_ts":449528896033390597,"checkpoint_ts":449528896033390597}'
+ echo '{"state":"normal","resolved_ts":449528896033390597,"checkpoint_ts":449528896033390597}'
{"state":"normal","resolved_ts":449528896033390597,"checkpoint_ts":449528896033390597}
++ echo '{"state":"normal","resolved_ts":449528896033390597,"checkpoint_ts":449528896033390597}'
++ jq -r .state
+ state=normal
+ [[ ! normal == \n\o\r\m\a\l ]]
+ [[ -z last_error ]]
++ echo '{"state":"normal","resolved_ts":449528896033390597,"checkpoint_ts":449528896033390597}'
++ jq -r .last_error.message
+ error_msg=null
+ [[ ! null =~ null ]]
run task successfully
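check_changefeed_status, as the trace above expands it, boils down to fetching the v2 status API and pattern-matching fields with jq; a reconstruction of its shape (a sketch, not the actual helper under tests/integration_tests/_utils):
check_changefeed_status() {
    local endpoint=$1 changefeed_id=$2 expected_state=$3 field=$4 error_pattern=$5
    local info state error_msg
    info=$(curl -s "$endpoint/api/v2/changefeeds/$changefeed_id/status")
    state=$(echo "$info" | jq -r .state)
    [[ $state == "$expected_state" ]] || return 1
    if [[ -n $field ]]; then
        # e.g. field=last_error, error_pattern=null: the message must match the pattern
        error_msg=$(echo "$info" | jq -r ".$field.message")
        [[ $error_msg =~ $error_pattern ]] || return 1
    fi
    echo 'run task successfully'
}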
check_changefeed_status 127.0.0.1:8300 a0362928-e9f5-4e7a-891a-7382b8a09ab4 normal last_warning null
+ endpoint=127.0.0.1:8300
+ changefeed_id=a0362928-e9f5-4e7a-891a-7382b8a09ab4
+ expected_state=normal
+ field=last_warning
+ error_pattern=null
++ curl 127.0.0.1:8300/api/v2/changefeeds/a0362928-e9f5-4e7a-891a-7382b8a09ab4/status
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100    86  100    86    0     0    656      0 --:--:-- --:--:-- --:--:--   661
+ info='{"state":"normal","resolved_ts":449528896033390597,"checkpoint_ts":449528896033390597}'
+ echo '{"state":"normal","resolved_ts":449528896033390597,"checkpoint_ts":449528896033390597}'
{"state":"normal","resolved_ts":449528896033390597,"checkpoint_ts":449528896033390597}
++ echo '{"state":"normal","resolved_ts":449528896033390597,"checkpoint_ts":449528896033390597}'
++ jq -r .state
+ state=normal
+ [[ ! normal == \n\o\r\m\a\l ]]
+ [[ -z last_warning ]]
++ echo '{"state":"normal","resolved_ts":449528896033390597,"checkpoint_ts":449528896033390597}'
++ jq -r .last_warning.message
+ error_msg=null
+ [[ ! null =~ null ]]
run task successfully
table sink_hang.t1 does not exist at the 1st check, retry later
+ (( i++ ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8302/debug/info --user ticdc:ticdc_secret -vsL
* About to connect() to 127.0.0.1 port 8302 (#0)
*   Trying 127.0.0.1...
* Connected to 127.0.0.1 (127.0.0.1) port 8302 (#0)
* Server auth using Basic with user 'ticdc'
> GET /debug/info HTTP/1.1
> Authorization: Basic dGljZGM6dGljZGNfc2VjcmV0
> User-Agent: curl/7.29.0
> Host: 127.0.0.1:8302
> Accept: */*
> 
< HTTP/1.1 200 OK
< Date: Sat, 04 May 2024 09:57:31 GMT
< Content-Length: 1273
< Content-Type: text/plain; charset=utf-8
< 
{ [data not shown]
* Connection #0 to host 127.0.0.1 left intact
+ res='

*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/099c262b-e85d-45d4-b2e5-018a3bff42ba
	{"id":"099c262b-e85d-45d4-b2e5-018a3bff42ba","address":"127.0.0.1:8302","version":"v8.2.0-alpha-84-gecb3fedc8","git-hash":"ecb3fedc801aec53272bf16e67f5d34b8da910c1","deploy-path":"/home/jenkins/agent/workspace/pingcap/tiflow/pull_cdc_integration_pulsar_test/tiflow/bin/cdc.test","start-timestamp":1714816648}

/tidb/cdc/default/__cdc_meta__/capture/f40fb1ce-1195-4636-b599-d9b9b07fc07e
	{"id":"f40fb1ce-1195-4636-b599-d9b9b07fc07e","address":"127.0.0.1:8301","version":"v8.2.0-alpha-84-gecb3fedc8","git-hash":"ecb3fedc801aec53272bf16e67f5d34b8da910c1","deploy-path":"/home/jenkins/agent/workspace/pingcap/tiflow/pull_cdc_integration_pulsar_test/tiflow/bin/cdc.test","start-timestamp":1714816645}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f4309400632
	f40fb1ce-1195-4636-b599-d9b9b07fc07e

/tidb/cdc/default/__cdc_meta__/owner/22318f4309400669
	099c262b-e85d-45d4-b2e5-018a3bff42ba

/tidb/cdc/default/default/upstream/7365081349785656684
	{"id":7365081349785656684,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ echo '

*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/099c262b-e85d-45d4-b2e5-018a3bff42ba
	{"id":"099c262b-e85d-45d4-b2e5-018a3bff42ba","address":"127.0.0.1:8302","version":"v8.2.0-alpha-84-gecb3fedc8","git-hash":"ecb3fedc801aec53272bf16e67f5d34b8da910c1","deploy-path":"/home/jenkins/agent/workspace/pingcap/tiflow/pull_cdc_integration_pulsar_test/tiflow/bin/cdc.test","start-timestamp":1714816648}

/tidb/cdc/default/__cdc_meta__/capture/f40fb1ce-1195-4636-b599-d9b9b07fc07e
	{"id":"f40fb1ce-1195-4636-b599-d9b9b07fc07e","address":"127.0.0.1:8301","version":"v8.2.0-alpha-84-gecb3fedc8","git-hash":"ecb3fedc801aec53272bf16e67f5d34b8da910c1","deploy-path":"/home/jenkins/agent/workspace/pingcap/tiflow/pull_cdc_integration_pulsar_test/tiflow/bin/cdc.test","start-timestamp":1714816645}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f4309400632
	f40fb1ce-1195-4636-b599-d9b9b07fc07e

/tidb/cdc/default/__cdc_meta__/owner/22318f4309400669
	099c262b-e85d-45d4-b2e5-018a3bff42ba

/tidb/cdc/default/default/upstream/7365081349785656684
	{"id":7365081349785656684,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'failed to get info:'
+ echo '

*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/099c262b-e85d-45d4-b2e5-018a3bff42ba
	{"id":"099c262b-e85d-45d4-b2e5-018a3bff42ba","address":"127.0.0.1:8302","version":"v8.2.0-alpha-84-gecb3fedc8","git-hash":"ecb3fedc801aec53272bf16e67f5d34b8da910c1","deploy-path":"/home/jenkins/agent/workspace/pingcap/tiflow/pull_cdc_integration_pulsar_test/tiflow/bin/cdc.test","start-timestamp":1714816648}

/tidb/cdc/default/__cdc_meta__/capture/f40fb1ce-1195-4636-b599-d9b9b07fc07e
	{"id":"f40fb1ce-1195-4636-b599-d9b9b07fc07e","address":"127.0.0.1:8301","version":"v8.2.0-alpha-84-gecb3fedc8","git-hash":"ecb3fedc801aec53272bf16e67f5d34b8da910c1","deploy-path":"/home/jenkins/agent/workspace/pingcap/tiflow/pull_cdc_integration_pulsar_test/tiflow/bin/cdc.test","start-timestamp":1714816645}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f4309400632
	f40fb1ce-1195-4636-b599-d9b9b07fc07e

/tidb/cdc/default/__cdc_meta__/owner/22318f4309400669
	099c262b-e85d-45d4-b2e5-018a3bff42ba

/tidb/cdc/default/default/upstream/7365081349785656684
	{"id":7365081349785656684,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'etcd info'
+ break
+ set +x
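The /tidb/cdc/... keys echoed above live in PD's embedded etcd; assuming the v3 API, they can be listed directly:
# list TiCDC metadata keys registered under the default cluster/namespace
ETCDCTL_API=3 etcdctl --endpoints=http://127.0.0.1:2379 \
    get /tidb/cdc/default/ --prefix --keys-only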
++ date
+ echo '[Sat May  4 17:57:31 CST 2024] <<<<<< START pulsar cluster in changefeed_auto_stop case >>>>>>'
[Sat May  4 17:57:31 CST 2024] <<<<<< START pulsar cluster in changefeed_auto_stop case >>>>>>
+ workdir=/tmp/tidb_cdc_test/changefeed_auto_stop
+ cluster_type=normal
+ cd /tmp/tidb_cdc_test/changefeed_auto_stop
+ DEFAULT_PULSAR_HOME=/usr/local/pulsar
+ pulsar_dir=/usr/local/pulsar
++ cat
+ mtls_conf='
authenticationEnabled=true
authenticationProviders=org.apache.pulsar.broker.authentication.AuthenticationProviderTls
brokerClientTlsEnabled=true
brokerClientTrustCertsFilePath=/tmp/tidb_cdc_test/changefeed_auto_stop/ca.cert.pem
brokerClientAuthenticationPlugin=org.apache.pulsar.client.impl.auth.AuthenticationTls
brokerClientAuthenticationParameters={"tlsCertFile":"/tmp/tidb_cdc_test/changefeed_auto_stop/broker_client.cert.pem","tlsKeyFile":"/tmp/tidb_cdc_test/changefeed_auto_stop/broker_client.key-pk8.pem"}
brokerServicePortTls=6651
webServicePortTls=8443
tlsTrustCertsFilePath=/tmp/tidb_cdc_test/changefeed_auto_stop/ca.cert.pem
tlsCertificateFilePath=/tmp/tidb_cdc_test/changefeed_auto_stop/server.cert.pem
tlsKeyFilePath=/tmp/tidb_cdc_test/changefeed_auto_stop/server.key-pk8.pem
tlsRequireTrustedClientCertOnConnect=true
tlsAllowInsecureConnection=false
tlsCertRefreshCheckDurationSec=300'
++ cat
+ normal_client_conf='
webServiceUrl=http://localhost:8080/
brokerServiceUrl=pulsar://localhost:6650/'
++ cat
+ mtls_client_conf='
webServiceUrl=https://localhost:8443/
brokerServiceUrl=pulsar+ssl://localhost:6651/
authPlugin=org.apache.pulsar.client.impl.auth.AuthenticationTls
authParams=tlsCertFile:/tmp/tidb_cdc_test/changefeed_auto_stop/broker_client.cert.pem,tlsKeyFile:/tmp/tidb_cdc_test/changefeed_auto_stop/broker_client.key-pk8.pem
tlsTrustCertsFilePath=/tmp/tidb_cdc_test/changefeed_auto_stop/ca.cert.pem'
++ cat
+ oauth_client_conf='
    webServiceUrl=http://localhost:8080/
    brokerServiceUrl=pulsar://localhost:6650/
    authPlugin=org.apache.pulsar.client.impl.auth.oauth2.AuthenticationOAuth2
    authParams={"privateKey":"/tmp/tidb_cdc_test/changefeed_auto_stop/credential.json","audience":"cdc-api-uri","issuerUrl":"http://localhost:9096"}'
++ cat
+ oauth_conf='
authenticationEnabled=true
authenticationProviders=org.apache.pulsar.broker.authentication.AuthenticationProviderToken

brokerClientAuthenticationPlugin=org.apache.pulsar.client.impl.auth.oauth2.AuthenticationOAuth2
brokerClientAuthenticationParameters={"privateKey":"file:///tmp/tidb_cdc_test/changefeed_auto_stop/credential.json","audience":"cdc-api-uri","issuerUrl":"http://localhost:9096"}
tokenSecretKey=data:;base64,U0poWDM2X0thcFlTeWJCdEpxMzVseFhfQnJyNExSVVJTa203UW1YSkdteThwVUZXOUVJT2NWUVBzeWt6OS1qag=='
++ cat
+ credential_json='
    {
        "client_id":"1234",
        "client_secret":"e0KVlA2EiBfjoN13olyZd2kv1KL",
        "audience":"cdc-api-uri",
        "issuer_url":"http://localhost:9096",
        "type": "client_credentials"
    }'
++ cat
+ cert_server_conf='[ req ]
default_bits = 2048
prompt = no
default_md = sha256
distinguished_name = dn

[ v3_ext ]
authorityKeyIdentifier=keyid,issuer:always
basicConstraints=CA:FALSE
keyUsage=critical, digitalSignature, keyEncipherment
extendedKeyUsage=serverAuth
subjectAltName=@alt_names

[ dn ]
CN = server

[ alt_names ]
DNS.1 = localhost
IP.1 = 127.0.0.1'
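cert_server_conf above is an openssl req config whose v3_ext section carries the server-auth extensions; a server certificate could be minted from it along these lines (key and CA file names are assumptions):
# CSR from the config, then sign it with the test CA, keeping the v3_ext extensions
openssl req -new -config cert_server.conf -key server.key.pem -out server.csr.pem
openssl x509 -req -in server.csr.pem -CA ca.cert.pem -CAkey ca.key.pem -CAcreateserial \
    -extfile cert_server.conf -extensions v3_ext -days 365 -out server.cert.pem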
+ echo '
webServiceUrl=http://localhost:8080/
brokerServiceUrl=pulsar://localhost:6650/'
+ cp /usr/local/pulsar/conf/standalone.conf /tmp/tidb_cdc_test/changefeed_auto_stop/pulsar_standalone.conf
+ pulsar_port=6650
+ '[' normal == mtls ']'
+ '[' normal == oauth ']'
+ echo 'no cluster type specified, using default configuration.'
no cluster type specified, using default configuration.
++ date
+ echo '[Sat May  4 17:57:31 CST 2024] <<<<<< START pulsar cluster in normal mode in changefeed_auto_stop case >>>>>>'
[Sat May  4 17:57:31 CST 2024] <<<<<< START pulsar cluster in normal mode in changefeed_auto_stop case >>>>>>
+ echo 'Waiting for pulsar port to be ready...'
Waiting for pulsar port to be ready...
+ i=0
+ nc -z localhost 6650
+ /usr/local/pulsar/bin/pulsar standalone --config /tmp/tidb_cdc_test/changefeed_auto_stop/pulsar_standalone.conf -nfw --metadata-dir /tmp/tidb_cdc_test/changefeed_auto_stop/pulsar-metadata --bookkeeper-dir /tmp/tidb_cdc_test/changefeed_auto_stop/pulsar-bookie
+ i=1
+ '[' 1 -gt 20 ']'
+ sleep 2
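The nc probes interleaved above come from a port-wait loop of roughly this shape (a reconstruction):
# wait for the pulsar broker port, giving up after 20 probes (~40 s)
i=0
while ! nc -z localhost 6650; do
    i=$((i + 1))
    [ $i -gt 20 ] && echo 'pulsar port not ready' && exit 1
    sleep 2
done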
+ '[' pulsar == mysql ']'
+ stop_tidb_cluster
+ nc -z localhost 6650
+ i=2
+ '[' 2 -gt 20 ']'
+ sleep 2
table sink_hang.t1 exists
table sink_hang.t2 does not exist at the 1st check, retry later
+ nc -z localhost 6650
+ echo 'Waiting for pulsar namespace to be ready...'
Waiting for pulsar namespace to be ready...
+ i=0
+ /usr/local/pulsar/bin/pulsar-admin namespaces list public
table sink_hang.t2 exists
check diff failed 1st time, retry later
start tidb cluster in /tmp/tidb_cdc_test/savepoint
Starting Upstream PD...
Release Version: v8.2.0-alpha-14-g1679dbca2
Edition: Community
Git Commit Hash: 1679dbca25b3483d1375c7e747da27e99ad77360
Git Branch: master
UTC Build Time:  2024-04-30 08:09:12
Starting Downstream PD...
Release Version: v8.2.0-alpha-14-g1679dbca2
Edition: Community
Git Commit Hash: 1679dbca25b3483d1375c7e747da27e99ad77360
Git Branch: master
UTC Build Time:  2024-04-30 08:09:12
Verifying upstream PD is started...
public/default
++ date
+ echo '[Sat May  4 17:57:37 CST 2024] <<<<<< pulsar is ready >>>>>>'
[Sat May  4 17:57:37 CST 2024] <<<<<< pulsar is ready >>>>>>
check diff failed 2nd time, retry later
[Sat May  4 17:57:38 CST 2024] <<<<<< START Pulsar consumer in changefeed_auto_stop case >>>>>>
check_changefeed_state http://127.0.0.1:2379 e2a661fa-03d4-46d7-b7ca-36d1d67012c6 normal null
+ endpoints=http://127.0.0.1:2379
+ changefeed_id=e2a661fa-03d4-46d7-b7ca-36d1d67012c6
+ expected_state=normal
+ error_msg=null
+ tls_dir=null
+ [[ http://127.0.0.1:2379 =~ https ]]
++ cdc cli changefeed query --pd=http://127.0.0.1:2379 -c e2a661fa-03d4-46d7-b7ca-36d1d67012c6 -s
+ info='{
  "upstream_id": 7365081349785656684,
  "namespace": "default",
  "id": "e2a661fa-03d4-46d7-b7ca-36d1d67012c6",
  "state": "normal",
  "checkpoint_tso": 449528894401544193,
  "checkpoint_time": "2024-05-04 17:57:24.293",
  "error": null
}'
+ echo '{
  "upstream_id": 7365081349785656684,
  "namespace": "default",
  "id": "e2a661fa-03d4-46d7-b7ca-36d1d67012c6",
  "state": "normal",
  "checkpoint_tso": 449528894401544193,
  "checkpoint_time": "2024-05-04 17:57:24.293",
  "error": null
}'
{
  "upstream_id": 7365081349785656684,
  "namespace": "default",
  "id": "e2a661fa-03d4-46d7-b7ca-36d1d67012c6",
  "state": "normal",
  "checkpoint_tso": 449528894401544193,
  "checkpoint_time": "2024-05-04 17:57:24.293",
  "error": null
}
++ echo '{' '"upstream_id":' 7365081349785656684, '"namespace":' '"default",' '"id":' '"e2a661fa-03d4-46d7-b7ca-36d1d67012c6",' '"state":' '"normal",' '"checkpoint_tso":' 449528894401544193, '"checkpoint_time":' '"2024-05-04' '17:57:24.293",' '"error":' null '}'
++ jq -r .state
+ state=normal
+ [[ ! normal == \n\o\r\m\a\l ]]
++ echo '{' '"upstream_id":' 7365081349785656684, '"namespace":' '"default",' '"id":' '"e2a661fa-03d4-46d7-b7ca-36d1d67012c6",' '"state":' '"normal",' '"checkpoint_tso":' 449528894401544193, '"checkpoint_time":' '"2024-05-04' '17:57:24.293",' '"error":' null '}'
++ jq -r .error.message
+ message=null
+ [[ ! null =~ null ]]
run task successfully
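check_changefeed_state drives the cdc CLI instead of the HTTP API; the -s flag requests the simplified status shown above, which jq then picks apart. Equivalent by hand:
cdc cli changefeed query --pd=http://127.0.0.1:2379 \
    -c e2a661fa-03d4-46d7-b7ca-36d1d67012c6 -s | jq -r .state    # -> normal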
table changefeed_auto_stop_1.usertable does not exist at the 1st check, retry later
Verifying downstream PD is started...
Starting Upstream TiKV...
TiKV 
Release Version:   8.2.0-alpha
Edition:           Community
Git Commit Hash:   72a0fd5b00235a7c56014b77ddd933e2a0d33c88
Git Commit Branch: master
UTC Build Time:    2024-04-30 02:23:51
Rust Version:      rustc 1.77.0-nightly (89e2160c4 2023-12-27)
Enable Features:   memory-engine pprof-fp jemalloc mem-profiling portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine trace-async-tasks openssl-vendored
Profile:           dist_release
Starting Downstream TiKV...
TiKV 
Release Version:   8.2.0-alpha
Edition:           Community
Git Commit Hash:   72a0fd5b00235a7c56014b77ddd933e2a0d33c88
Git Commit Branch: master
UTC Build Time:    2024-04-30 02:23:51
Rust Version:      rustc 1.77.0-nightly (89e2160c4 2023-12-27)
Enable Features:   memory-engine pprof-fp jemalloc mem-profiling portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine trace-async-tasks openssl-vendored
Profile:           dist_release
check diff failed 3rd time, retry later
Starting Upstream TiDB...
Release Version: v8.2.0-alpha-79-g600b2ed4bf
Edition: Community
Git Commit Hash: 600b2ed4bf0aa38224a1c4c4c68831820735515c
Git Branch: master
UTC Build Time: 2024-05-01 02:56:48
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Starting Downstream TiDB...
Release Version: v8.2.0-alpha-79-g600b2ed4bf
Edition: Community
Git Commit Hash: 600b2ed4bf0aa38224a1c4c4c68831820735515c
Git Branch: master
UTC Build Time: 2024-05-01 02:56:48
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Verifying Upstream TiDB is started...
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
table changefeed_auto_stop_1.usertable does not exist at the 2nd check, retry later
check diff failed 4th time, retry later
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
table changefeed_auto_stop_1.usertable exists
table changefeed_auto_stop_2.usertable does not exist at the 1st check, retry later
check diff failed 5th time, retry later
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
+ stop_tidb_cluster
table changefeed_auto_stop_2.usertable exists
table changefeed_auto_stop_3.usertable exists
table changefeed_auto_stop_4.usertable does not exist at the 1st check, retry later
check diff failed 6th time, retry later
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	196	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63d0c2725ac0012	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:pingcap-tiflow-pull-cdc-integration-pulsar-test-1528-d242-jlv8r, pid:16307, start at 2024-05-04 17:57:45.991010477 +0800 CST m=+5.490451931	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240504-17:59:45.999 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240504-17:57:45.963 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240504-17:47:45.963 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
table changefeed_auto_stop_4.usertable exists
check diff failed 1st time, retry later
check diff failed 7th time, retry later
Verifying Downstream TiDB is started...
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	196	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63d0c2725040014	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:pingcap-tiflow-pull-cdc-integration-pulsar-test-1528-d242-jlv8r, pid:16385, start at 2024-05-04 17:57:45.94712956 +0800 CST m=+5.395035170	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240504-17:59:45.956 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240504-17:57:45.921 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240504-17:47:45.921 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
Starting Upstream TiFlash...
TiFlash
Release Version: v8.2.0-alpha-16-g8e170090f
Edition:         Community
Git Commit Hash: 8e170090fad91c94bef8d908e21c195c1d145b02
Git Branch:      HEAD
UTC Build Time:  2024-04-30 02:34:21
Enable Features: jemalloc sm4(GmSSL) avx2 avx512 unwind thinlto
Profile:         RELWITHDEBINFO
Compiler:        clang++ 13.0.0

Raft Proxy
Git Commit Hash:   7dc50b4eb06124e31f03adb06c20ff7ab61c5f79
Git Commit Branch: HEAD
UTC Build Time:    2024-04-30 02:38:45
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Storage Engine:    tiflash
Prometheus Prefix: tiflash_proxy_
Profile:           release
Enable Features:   external-jemalloc portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure openssl-vendored
Verifying Upstream TiFlash is started...
Logging trace to /tmp/tidb_cdc_test/savepoint/tiflash/log/server.log
Logging errors to /tmp/tidb_cdc_test/savepoint/tiflash/log/error.log
arg matches is ArgMatches { args: {"engine-addr": MatchedArg { occurs: 1, indices: [2], vals: ["127.0.0.1:9500"] }, "engine-label": MatchedArg { occurs: 1, indices: [14], vals: ["tiflash"] }, "addr": MatchedArg { occurs: 1, indices: [20], vals: ["127.0.0.1:9000"] }, "data-dir": MatchedArg { occurs: 1, indices: [6], vals: ["/tmp/tidb_cdc_test/savepoint/tiflash/db/proxy"] }, "engine-version": MatchedArg { occurs: 1, indices: [12], vals: ["v8.2.0-alpha-16-g8e170090f"] }, "config": MatchedArg { occurs: 1, indices: [8], vals: ["/tmp/tidb_cdc_test/savepoint/tiflash-proxy.toml"] }, "advertise-addr": MatchedArg { occurs: 1, indices: [4], vals: ["127.0.0.1:9000"] }, "pd-endpoints": MatchedArg { occurs: 1, indices: [16], vals: ["127.0.0.1:2379"] }, "log-file": MatchedArg { occurs: 1, indices: [18], vals: ["/tmp/tidb_cdc_test/savepoint/tiflash/log/proxy.log"] }, "engine-git-hash": MatchedArg { occurs: 1, indices: [10], vals: ["8e170090fad91c94bef8d908e21c195c1d145b02"] }}, subcommand: None, usage: Some("USAGE:\n    TiFlash Proxy [FLAGS] [OPTIONS] --engine-git-hash <engine-git-hash> --engine-label <engine-label> --engine-version <engine-version>") }
<<< Run all test success >>>
[Pipeline] }
Cache not saved (ws/jenkins-pingcap-tiflow-pull_cdc_integration_pulsar_test-1528/tiflow-cdc already exists)
[Pipeline] // cache
[Pipeline] }
[Pipeline] // dir
[Pipeline] }
[Pipeline] // timeout
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
check diff failed 2nd time, retry later
[Pipeline] // container
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // podTemplate
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
check diff failed 8th time, retry later
+ pd_host=127.0.0.1
+ pd_port=2379
+ is_tls=false
+ '[' false == true ']'
++ run_cdc_cli tso query --pd=http://127.0.0.1:2379
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.savepoint.cli.17847.out cli tso query --pd=http://127.0.0.1:2379
check diff successfully
waiting for process cdc.test to exit, 1st try...
waiting for process cdc.test to exit, 2nd try...
+ set +x
+ tso='449528901432508418
PASS
coverage: 1.8% of statements in github.com/pingcap/tiflow/...'
+ echo 449528901432508418 PASS coverage: 1.8% of statements in github.com/pingcap/tiflow/...
+ awk -F ' ' '{print $1}'
+ set +x
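Assuming the standard TiDB TSO layout (physical milliseconds in the high bits, an 18-bit logical counter in the low bits), the queried TSO decodes in shell:
# physical part of the TSO captured above, in ms since the Unix epoch
tso=449528901432508418
echo $(( tso >> 18 ))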
[Sat May  4 17:57:52 CST 2024] <<<<<< START cdc server in savepoint case >>>>>>
+ [[ '' == \t\r\u\e ]]
+ set +e
+ get_info_fail_msg='failed to get info:'
+ etcd_info_msg='etcd info'
+ GO_FAILPOINTS=
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.savepoint.1788517887.out server --log-file /tmp/tidb_cdc_test/savepoint/cdc.log --log-level debug --data-dir /tmp/tidb_cdc_test/savepoint/cdc_data --cluster-id default
+ '[' -z '' ']'
+ curl_status_cmd='curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info --user ticdc:ticdc_secret -vsL'
+ [[ no != \n\o ]]
+ (( i = 0 ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info --user ticdc:ticdc_secret -vsL
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connection refused
* Failed connect to 127.0.0.1:8300; Connection refused
* Closing connection 0
+ res=
+ echo ''
+ grep -q 'failed to get info:'
+ echo ''
+ grep -q 'etcd info'
+ '[' 0 -eq 50 ']'
+ sleep 3
check diff failed 9th time, retry later
waiting for process cdc.test to exit, 3rd try...
cdc.test: no process found
waiting for process cdc.test to exit, 4th try...
process cdc.test has already exited
[Sat May  4 17:57:53 CST 2024] <<<<<< run test case changefeed_auto_stop success! >>>>>>
check diff failed 10th time, retry later
+ (( i++ ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info --user ticdc:ticdc_secret -vsL
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connected to 127.0.0.1 (127.0.0.1) port 8300 (#0)
* Server auth using Basic with user 'ticdc'
> GET /debug/info HTTP/1.1
> Authorization: Basic dGljZGM6dGljZGNfc2VjcmV0
> User-Agent: curl/7.29.0
> Host: 127.0.0.1:8300
> Accept: */*
> 
< HTTP/1.1 200 OK
< Date: Sat, 04 May 2024 09:57:55 GMT
< Content-Length: 816
< Content-Type: text/plain; charset=utf-8
< 
{ [data not shown]
* Connection #0 to host 127.0.0.1 left intact
+ res='

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/adeeb80a-b918-41cf-825d-e2fca754b6a2
	{"id":"adeeb80a-b918-41cf-825d-e2fca754b6a2","address":"127.0.0.1:8300","version":"v8.2.0-alpha-84-gecb3fedc8","git-hash":"ecb3fedc801aec53272bf16e67f5d34b8da910c1","deploy-path":"/home/jenkins/agent/workspace/pingcap/tiflow/pull_cdc_integration_pulsar_test/tiflow/bin/cdc.test","start-timestamp":1714816672}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f4309a415d3
	adeeb80a-b918-41cf-825d-e2fca754b6a2

/tidb/cdc/default/default/upstream/7365081461474273269
	{"id":7365081461474273269,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/adeeb80a-b918-41cf-825d-e2fca754b6a2
	{"id":"adeeb80a-b918-41cf-825d-e2fca754b6a2","address":"127.0.0.1:8300","version":"v8.2.0-alpha-84-gecb3fedc8","git-hash":"ecb3fedc801aec53272bf16e67f5d34b8da910c1","deploy-path":"/home/jenkins/agent/workspace/pingcap/tiflow/pull_cdc_integration_pulsar_test/tiflow/bin/cdc.test","start-timestamp":1714816672}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f4309a415d3
	adeeb80a-b918-41cf-825d-e2fca754b6a2

/tidb/cdc/default/default/upstream/7365081461474273269
	{"id":7365081461474273269,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'failed to get info:'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/adeeb80a-b918-41cf-825d-e2fca754b6a2
	{"id":"adeeb80a-b918-41cf-825d-e2fca754b6a2","address":"127.0.0.1:8300","version":"v8.2.0-alpha-84-gecb3fedc8","git-hash":"ecb3fedc801aec53272bf16e67f5d34b8da910c1","deploy-path":"/home/jenkins/agent/workspace/pingcap/tiflow/pull_cdc_integration_pulsar_test/tiflow/bin/cdc.test","start-timestamp":1714816672}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f4309a415d3
	adeeb80a-b918-41cf-825d-e2fca754b6a2

/tidb/cdc/default/default/upstream/7365081461474273269
	{"id":7365081461474273269,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'etcd info'
+ break
+ set +x
++ date
+ echo '[Sat May  4 17:57:55 CST 2024] <<<<<< START pulsar cluster in savepoint case >>>>>>'
[Sat May  4 17:57:55 CST 2024] <<<<<< START pulsar cluster in savepoint case >>>>>>
+ workdir=/tmp/tidb_cdc_test/savepoint
+ cluster_type=normal
+ cd /tmp/tidb_cdc_test/savepoint
+ DEFAULT_PULSAR_HOME=/usr/local/pulsar
+ pulsar_dir=/usr/local/pulsar
++ cat
+ mtls_conf='
authenticationEnabled=true
authenticationProviders=org.apache.pulsar.broker.authentication.AuthenticationProviderTls
brokerClientTlsEnabled=true
brokerClientTrustCertsFilePath=/tmp/tidb_cdc_test/savepoint/ca.cert.pem
brokerClientAuthenticationPlugin=org.apache.pulsar.client.impl.auth.AuthenticationTls
brokerClientAuthenticationParameters={"tlsCertFile":"/tmp/tidb_cdc_test/savepoint/broker_client.cert.pem","tlsKeyFile":"/tmp/tidb_cdc_test/savepoint/broker_client.key-pk8.pem"}
brokerServicePortTls=6651
webServicePortTls=8443
tlsTrustCertsFilePath=/tmp/tidb_cdc_test/savepoint/ca.cert.pem
tlsCertificateFilePath=/tmp/tidb_cdc_test/savepoint/server.cert.pem
tlsKeyFilePath=/tmp/tidb_cdc_test/savepoint/server.key-pk8.pem
tlsRequireTrustedClientCertOnConnect=true
tlsAllowInsecureConnection=false
tlsCertRefreshCheckDurationSec=300'
++ cat
+ normal_client_conf='
webServiceUrl=http://localhost:8080/
brokerServiceUrl=pulsar://localhost:6650/'
++ cat
+ mtls_client_conf='
webServiceUrl=https://localhost:8443/
brokerServiceUrl=pulsar+ssl://localhost:6651/
authPlugin=org.apache.pulsar.client.impl.auth.AuthenticationTls
authParams=tlsCertFile:/tmp/tidb_cdc_test/savepoint/broker_client.cert.pem,tlsKeyFile:/tmp/tidb_cdc_test/savepoint/broker_client.key-pk8.pem
tlsTrustCertsFilePath=/tmp/tidb_cdc_test/savepoint/ca.cert.pem'
++ cat
+ oauth_client_conf='
    webServiceUrl=http://localhost:8080/
    brokerServiceUrl=pulsar://localhost:6650/
    authPlugin=org.apache.pulsar.client.impl.auth.oauth2.AuthenticationOAuth2
    authParams={"privateKey":"/tmp/tidb_cdc_test/savepoint/credential.json","audience":"cdc-api-uri","issuerUrl":"http://localhost:9096"}'
++ cat
+ oauth_conf='
authenticationEnabled=true
authenticationProviders=org.apache.pulsar.broker.authentication.AuthenticationProviderToken

brokerClientAuthenticationPlugin=org.apache.pulsar.client.impl.auth.oauth2.AuthenticationOAuth2
brokerClientAuthenticationParameters={"privateKey":"file:///tmp/tidb_cdc_test/savepoint/credential.json","audience":"cdc-api-uri","issuerUrl":"http://localhost:9096"}
tokenSecretKey=data:;base64,U0poWDM2X0thcFlTeWJCdEpxMzVseFhfQnJyNExSVVJTa203UW1YSkdteThwVUZXOUVJT2NWUVBzeWt6OS1qag=='
++ cat
+ credential_json='
    {
        "client_id":"1234",
        "client_secret":"e0KVlA2EiBfjoN13olyZd2kv1KL",
        "audience":"cdc-api-uri",
        "issuer_url":"http://localhost:9096",
        "type": "client_credentials"
    }'
++ cat
+ cert_server_conf='[ req ]
default_bits = 2048
prompt = no
default_md = sha256
distinguished_name = dn

[ v3_ext ]
authorityKeyIdentifier=keyid,issuer:always
basicConstraints=CA:FALSE
keyUsage=critical, digitalSignature, keyEncipherment
extendedKeyUsage=serverAuth
subjectAltName=@alt_names

[ dn ]
CN = server

[ alt_names ]
DNS.1 = localhost
IP.1 = 127.0.0.1'
+ echo '
webServiceUrl=http://localhost:8080/
brokerServiceUrl=pulsar://localhost:6650/'
+ cp /usr/local/pulsar/conf/standalone.conf /tmp/tidb_cdc_test/savepoint/pulsar_standalone.conf
+ pulsar_port=6650
+ '[' normal == mtls ']'
+ '[' normal == oauth ']'
+ echo 'no cluster type specified, using default configuration.'
no cluster type specified, using default configuration.
++ date
+ echo '[Sat May  4 17:57:55 CST 2024] <<<<<< START pulsar cluster in normal mode in savepoint case >>>>>>'
[Sat May  4 17:57:55 CST 2024] <<<<<< START pulsar cluster in normal mode in savepoint case >>>>>>
+ echo 'Waiting for pulsar port to be ready...'
Waiting for pulsar port to be ready...
+ i=0
+ nc -z localhost 6650
+ /usr/local/pulsar/bin/pulsar standalone --config /tmp/tidb_cdc_test/savepoint/pulsar_standalone.conf -nfw --metadata-dir /tmp/tidb_cdc_test/savepoint/pulsar-metadata --bookkeeper-dir /tmp/tidb_cdc_test/savepoint/pulsar-bookie
+ i=1
+ '[' 1 -gt 20 ']'
+ sleep 2
check diff still failed after all retries
A total of 2 tables need to be compared


Comparing the table structure of ``sink_hang`.`t2`` ... equivalent
Comparing the table data of ``sink_hang`.`t2`` ...
_____________________________________________________________________________
Progress [>------------------------------------------------------------] 0% 0/1
Comparing the table structure of ``sink_hang`.`t1`` ... equivalent
Comparing the table data of ``sink_hang`.`t2`` ...
Comparing the table data of ``sink_hang`.`t1`` ...
_____________________________________________________________________________
Progress [>------------------------------------------------------------] 0% 0/2
Comparing the table data of ``sink_hang`.`t2`` ... failure
Comparing the table data of ``sink_hang`.`t1`` ...
_____________________________________________________________________________
Progress [==============================>------------------------------] 50% 0/1
Comparing the table data of ``sink_hang`.`t1`` ... failure
_____________________________________________________________________________
Progress [============================================================>] 100% 0/0
Progress [============================================================>] 100% 0/0
The data of `sink_hang`.`t2` is not equal
The data of `sink_hang`.`t1` is not equal

The rest of tables are all equal.

A total of 2 tables have been compared, 0 tables finished, 2 tables failed, 0 tables skipped.
The patch file has been generated in 
	'/tmp/tidb_cdc_test/sink_hang/sync_diff/output/fix-on-tidb0/'
You can view the comparison details in '/tmp/tidb_cdc_test/sink_hang/sync_diff/output/sync_diff.log'
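When sync_diff declares tables unequal it exports fix SQL under the output directory named in the report; the artifacts can be inspected directly (paths taken from the report above):
ls /tmp/tidb_cdc_test/sink_hang/sync_diff/output/fix-on-tidb0/
less /tmp/tidb_cdc_test/sink_hang/sync_diff/output/sync_diff.log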
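"check diff failed at last" means the harness ran sync_diff_inspector repeatedly until a retry budget was exhausted, producing one comparison report per attempt. A hedged sketch of such a retry wrapper, assuming a helper named check_sync_diff and a default budget of 10 attempts (neither the name nor the count is confirmed by this log; the two echoed messages do appear in it):

check_sync_diff() {
    workdir=$1
    conf=$2
    retries=${3:-10}    # assumed default budget
    i=0
    while ! sync_diff_inspector --config="$conf" >"$workdir/sync_diff_inspector.log" 2>&1; do
        i=$((i + 1))
        if [ "$i" -ge "$retries" ]; then
            echo "check diff failed at last"
            return 1
        fi
        sleep 3    # assumed interval between attempts
    done
    echo "check diff successfully"
}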

[2024/05/04 17:57:54.314 +08:00] [INFO] [printer.go:46] ["Welcome to sync_diff_inspector"] ["Release Version"=v7.4.0] ["Git Commit Hash"=d671b0840063bc2532941f02e02e12627402844c] ["Git Branch"=heads/refs/tags/v7.4.0] ["UTC Build Time"="2023-09-22 03:51:56"] ["Go Version"=go1.21.1]
[2024/05/04 17:57:54.314 +08:00] [INFO] [main.go:101] [config="{\"check-thread-count\":4,\"split-thread-count\":5,\"export-fix-sql\":true,\"check-struct-only\":false,\"dm-addr\":\"\",\"dm-task\":\"\",\"data-sources\":{\"mysql1\":{\"host\":\"127.0.0.1\",\"port\":4000,\"user\":\"root\",\"password\":\"******\",\"sql-mode\":\"\",\"snapshot\":\"\",\"security\":null,\"route-rules\":null,\"Router\":{\"Selector\":{}},\"Conn\":null},\"tidb0\":{\"host\":\"127.0.0.1\",\"port\":3306,\"user\":\"root\",\"password\":\"******\",\"sql-mode\":\"\",\"snapshot\":\"\",\"security\":null,\"route-rules\":null,\"Router\":{\"Selector\":{}},\"Conn\":null}},\"routes\":null,\"table-configs\":null,\"task\":{\"source-instances\":[\"mysql1\"],\"source-routes\":null,\"target-instance\":\"tidb0\",\"target-check-tables\":[\"sink_hang.t?*\"],\"target-configs\":null,\"output-dir\":\"/tmp/tidb_cdc_test/sink_hang/sync_diff/output\",\"SourceInstances\":[{\"host\":\"127.0.0.1\",\"port\":4000,\"user\":\"root\",\"password\":\"******\",\"sql-mode\":\"\",\"snapshot\":\"\",\"security\":null,\"route-rules\":null,\"Router\":{\"Selector\":{}},\"Conn\":null}],\"TargetInstance\":{\"host\":\"127.0.0.1\",\"port\":3306,\"user\":\"root\",\"password\":\"******\",\"sql-mode\":\"\",\"snapshot\":\"\",\"security\":null,\"route-rules\":null,\"Router\":{\"Selector\":{}},\"Conn\":null},\"TargetTableConfigs\":null,\"TargetCheckTables\":[{}],\"FixDir\":\"/tmp/tidb_cdc_test/sink_hang/sync_diff/output/fix-on-tidb0\",\"CheckpointDir\":\"/tmp/tidb_cdc_test/sink_hang/sync_diff/output/checkpoint\",\"HashFile\":\"\"},\"ConfigFile\":\"/home/jenkins/agent/workspace/pingcap/tiflow/pull_cdc_integration_pulsar_test/tiflow/tests/integration_tests/sink_hang/conf/diff_config.toml\",\"PrintVersion\":false}"]
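The config dump above maps back to a sync_diff_inspector TOML file. A reconstruction from the logged values, written via a heredoc so the whole sketch stays in bash (only fields visible in the dump are included; defaulted fields are omitted, and this is not the original file verbatim):

cat > /tmp/diff_config.toml <<'EOF'
# Reconstructed from the config dump in the log above.
check-thread-count = 4
export-fix-sql = true
check-struct-only = false

[data-sources.mysql1]
host = "127.0.0.1"
port = 4000
user = "root"

[data-sources.tidb0]
host = "127.0.0.1"
port = 3306
user = "root"

[task]
output-dir = "/tmp/tidb_cdc_test/sink_hang/sync_diff/output"
source-instances = ["mysql1"]
target-instance = "tidb0"
target-check-tables = ["sink_hang.t?*"]
EOF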
[2024/05/04 17:57:54.315 +08:00] [DEBUG] [diff.go:842] ["set tidb cfg"]
[2024/05/04 17:57:54.317 +08:00] [DEBUG] [common.go:386] ["query tables"] [query="SHOW FULL TABLES IN `sink_hang` WHERE Table_Type = 'BASE TABLE';"]
[2024/05/04 17:57:54.317 +08:00] [DEBUG] [common.go:386] ["query tables"] [query="SHOW FULL TABLES IN `test` WHERE Table_Type = 'BASE TABLE';"]
[2024/05/04 17:57:54.318 +08:00] [DEBUG] [source.go:326] ["match target table"] [table=`sink_hang`.`t1`]
[2024/05/04 17:57:54.319 +08:00] [DEBUG] [source.go:326] ["match target table"] [table=`sink_hang`.`t2`]
[2024/05/04 17:57:54.321 +08:00] [INFO] [tidb.go:209] ["find router for tidb source"]
[2024/05/04 17:57:54.321 +08:00] [DEBUG] [common.go:386] ["query tables"] [query="SHOW FULL TABLES IN `sink_hang` WHERE Table_Type = 'BASE TABLE';"]
[2024/05/04 17:57:54.321 +08:00] [DEBUG] [common.go:386] ["query tables"] [query="SHOW FULL TABLES IN `test` WHERE Table_Type = 'BASE TABLE';"]
[2024/05/04 17:57:54.322 +08:00] [INFO] [source.go:412] ["table match check finished"]
[2024/05/04 17:57:54.322 +08:00] [INFO] [tidb.go:209] ["find router for tidb source"]
[2024/05/04 17:57:54.322 +08:00] [DEBUG] [common.go:386] ["query tables"] [query="SHOW FULL TABLES IN `sink_hang` WHERE Table_Type = 'BASE TABLE';"]
[2024/05/04 17:57:54.322 +08:00] [DEBUG] [common.go:386] ["query tables"] [query="SHOW FULL TABLES IN `test` WHERE Table_Type = 'BASE TABLE';"]
[2024/05/04 17:57:54.323 +08:00] [INFO] [source.go:412] ["table match check finished"]
[2024/05/04 17:57:54.323 +08:00] [INFO] [diff.go:358] ["The upstream is TiDB. pick it as work source candidate"]
[2024/05/04 17:57:54.330 +08:00] [INFO] [pd_service_discovery.go:628] ["[pd] switch leader"] [new-leader=http://127.0.0.1:2379] [old-leader=]
[2024/05/04 17:57:54.330 +08:00] [INFO] [pd_service_discovery.go:195] ["[pd] init cluster id"] [cluster-id=7365081367515714604]
[2024/05/04 17:57:54.330 +08:00] [INFO] [client.go:607] ["[pd] changing service mode"] [old-mode=UNKNOWN_SVC_MODE] [new-mode=PD_SVC_MODE]
[2024/05/04 17:57:54.330 +08:00] [INFO] [tso_client.go:230] ["[tso] switch dc tso global allocator serving address"] [dc-location=global] [new-address=http://127.0.0.1:2379]
[2024/05/04 17:57:54.330 +08:00] [INFO] [tso_dispatcher.go:313] ["[tso] tso dispatcher created"] [dc-location=global]
[2024/05/04 17:57:54.330 +08:00] [INFO] [client.go:655] ["[pd] service mode changed"] [old-mode=UNKNOWN_SVC_MODE] [new-mode=PD_SVC_MODE]
[2024/05/04 17:57:54.331 +08:00] [INFO] [pd.go:212] ["tidb support auto gc safepoint"] [version=8.2.0-alpha-79-g600b2ed4bf]
[2024/05/04 17:57:54.331 +08:00] [INFO] [diff.go:349] ["start update service to keep GC stopped automatically"]
[2024/05/04 17:57:54.331 +08:00] [INFO] [pd.go:227] ["generate dumpling gc safePoint id"] [id=Sync_diff_1714816674331570364]
[2024/05/04 17:57:54.331 +08:00] [DEBUG] [pd.go:229] ["update PD safePoint limit with ttl"] [safePoint=449528902272417803] [updateInterval=2m30s]
[2024/05/04 17:57:54.331 +08:00] [INFO] [diff.go:363] ["The downstream is TiDB. pick it as work source first"]
[2024/05/04 17:57:54.336 +08:00] [INFO] [pd_service_discovery.go:628] ["[pd] switch leader"] [new-leader=http://127.0.0.1:2479] [old-leader=]
[2024/05/04 17:57:54.336 +08:00] [INFO] [pd_service_discovery.go:195] ["[pd] init cluster id"] [cluster-id=7365081365959189520]
[2024/05/04 17:57:54.337 +08:00] [INFO] [client.go:607] ["[pd] changing service mode"] [old-mode=UNKNOWN_SVC_MODE] [new-mode=PD_SVC_MODE]
[2024/05/04 17:57:54.337 +08:00] [INFO] [tso_client.go:230] ["[tso] switch dc tso global allocator serving address"] [dc-location=global] [new-address=http://127.0.0.1:2479]
[2024/05/04 17:57:54.337 +08:00] [INFO] [tso_dispatcher.go:313] ["[tso] tso dispatcher created"] [dc-location=global]
[2024/05/04 17:57:54.337 +08:00] [INFO] [client.go:655] ["[pd] service mode changed"] [old-mode=UNKNOWN_SVC_MODE] [new-mode=PD_SVC_MODE]
[2024/05/04 17:57:54.338 +08:00] [INFO] [pd.go:212] ["tidb support auto gc safepoint"] [version=8.2.0-alpha-79-g600b2ed4bf]
[2024/05/04 17:57:54.338 +08:00] [INFO] [diff.go:349] ["start update service to keep GC stopped automatically"]
[2024/05/04 17:57:54.338 +08:00] [INFO] [diff.go:191] ["not found checkpoint file, start from beginning"]
[2024/05/04 17:57:54.338 +08:00] [INFO] [pd.go:227] ["generate dumpling gc safePoint id"] [id=Sync_diff_1714816674338184165]
[2024/05/04 17:57:54.338 +08:00] [DEBUG] [pd.go:229] ["update PD safePoint limit with ttl"] [safePoint=449528902272942093] [updateInterval=2m30s]
[2024/05/04 17:57:54.340 +08:00] [INFO] [diff.go:721] ["start writeSQLs goroutine"]
[2024/05/04 17:57:54.340 +08:00] [INFO] [diff.go:377] ["start handleCheckpoint goroutine"]
[2024/05/04 17:57:54.340 +08:00] [DEBUG] [common.go:525] [GetBucketsInfo] [sql="SHOW STATS_BUCKETS WHERE db_name= ? AND table_name= ?;"] [schema=sink_hang] [table=t1]
[2024/05/04 17:57:54.340 +08:00] [DEBUG] [common.go:525] [GetBucketsInfo] [sql="SHOW STATS_BUCKETS WHERE db_name= ? AND table_name= ?;"] [schema=sink_hang] [table=t2]
[2024/05/04 17:57:54.341 +08:00] [INFO] [tidb.go:58] ["failed to build bucket iterator, fall back to use random iterator"] [error="primary key on id in buckets info not found"] [errorVerbose="primary key on id in buckets info not found\ngithub.com/pingcap/errors.NotFoundf\n\t/go/pkg/mod/github.com/pingcap/errors@v0.11.5-0.20221009092201-b66cddb77c32/juju_adaptor.go:117\ngithub.com/pingcap/tidb-tools/pkg/dbutil.GetBucketsInfo\n\t/home/jenkins/agent/workspace/build-common/go/src/github.com/pingcap/tidb-tools/pkg/dbutil/common.go:576\ngithub.com/pingcap/tidb-tools/sync_diff_inspector/splitter.(*BucketIterator).init\n\t/home/jenkins/agent/workspace/build-common/go/src/github.com/pingcap/tidb-tools/sync_diff_inspector/splitter/bucket.go:139\ngithub.com/pingcap/tidb-tools/sync_diff_inspector/splitter.NewBucketIteratorWithCheckpoint\n\t/home/jenkins/agent/workspace/build-common/go/src/github.com/pingcap/tidb-tools/sync_diff_inspector/splitter/bucket.go:80\ngithub.com/pingcap/tidb-tools/sync_diff_inspector/source.(*TiDBTableAnalyzer).AnalyzeSplitter\n\t/home/jenkins/agent/workspace/build-common/go/src/github.com/pingcap/tidb-tools/sync_diff_inspector/source/tidb.go:54\ngithub.com/pingcap/tidb-tools/sync_diff_inspector/source.(*ChunksIterator).produceChunks.func3\n\t/home/jenkins/agent/workspace/build-common/go/src/github.com/pingcap/tidb-tools/sync_diff_inspector/source/chunks_iter.go:133\ngithub.com/pingcap/tidb-tools/sync_diff_inspector/utils.(*WorkerPool).Apply.func1\n\t/home/jenkins/agent/workspace/build-common/go/src/github.com/pingcap/tidb-tools/sync_diff_inspector/utils/utils.go:94\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1650"]
[2024/05/04 17:57:54.341 +08:00] [DEBUG] [common.go:237] ["get row count"] [sql="SELECT COUNT(1) cnt FROM `sink_hang`.`t1` WHERE TRUE"] [args=null]
[2024/05/04 17:57:54.342 +08:00] [INFO] [random.go:110] ["get chunk size for table"] ["chunk size"=50000] [db=sink_hang] [table=t1]
[2024/05/04 17:57:54.342 +08:00] [INFO] [random.go:116] ["split range by random"] ["row count"=0] ["split chunk num"=0]
[2024/05/04 17:57:54.342 +08:00] [INFO] [diff.go:280] ["global consume chunk info"] ["chunk index"="{\"table-index\":1,\"bucket-index-left\":0,\"bucket-index-right\":0,\"chunk-index\":0,\"chunk-count\":0}"] ["chunk bound"="[]"]
[2024/05/04 17:57:54.342 +08:00] [INFO] [tidb.go:58] ["failed to build bucket iterator, fall back to use random iterator"] [error="primary key on id in buckets info not found"] [errorVerbose="primary key on id in buckets info not found\ngithub.com/pingcap/errors.NotFoundf\n\t/go/pkg/mod/github.com/pingcap/errors@v0.11.5-0.20221009092201-b66cddb77c32/juju_adaptor.go:117\ngithub.com/pingcap/tidb-tools/pkg/dbutil.GetBucketsInfo\n\t/home/jenkins/agent/workspace/build-common/go/src/github.com/pingcap/tidb-tools/pkg/dbutil/common.go:576\ngithub.com/pingcap/tidb-tools/sync_diff_inspector/splitter.(*BucketIterator).init\n\t/home/jenkins/agent/workspace/build-common/go/src/github.com/pingcap/tidb-tools/sync_diff_inspector/splitter/bucket.go:139\ngithub.com/pingcap/tidb-tools/sync_diff_inspector/splitter.NewBucketIteratorWithCheckpoint\n\t/home/jenkins/agent/workspace/build-common/go/src/github.com/pingcap/tidb-tools/sync_diff_inspector/splitter/bucket.go:80\ngithub.com/pingcap/tidb-tools/sync_diff_inspector/source.(*TiDBTableAnalyzer).AnalyzeSplitter\n\t/home/jenkins/agent/workspace/build-common/go/src/github.com/pingcap/tidb-tools/sync_diff_inspector/source/tidb.go:54\ngithub.com/pingcap/tidb-tools/sync_diff_inspector/source.(*ChunksIterator).produceChunks.func3\n\t/home/jenkins/agent/workspace/build-common/go/src/github.com/pingcap/tidb-tools/sync_diff_inspector/source/chunks_iter.go:133\ngithub.com/pingcap/tidb-tools/sync_diff_inspector/utils.(*WorkerPool).Apply.func1\n\t/home/jenkins/agent/workspace/build-common/go/src/github.com/pingcap/tidb-tools/sync_diff_inspector/utils/utils.go:94\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1650"]
[2024/05/04 17:57:54.342 +08:00] [DEBUG] [common.go:237] ["get row count"] [sql="SELECT COUNT(1) cnt FROM `sink_hang`.`t2` WHERE TRUE"] [args=null]
[2024/05/04 17:57:54.342 +08:00] [DEBUG] [utils.go:766] ["table columns"] [columns="[{\"id\":1,\"name\":{\"O\":\"id\",\"L\":\"id\"},\"offset\":0,\"origin_default\":null,\"origin_default_bit\":null,\"default\":null,\"default_bit\":null,\"default_is_expr\":false,\"generated_expr_string\":\"\",\"generated_stored\":false,\"dependences\":null,\"type\":{\"Tp\":3,\"Flag\":515,\"Flen\":11,\"Decimal\":0,\"Charset\":\"binary\",\"Collate\":\"binary\",\"Elems\":null,\"ElemsIsBinaryLit\":null,\"Array\":false},\"state\":5,\"comment\":\"\",\"hidden\":false,\"change_state_info\":null,\"version\":2},{\"id\":2,\"name\":{\"O\":\"val\",\"L\":\"val\"},\"offset\":1,\"origin_default\":null,\"origin_default_bit\":null,\"default\":null,\"default_bit\":null,\"default_is_expr\":false,\"generated_expr_string\":\"\",\"generated_stored\":false,\"dependences\":null,\"type\":{\"Tp\":3,\"Flag\":0,\"Flen\":11,\"Decimal\":0,\"Charset\":\"binary\",\"Collate\":\"binary\",\"Elems\":null,\"ElemsIsBinaryLit\":null,\"Array\":false},\"state\":5,\"comment\":\"\",\"hidden\":false,\"change_state_info\":null,\"version\":2}]"]
[2024/05/04 17:57:54.342 +08:00] [DEBUG] [utils.go:785] ["count and checksum"] [sql="SELECT COUNT(*) as CNT, BIT_XOR(CAST(CRC32(CONCAT_WS(',', `id`, `val`, CONCAT(ISNULL(`id`), ISNULL(`val`))))AS UNSIGNED)) as CHECKSUM FROM `sink_hang`.`t1` WHERE ((TRUE) AND (TRUE));"] [args=null]
[2024/05/04 17:57:54.342 +08:00] [DEBUG] [utils.go:766] ["table columns"] [columns="[{\"id\":1,\"name\":{\"O\":\"id\",\"L\":\"id\"},\"offset\":0,\"origin_default\":null,\"origin_default_bit\":null,\"default\":null,\"default_bit\":null,\"default_is_expr\":false,\"generated_expr_string\":\"\",\"generated_stored\":false,\"dependences\":null,\"type\":{\"Tp\":3,\"Flag\":515,\"Flen\":11,\"Decimal\":0,\"Charset\":\"binary\",\"Collate\":\"binary\",\"Elems\":null,\"ElemsIsBinaryLit\":null,\"Array\":false},\"state\":5,\"comment\":\"\",\"hidden\":false,\"change_state_info\":null,\"version\":2},{\"id\":2,\"name\":{\"O\":\"val\",\"L\":\"val\"},\"offset\":1,\"origin_default\":null,\"origin_default_bit\":null,\"default\":null,\"default_bit\":null,\"default_is_expr\":false,\"generated_expr_string\":\"\",\"generated_stored\":false,\"dependences\":null,\"type\":{\"Tp\":3,\"Flag\":0,\"Flen\":11,\"Decimal\":0,\"Charset\":\"binary\",\"Collate\":\"binary\",\"Elems\":null,\"ElemsIsBinaryLit\":null,\"Array\":false},\"state\":5,\"comment\":\"\",\"hidden\":false,\"change_state_info\":null,\"version\":2}]"]
[2024/05/04 17:57:54.342 +08:00] [DEBUG] [utils.go:785] ["count and checksum"] [sql="SELECT COUNT(*) as CNT, BIT_XOR(CAST(CRC32(CONCAT_WS(',', `id`, `val`, CONCAT(ISNULL(`id`), ISNULL(`val`))))AS UNSIGNED)) as CHECKSUM FROM `sink_hang`.`t1` WHERE ((TRUE) AND (TRUE));"] [args=null]
[2024/05/04 17:57:54.343 +08:00] [INFO] [random.go:110] ["get chunk size for table"] ["chunk size"=50000] [db=sink_hang] [table=t2]
[2024/05/04 17:57:54.343 +08:00] [INFO] [random.go:116] ["split range by random"] ["row count"=0] ["split chunk num"=0]
[2024/05/04 17:57:54.343 +08:00] [INFO] [diff.go:280] ["global consume chunk info"] ["chunk index"="{\"table-index\":0,\"bucket-index-left\":0,\"bucket-index-right\":0,\"chunk-index\":0,\"chunk-count\":0}"] ["chunk bound"="[]"]
[2024/05/04 17:57:54.343 +08:00] [DEBUG] [utils.go:766] ["table columns"] [columns="[{\"id\":1,\"name\":{\"O\":\"id\",\"L\":\"id\"},\"offset\":0,\"origin_default\":null,\"origin_default_bit\":null,\"default\":null,\"default_bit\":null,\"default_is_expr\":false,\"generated_expr_string\":\"\",\"generated_stored\":false,\"dependences\":null,\"type\":{\"Tp\":3,\"Flag\":515,\"Flen\":11,\"Decimal\":0,\"Charset\":\"binary\",\"Collate\":\"binary\",\"Elems\":null,\"ElemsIsBinaryLit\":null,\"Array\":false},\"state\":5,\"comment\":\"\",\"hidden\":false,\"change_state_info\":null,\"version\":2},{\"id\":2,\"name\":{\"O\":\"val\",\"L\":\"val\"},\"offset\":1,\"origin_default\":null,\"origin_default_bit\":null,\"default\":null,\"default_bit\":null,\"default_is_expr\":false,\"generated_expr_string\":\"\",\"generated_stored\":false,\"dependences\":null,\"type\":{\"Tp\":3,\"Flag\":0,\"Flen\":11,\"Decimal\":0,\"Charset\":\"binary\",\"Collate\":\"binary\",\"Elems\":null,\"ElemsIsBinaryLit\":null,\"Array\":false},\"state\":5,\"comment\":\"\",\"hidden\":false,\"change_state_info\":null,\"version\":2}]"]
[2024/05/04 17:57:54.343 +08:00] [DEBUG] [utils.go:785] ["count and checksum"] [sql="SELECT COUNT(*) as CNT, BIT_XOR(CAST(CRC32(CONCAT_WS(',', `id`, `val`, CONCAT(ISNULL(`id`), ISNULL(`val`))))AS UNSIGNED)) as CHECKSUM FROM `sink_hang`.`t2` WHERE ((TRUE) AND (TRUE));"] [args=null]
[2024/05/04 17:57:54.343 +08:00] [DEBUG] [utils.go:766] ["table columns"] [columns="[{\"id\":1,\"name\":{\"O\":\"id\",\"L\":\"id\"},\"offset\":0,\"origin_default\":null,\"origin_default_bit\":null,\"default\":null,\"default_bit\":null,\"default_is_expr\":false,\"generated_expr_string\":\"\",\"generated_stored\":false,\"dependences\":null,\"type\":{\"Tp\":3,\"Flag\":515,\"Flen\":11,\"Decimal\":0,\"Charset\":\"binary\",\"Collate\":\"binary\",\"Elems\":null,\"ElemsIsBinaryLit\":null,\"Array\":false},\"state\":5,\"comment\":\"\",\"hidden\":false,\"change_state_info\":null,\"version\":2},{\"id\":2,\"name\":{\"O\":\"val\",\"L\":\"val\"},\"offset\":1,\"origin_default\":null,\"origin_default_bit\":null,\"default\":null,\"default_bit\":null,\"default_is_expr\":false,\"generated_expr_string\":\"\",\"generated_stored\":false,\"dependences\":null,\"type\":{\"Tp\":3,\"Flag\":0,\"Flen\":11,\"Decimal\":0,\"Charset\":\"binary\",\"Collate\":\"binary\",\"Elems\":null,\"ElemsIsBinaryLit\":null,\"Array\":false},\"state\":5,\"comment\":\"\",\"hidden\":false,\"change_state_info\":null,\"version\":2}]"]
[2024/05/04 17:57:54.343 +08:00] [DEBUG] [utils.go:785] ["count and checksum"] [sql="SELECT COUNT(*) as CNT, BIT_XOR(CAST(CRC32(CONCAT_WS(',', `id`, `val`, CONCAT(ISNULL(`id`), ISNULL(`val`))))AS UNSIGNED)) as CHECKSUM FROM `sink_hang`.`t2` WHERE ((TRUE) AND (TRUE));"] [args=null]
[2024/05/04 17:57:54.344 +08:00] [DEBUG] [diff.go:604] ["checksum failed"] ["chunk id"="{\"table-index\":1,\"bucket-index-left\":0,\"bucket-index-right\":0,\"chunk-index\":0,\"chunk-count\":0}"] [table=t1] ["upstream chunk size"=3] ["downstream chunk size"=0] ["upstream checksum"=1856024304] ["downstream checksum"=0]
[2024/05/04 17:57:54.344 +08:00] [DEBUG] [tidb.go:189] ["select data"] [sql="SELECT /*!40001 SQL_NO_CACHE */ `id`, `val` FROM `sink_hang`.`t1` WHERE ((TRUE) AND (TRUE)) ORDER BY `id`"] [args=null]
[2024/05/04 17:57:54.345 +08:00] [DEBUG] [tidb.go:189] ["select data"] [sql="SELECT /*!40001 SQL_NO_CACHE */ `id`, `val` FROM `sink_hang`.`t1` WHERE ((TRUE) AND (TRUE)) ORDER BY `id`"] [args=null]
[2024/05/04 17:57:54.345 +08:00] [DEBUG] [diff.go:604] ["checksum failed"] ["chunk id"="{\"table-index\":0,\"bucket-index-left\":0,\"bucket-index-right\":0,\"chunk-index\":0,\"chunk-count\":0}"] [table=t2] ["upstream chunk size"=3] ["downstream chunk size"=0] ["upstream checksum"=1856024304] ["downstream checksum"=0]
[2024/05/04 17:57:54.345 +08:00] [DEBUG] [tidb.go:189] ["select data"] [sql="SELECT /*!40001 SQL_NO_CACHE */ `id`, `val` FROM `sink_hang`.`t2` WHERE ((TRUE) AND (TRUE)) ORDER BY `id`"] [args=null]
[2024/05/04 17:57:54.345 +08:00] [DEBUG] [diff.go:663] ["[insert]"] [sql="REPLACE INTO `sink_hang`.`t1`(`id`,`val`) VALUES (1,NULL);"]
[2024/05/04 17:57:54.345 +08:00] [DEBUG] [diff.go:663] ["[insert]"] [sql="REPLACE INTO `sink_hang`.`t1`(`id`,`val`) VALUES (2,NULL);"]
[2024/05/04 17:57:54.345 +08:00] [DEBUG] [diff.go:663] ["[insert]"] [sql="REPLACE INTO `sink_hang`.`t1`(`id`,`val`) VALUES (3,NULL);"]
[2024/05/04 17:57:54.346 +08:00] [DEBUG] [diff.go:762] ["insert node"] ["chunk index"="{\"table-index\":1,\"bucket-index-left\":0,\"bucket-index-right\":0,\"chunk-index\":0,\"chunk-count\":0}"]
[2024/05/04 17:57:54.346 +08:00] [DEBUG] [tidb.go:189] ["select data"] [sql="SELECT /*!40001 SQL_NO_CACHE */ `id`, `val` FROM `sink_hang`.`t2` WHERE ((TRUE) AND (TRUE)) ORDER BY `id`"] [args=null]
[2024/05/04 17:57:54.347 +08:00] [DEBUG] [diff.go:663] ["[insert]"] [sql="REPLACE INTO `sink_hang`.`t2`(`id`,`val`) VALUES (1,NULL);"]
[2024/05/04 17:57:54.347 +08:00] [DEBUG] [diff.go:663] ["[insert]"] [sql="REPLACE INTO `sink_hang`.`t2`(`id`,`val`) VALUES (2,NULL);"]
[2024/05/04 17:57:54.347 +08:00] [DEBUG] [diff.go:663] ["[insert]"] [sql="REPLACE INTO `sink_hang`.`t2`(`id`,`val`) VALUES (3,NULL);"]
[2024/05/04 17:57:54.347 +08:00] [DEBUG] [diff.go:263] ["all consume tasks finished"]
[2024/05/04 17:57:54.347 +08:00] [DEBUG] [diff.go:762] ["insert node"] ["chunk index"="{\"table-index\":0,\"bucket-index-left\":0,\"bucket-index-right\":0,\"chunk-index\":0,\"chunk-count\":0}"]
[2024/05/04 17:57:54.347 +08:00] [INFO] [diff.go:732] ["write sql channel closed"]
[2024/05/04 17:57:54.347 +08:00] [INFO] [diff.go:723] ["close writeSQLs goroutine"]
[2024/05/04 17:57:54.347 +08:00] [INFO] [diff.go:405] ["Stop do checkpoint"]
[2024/05/04 17:57:54.347 +08:00] [INFO] [checkpoints.go:225] ["save checkpoint"] [chunk="{\"state\":\"failed\",\"chunk-range\":{\"index\":{\"table-index\":1,\"bucket-index-left\":0,\"bucket-index-right\":0,\"chunk-index\":0,\"chunk-count\":0},\"type\":2,\"bounds\":[],\"is-first\":false,\"is-last\":false,\"where\":\"((TRUE) AND (TRUE))\",\"args\":null},\"index-id\":0}"] [state=failed]
[2024/05/04 17:57:54.347 +08:00] [INFO] [diff.go:379] ["close handleCheckpoint goroutine"]
[2024/05/04 17:57:54.348 +08:00] [WARN] [report.go:164] ["fail to get the correct size of table, if you want to get the correct size, please analyze the corresponding tables"] [table=`sink_hang`.`t2`] []
[2024/05/04 17:57:54.349 +08:00] [WARN] [report.go:164] ["fail to get the correct size of table, if you want to get the correct size, please analyze the corresponding tables"] [table=`sink_hang`.`t1`] []
[2024/05/04 17:57:54.349 +08:00] [INFO] [main.go:114] ["check data finished"] [cost=34.670827ms]
[2024/05/04 17:57:54.349 +08:00] [WARN] [main.go:105] ["check failed!!!"]
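The "checksum failed" debug lines show 3 rows with checksum 1856024304 upstream against 0 rows downstream for both tables, which is why both comparisons report failure. The mismatch can be reproduced by hand with the query from the "count and checksum" debug lines; a sketch, assuming the upstream TiDB on port 4000 and the downstream on 3306 as in the config dump (the SQL string literal uses double quotes instead of the logged single quotes to avoid shell quoting clashes):

# Run the per-table count/CRC32 checksum against both endpoints.
Q='SELECT COUNT(*) as CNT, BIT_XOR(CAST(CRC32(CONCAT_WS(",", `id`, `val`, CONCAT(ISNULL(`id`), ISNULL(`val`)))) AS UNSIGNED)) as CHECKSUM FROM `sink_hang`.`t1`;'
mysql -h 127.0.0.1 -P 4000 -u root -e "$Q"   # upstream: the log reports CNT=3, CHECKSUM=1856024304
mysql -h 127.0.0.1 -P 3306 -u root -e "$Q"   # downstream: the log reports CNT=0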

+ nc -z localhost 6650
+ i=2
+ '[' 2 -gt 20 ']'
+ sleep 2
+ nc -z localhost 6650
+ echo 'Waiting for pulsar namespace to be ready...'
Waiting for pulsar namespace to be ready...
+ i=0
+ /usr/local/pulsar/bin/pulsar-admin namespaces list public
<<< Run all test success >>>
[Pipeline] }
Cache not saved (ws/jenkins-pingcap-tiflow-pull_cdc_integration_pulsar_test-1528/tiflow-cdc already exists)
[Pipeline] // cache
[Pipeline] }
[Pipeline] // dir
[Pipeline] }
[Pipeline] // timeout
[Pipeline] }
public/default
++ date
+ echo '[Sat May  4 17:58:04 CST 2024] <<<<<< pulsar is ready >>>>>>'
[Sat May  4 17:58:04 CST 2024] <<<<<< pulsar is ready >>>>>>
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.savepoint.cli.18357.out cli changefeed create --start-ts=449528901432508418 '--sink-uri=pulsar://127.0.0.1:6650/ticdc-savepoint-test-16898?protocol=canal-json&enable-tidb-extension=true'
[Pipeline] // stage
[Pipeline] }
[Pipeline] // container
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // podTemplate
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
Create changefeed successfully!
ID: b4f7338f-4360-4f14-8cec-9bc9ede3a558
Info: {"upstream_id":7365081461474273269,"namespace":"default","id":"b4f7338f-4360-4f14-8cec-9bc9ede3a558","sink_uri":"pulsar://127.0.0.1:6650/ticdc-savepoint-test-16898?protocol=canal-json\u0026enable-tidb-extension=true","create_time":"2024-05-04T17:58:05.428834784+08:00","start_ts":449528901432508418,"config":{"memory_quota":1073741824,"case_sensitive":false,"force_replicate":false,"ignore_ineligible_table":false,"check_gc_safe_point":true,"enable_sync_point":false,"enable_table_monitor":false,"bdr_mode":false,"sync_point_interval":600000000000,"sync_point_retention":86400000000000,"filter":{"rules":["*.*"]},"mounter":{"worker_num":16},"sink":{"protocol":"canal-json","csv":{"delimiter":",","quote":"\"","null":"\\N","include_commit_ts":false,"binary_encoding_method":"base64","output_old_value":false,"output_handle_key":false},"encoder_concurrency":32,"terminator":"\r\n","date_separator":"day","enable_partition_separator":true,"enable_kafka_sink_v2":false,"only_output_updated_columns":false,"delete_only_output_handle_key_columns":false,"content_compatible":false,"pulsar_config":{"connection-timeout":5,"operation-timeout":30,"batching-max-messages":1000,"batching-max-publish-delay":10,"send-timeout":30},"advance_timeout":150,"send_bootstrap_interval_in_sec":120,"send_bootstrap_in_msg_count":10000,"send_bootstrap_to_all_partition":true,"debezium_disable_schema":false,"debezium":{"output_old_value":true},"open":{"output_old_value":true}},"consistent":{"level":"none","max_log_size":64,"flush_interval":2000,"meta_flush_interval":200,"encoding_worker_num":16,"flush_worker_num":8,"use_file_backend":false,"memory_usage":{"memory_quota_percentage":50}},"scheduler":{"enable_table_across_nodes":false,"region_threshold":100000,"write_key_threshold":0},"integrity":{"integrity_check_level":"none","corruption_handle_level":"warn"},"changefeed_error_stuck_duration":1800000000000,"synced_status":{"synced_check_interval":300,"checkpoint_interval":15}},"state":"normal","creator_version":"v8.2.0-alpha-84-gecb3fedc8","resolved_ts":449528901432508418,"checkpoint_ts":449528901432508418,"checkpoint_time":"2024-05-04 17:57:51.114"}
PASS
coverage: 2.4% of statements in github.com/pingcap/tiflow/...
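With the changefeed created, its state can be read back through the same cli. A sketch using the ID printed above (the --server address is the CDC listen address seen elsewhere in this log):

cdc cli changefeed query -c b4f7338f-4360-4f14-8cec-9bc9ede3a558 --server=http://127.0.0.1:8300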
++ curl -X GET http://127.0.0.1:8300/api/v2/changefeeds/test-1/synced
+ synced_status='{"synced":true,"sink_checkpoint_ts":"2024-05-04 17:57:52.997","puller_resolved_ts":"2024-05-04 17:57:46.597","last_synced_ts":"2024-05-04 17:55:37.696","now_ts":"2024-05-04 17:57:54.000","info":"Data syncing is finished"}'
++ echo '{"synced":true,"sink_checkpoint_ts":"2024-05-04' '17:57:52.997","puller_resolved_ts":"2024-05-04' '17:57:46.597","last_synced_ts":"2024-05-04' '17:55:37.696","now_ts":"2024-05-04' '17:57:54.000","info":"Data' syncing is 'finished"}'
++ jq .synced
+ status=true
+ '[' true '!=' true ']'
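The trace above probes the v2 synced API once and compares the `synced` field. The same check as a small standalone sketch (endpoint and changefeed name taken from the trace):

# Poll the synced status of changefeed test-1 and fail fast if it is not synced.
synced=$(curl -s -X GET http://127.0.0.1:8300/api/v2/changefeeds/test-1/synced | jq -r .synced)
if [ "$synced" != 'true' ]; then
    echo 'changefeed test-1 is not synced yet'
    exit 1
fi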
+ kill_pd
++ ps aux
++ grep pd-server
++ grep /tmp/tidb_cdc_test/synced_status_with_redo
+ info='jenkins    11418  7.8  0.0 13725720 141936 ?     Sl   17:55   0:12 pd-server --advertise-client-urls http://127.0.0.1:2379 --client-urls http://0.0.0.0:2379 --advertise-peer-urls http://127.0.0.1:2380 --peer-urls http://0.0.0.0:2380 --config /tmp/tidb_cdc_test/synced_status_with_redo/pd-config.toml --log-file /tmp/tidb_cdc_test/synced_status_with_redo/pd1.log --data-dir /tmp/tidb_cdc_test/synced_status_with_redo/pd1 --name=pd1 --initial-cluster=pd1=http://127.0.0.1:2380
jenkins    11477  5.2  0.0 13784276 138224 ?     Sl   17:55   0:08 pd-server --advertise-client-urls http://127.0.0.1:2479 --client-urls http://0.0.0.0:2479 --advertise-peer-urls http://127.0.0.1:2480 --peer-urls http://0.0.0.0:2480 --config /tmp/tidb_cdc_test/synced_status_with_redo/pd-config.toml --log-file /tmp/tidb_cdc_test/synced_status_with_redo/down_pd.log --data-dir /tmp/tidb_cdc_test/synced_status_with_redo/down_pd'
++ ps aux
++ grep pd-server
++ grep /tmp/tidb_cdc_test/synced_status_with_redo
++ awk '{print $2}'
++ xargs kill -9
+ sleep 20
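kill_pd above locates pd-server PIDs by grepping ps output for the test working directory, then kills them. A sketch of the pattern (the function name comes from the trace; WORK_DIR is an assumed variable standing in for the literal /tmp/tidb_cdc_test/synced_status_with_redo path):

kill_pd() {
    # Log the matching processes, then kill them by PID, exactly as traced.
    ps aux | grep pd-server | grep "$WORK_DIR"
    ps aux | grep pd-server | grep "$WORK_DIR" | awk '{print $2}' | xargs kill -9
}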
{"level":"warn","ts":1714816680.3930712,"caller":"v3@v3.5.12/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc00390c700/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"info","ts":1714816680.393125,"caller":"v3@v3.5.12/client.go:210","msg":"Auto sync endpoints failed.","error":"context deadline exceeded"}
{"level":"warn","ts":1714816680.4957607,"caller":"v3@v3.5.12/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc001008700/127.0.0.1:2479","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2479: connect: connection refused\""}
{"level":"info","ts":1714816680.4958208,"caller":"v3@v3.5.12/client.go:210","msg":"Auto sync endpoints failed.","error":"context deadline exceeded"}
{"level":"warn","ts":1714816681.3719773,"caller":"v3@v3.5.12/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc002316e00/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"info","ts":1714816681.3720279,"caller":"v3@v3.5.12/client.go:210","msg":"Auto sync endpoints failed.","error":"context deadline exceeded"}
{"level":"warn","ts":"2024-05-04T17:58:05.241982+0800","logger":"etcd-client","caller":"v3@v3.5.12/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc0014c4000/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-05-04T17:58:05.24853+0800","logger":"etcd-client","caller":"v3@v3.5.12/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc0003ad340/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-05-04T17:58:05.293395+0800","logger":"etcd-client","caller":"v3@v3.5.12/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc0012fa000/127.0.0.1:2479","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2479: connect: connection refused\""}
+ set +x
[Sat May  4 17:58:06 CST 2024] <<<<<< START Pulsar consumer in savepoint case >>>>>>
table savepoint.finish_mark not exists for 1-th check, retry later
[Pipeline] }
Cache not saved (inner-step execution failed)
[Pipeline] // cache
[Pipeline] }
[Pipeline] // dir
Post stage
[Pipeline] sh
+ ls /tmp/tidb_cdc_test/
cov.owner_resign.28082810.out
cov.owner_resign.35613563.out
cov.owner_resign.cli.3321.out
cov.processor_etcd_worker_delay.68026804.out
owner_resign
processor_etcd_worker_delay
sink_hang
sql_res.owner_resign.txt
sql_res.processor_etcd_worker_delay.txt
sql_res.sink_hang.txt
++ find /tmp/tidb_cdc_test/ -type f -name '*.log'
+ tar -cvzf log-G16.tar.gz /tmp/tidb_cdc_test/owner_resign/cdc1.log /tmp/tidb_cdc_test/owner_resign/pd1.log /tmp/tidb_cdc_test/owner_resign/pulsar_stdout.log /tmp/tidb_cdc_test/owner_resign/cdc_pulsar_consumer_stdout.log /tmp/tidb_cdc_test/owner_resign/tidb-slow.log /tmp/tidb_cdc_test/owner_resign/stdout1.log /tmp/tidb_cdc_test/owner_resign/tidb.log /tmp/tidb_cdc_test/owner_resign/sync_diff_inspector.log /tmp/tidb_cdc_test/owner_resign/tikv3.log /tmp/tidb_cdc_test/owner_resign/stdout2.log /tmp/tidb_cdc_test/owner_resign/cdc_pulsar_consumer.log /tmp/tidb_cdc_test/owner_resign/tidb_other.log /tmp/tidb_cdc_test/owner_resign/tidb_down.log /tmp/tidb_cdc_test/owner_resign/tikv1.log /tmp/tidb_cdc_test/owner_resign/tikv_down.log /tmp/tidb_cdc_test/owner_resign/cdc2.log /tmp/tidb_cdc_test/owner_resign/down_pd.log /tmp/tidb_cdc_test/owner_resign/tikv2.log /tmp/tidb_cdc_test/sink_hang/pd1.log /tmp/tidb_cdc_test/sink_hang/pd1/region-meta/000001.log /tmp/tidb_cdc_test/sink_hang/pd1/hot-region/000001.log /tmp/tidb_cdc_test/sink_hang/cdc_pulsar_consumer_stdout.log /tmp/tidb_cdc_test/sink_hang/cdc_data/tmp/sorter/0006/000002.log /tmp/tidb_cdc_test/sink_hang/cdc_data/tmp/sorter/0004/000002.log /tmp/tidb_cdc_test/sink_hang/cdc_data/tmp/sorter/0000/000002.log /tmp/tidb_cdc_test/sink_hang/cdc_data/tmp/sorter/0005/000002.log /tmp/tidb_cdc_test/sink_hang/cdc_data/tmp/sorter/0001/000002.log /tmp/tidb_cdc_test/sink_hang/cdc_data/tmp/sorter/0007/000002.log /tmp/tidb_cdc_test/sink_hang/cdc_data/tmp/sorter/0003/000002.log /tmp/tidb_cdc_test/sink_hang/cdc_data/tmp/sorter/0002/000002.log /tmp/tidb_cdc_test/sink_hang/cdc.log /tmp/tidb_cdc_test/sink_hang/tidb-slow.log /tmp/tidb_cdc_test/sink_hang/tikv3/db/000005.log /tmp/tidb_cdc_test/sink_hang/tidb.log /tmp/tidb_cdc_test/sink_hang/sync_diff_inspector.log /tmp/tidb_cdc_test/sink_hang/tikv3.log /tmp/tidb_cdc_test/sink_hang/stdout.log /tmp/tidb_cdc_test/sink_hang/cdc_pulsar_consumer.log /tmp/tidb_cdc_test/sink_hang/sync_diff/output/sync_diff.log /tmp/tidb_cdc_test/sink_hang/tiflash/db/proxy/db/000005.log /tmp/tidb_cdc_test/sink_hang/tiflash/log/error.log /tmp/tidb_cdc_test/sink_hang/tiflash/log/server.log /tmp/tidb_cdc_test/sink_hang/tiflash/log/proxy.log /tmp/tidb_cdc_test/sink_hang/tidb_other.log /tmp/tidb_cdc_test/sink_hang/tidb_down.log /tmp/tidb_cdc_test/sink_hang/tikv1.log /tmp/tidb_cdc_test/sink_hang/tikv_down.log /tmp/tidb_cdc_test/sink_hang/tikv2/db/000005.log /tmp/tidb_cdc_test/sink_hang/tikv1/db/000005.log /tmp/tidb_cdc_test/sink_hang/down_pd/region-meta/000001.log /tmp/tidb_cdc_test/sink_hang/down_pd/hot-region/000001.log /tmp/tidb_cdc_test/sink_hang/down_pd.log /tmp/tidb_cdc_test/sink_hang/tikv2.log /tmp/tidb_cdc_test/sink_hang/tikv_down/db/000005.log /tmp/tidb_cdc_test/processor_etcd_worker_delay/pd1.log /tmp/tidb_cdc_test/processor_etcd_worker_delay/pulsar_stdout.log /tmp/tidb_cdc_test/processor_etcd_worker_delay/cdc_pulsar_consumer_stdout.log /tmp/tidb_cdc_test/processor_etcd_worker_delay/cdc.log /tmp/tidb_cdc_test/processor_etcd_worker_delay/tidb-slow.log /tmp/tidb_cdc_test/processor_etcd_worker_delay/tidb.log /tmp/tidb_cdc_test/processor_etcd_worker_delay/sync_diff_inspector.log /tmp/tidb_cdc_test/processor_etcd_worker_delay/tikv3.log /tmp/tidb_cdc_test/processor_etcd_worker_delay/stdout.log /tmp/tidb_cdc_test/processor_etcd_worker_delay/cdc_pulsar_consumer.log /tmp/tidb_cdc_test/processor_etcd_worker_delay/tidb_other.log /tmp/tidb_cdc_test/processor_etcd_worker_delay/tidb_down.log /tmp/tidb_cdc_test/processor_etcd_worker_delay/tikv1.log 
/tmp/tidb_cdc_test/processor_etcd_worker_delay/tikv_down.log /tmp/tidb_cdc_test/processor_etcd_worker_delay/down_pd.log /tmp/tidb_cdc_test/processor_etcd_worker_delay/tikv2.log
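The archive step above expands a find over the test directory into tar arguments. The same collection step as a sketch (archive name taken from the trace; the unquoted $log_files expansion is intentional, since these paths contain no spaces):

log_files=$(find /tmp/tidb_cdc_test/ -type f -name '*.log')
tar -cvzf log-G16.tar.gz $log_files
ls -alh log-G16.tar.gz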
tar: Removing leading `/' from member names
/tmp/tidb_cdc_test/owner_resign/cdc1.log
/tmp/tidb_cdc_test/owner_resign/pd1.log
/tmp/tidb_cdc_test/owner_resign/pulsar_stdout.log
/tmp/tidb_cdc_test/owner_resign/cdc_pulsar_consumer_stdout.log
/tmp/tidb_cdc_test/owner_resign/tidb-slow.log
/tmp/tidb_cdc_test/owner_resign/stdout1.log
/tmp/tidb_cdc_test/owner_resign/tidb.log
/tmp/tidb_cdc_test/owner_resign/sync_diff_inspector.log
/tmp/tidb_cdc_test/owner_resign/tikv3.log
/tmp/tidb_cdc_test/owner_resign/stdout2.log
/tmp/tidb_cdc_test/owner_resign/cdc_pulsar_consumer.log
/tmp/tidb_cdc_test/owner_resign/tidb_other.log
/tmp/tidb_cdc_test/owner_resign/tidb_down.log
/tmp/tidb_cdc_test/owner_resign/tikv1.log
/tmp/tidb_cdc_test/owner_resign/tikv_down.log
table savepoint.finish_mark not exists for 2-th check, retry later
/tmp/tidb_cdc_test/owner_resign/cdc2.log
/tmp/tidb_cdc_test/owner_resign/down_pd.log
/tmp/tidb_cdc_test/owner_resign/tikv2.log
/tmp/tidb_cdc_test/sink_hang/pd1.log
/tmp/tidb_cdc_test/sink_hang/pd1/region-meta/000001.log
/tmp/tidb_cdc_test/sink_hang/pd1/hot-region/000001.log
/tmp/tidb_cdc_test/sink_hang/cdc_pulsar_consumer_stdout.log
/tmp/tidb_cdc_test/sink_hang/cdc_data/tmp/sorter/0006/000002.log
/tmp/tidb_cdc_test/sink_hang/cdc_data/tmp/sorter/0004/000002.log
/tmp/tidb_cdc_test/sink_hang/cdc_data/tmp/sorter/0000/000002.log
/tmp/tidb_cdc_test/sink_hang/cdc_data/tmp/sorter/0005/000002.log
/tmp/tidb_cdc_test/sink_hang/cdc_data/tmp/sorter/0001/000002.log
/tmp/tidb_cdc_test/sink_hang/cdc_data/tmp/sorter/0007/000002.log
/tmp/tidb_cdc_test/sink_hang/cdc_data/tmp/sorter/0003/000002.log
/tmp/tidb_cdc_test/sink_hang/cdc_data/tmp/sorter/0002/000002.log
/tmp/tidb_cdc_test/sink_hang/cdc.log
/tmp/tidb_cdc_test/sink_hang/tidb-slow.log
/tmp/tidb_cdc_test/sink_hang/tikv3/db/000005.log
/tmp/tidb_cdc_test/sink_hang/tidb.log
/tmp/tidb_cdc_test/sink_hang/sync_diff_inspector.log
/tmp/tidb_cdc_test/sink_hang/tikv3.log
/tmp/tidb_cdc_test/sink_hang/stdout.log
/tmp/tidb_cdc_test/sink_hang/cdc_pulsar_consumer.log
/tmp/tidb_cdc_test/sink_hang/sync_diff/output/sync_diff.log
/tmp/tidb_cdc_test/sink_hang/tiflash/db/proxy/db/000005.log
/tmp/tidb_cdc_test/sink_hang/tiflash/log/error.log
/tmp/tidb_cdc_test/sink_hang/tiflash/log/server.log
/tmp/tidb_cdc_test/sink_hang/tiflash/log/proxy.log
/tmp/tidb_cdc_test/sink_hang/tidb_other.log
/tmp/tidb_cdc_test/sink_hang/tidb_down.log
/tmp/tidb_cdc_test/sink_hang/tikv1.log
/tmp/tidb_cdc_test/sink_hang/tikv_down.log
/tmp/tidb_cdc_test/sink_hang/tikv2/db/000005.log
/tmp/tidb_cdc_test/sink_hang/tikv1/db/000005.log
/tmp/tidb_cdc_test/sink_hang/down_pd/region-meta/000001.log
/tmp/tidb_cdc_test/sink_hang/down_pd/hot-region/000001.log
/tmp/tidb_cdc_test/sink_hang/down_pd.log
/tmp/tidb_cdc_test/sink_hang/tikv2.log
/tmp/tidb_cdc_test/sink_hang/tikv_down/db/000005.log
/tmp/tidb_cdc_test/processor_etcd_worker_delay/pd1.log
/tmp/tidb_cdc_test/processor_etcd_worker_delay/pulsar_stdout.log
/tmp/tidb_cdc_test/processor_etcd_worker_delay/cdc_pulsar_consumer_stdout.log
/tmp/tidb_cdc_test/processor_etcd_worker_delay/cdc.log
/tmp/tidb_cdc_test/processor_etcd_worker_delay/tidb-slow.log
/tmp/tidb_cdc_test/processor_etcd_worker_delay/tidb.log
/tmp/tidb_cdc_test/processor_etcd_worker_delay/sync_diff_inspector.log
/tmp/tidb_cdc_test/processor_etcd_worker_delay/tikv3.log
/tmp/tidb_cdc_test/processor_etcd_worker_delay/stdout.log
/tmp/tidb_cdc_test/processor_etcd_worker_delay/cdc_pulsar_consumer.log
/tmp/tidb_cdc_test/processor_etcd_worker_delay/tidb_other.log
/tmp/tidb_cdc_test/processor_etcd_worker_delay/tidb_down.log
/tmp/tidb_cdc_test/processor_etcd_worker_delay/tikv1.log
/tmp/tidb_cdc_test/processor_etcd_worker_delay/tikv_down.log
/tmp/tidb_cdc_test/processor_etcd_worker_delay/down_pd.log
/tmp/tidb_cdc_test/processor_etcd_worker_delay/tikv2.log
+ ls -alh log-G16.tar.gz
-rw-r--r-- 1 jenkins jenkins 12M May  4 17:58 log-G16.tar.gz
[Pipeline] archiveArtifacts
Archiving artifacts
table savepoint.finish_mark exists
check diff successfully
wait process cdc.test exit for 1-th time...
{"level":"warn","ts":"2024-05-04T17:58:11.243173+0800","logger":"etcd-client","caller":"v3@v3.5.12/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc0014c4000/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-05-04T17:58:11.250483+0800","logger":"etcd-client","caller":"v3@v3.5.12/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc0003ad340/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-05-04T17:58:11.294343+0800","logger":"etcd-client","caller":"v3@v3.5.12/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc0012fa000/127.0.0.1:2479","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2479: connect: connection refused\""}
wait process cdc.test exit for 2-th time...
Recording fingerprints
[Pipeline] }
[Pipeline] // timeout
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // container
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // podTemplate
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
wait process cdc.test exit for 3-th time...
[Pipeline] // stage
[Pipeline] }
Failed in branch Matrix - TEST_GROUP = 'G16'
Sending interrupt signal to process
Killing processes
cdc.test: no process found
wait process cdc.test exit for 4-th time...
process cdc.test already exit
[Sat May  4 17:58:13 CST 2024] <<<<<< run test case savepoint success! >>>>>>
kill finished with exit code 0
Sending interrupt signal to process
Killing processes
++ stop_tidb_cluster
script returned exit code 143
kill finished with exit code 0
[Pipeline] }
Cache not saved (inner-step execution failed)
script returned exit code 143
[Pipeline] // cache
[Pipeline] }
[Pipeline] }
Cache not saved (inner-step execution failed)
[Pipeline] // dir
[Pipeline] // cache
[Pipeline] }
[Pipeline] }
[Pipeline] // timeout
[Pipeline] // dir
[Pipeline] }
[Pipeline] }
[Pipeline] // stage
[Pipeline] // timeout
[Pipeline] }
[Pipeline] }
[Pipeline] // container
[Pipeline] // stage
[Pipeline] }
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] // container
[Pipeline] }
[Pipeline] }
[Pipeline] // node
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] }
[Pipeline] // podTemplate
[Pipeline] // node
[Pipeline] }
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] // podTemplate
[Pipeline] }
[Pipeline] }
[Pipeline] // stage
[Pipeline] // withEnv
[Pipeline] }
Failed in branch Matrix - TEST_GROUP = 'G08'
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
Failed in branch Matrix - TEST_GROUP = 'G09'
[Pipeline] // parallel
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // timeout
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // container
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // podTemplate
[Pipeline] End of Pipeline
ERROR: script returned exit code 1
Finished: FAILURE