table move_table.check2 not exists for 4-th check, retry later
[Fri May 17 15:31:51 CST 2024] <<<<<< START cdc server in sink_hang case >>>>>>
+ [[ '' == \t\r\u\e ]]
+ set +e
+ get_info_fail_msg='failed to get info:'
+ etcd_info_msg='etcd info'
+ '[' -z '' ']'
+ curl_status_cmd='curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info'
+ [[ no != \n\o ]]
+ GO_FAILPOINTS='github.com/pingcap/tiflow/cdc/sink/dmlsink/txn/mysql/MySQLSinkExecDMLError=2*return(true)'
+ (( i = 0 ))
+ (( i <= 50 ))
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.sink_hang.95759577.out server --log-file /tmp/tidb_cdc_test/sink_hang/cdc.log --log-level debug --data-dir /tmp/tidb_cdc_test/sink_hang/cdc_data --cluster-id default --addr 127.0.0.1:8300 --pd http://127.0.0.1:2379
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connection refused
* Failed connect to 127.0.0.1:8300; Connection refused
* Closing connection 0
+ res=
+ echo ''
+ grep -q 'failed to get info:'
+ echo ''
+ grep -q 'etcd info'
+ '[' 0 -eq 50 ']'
+ sleep 3
table move_table.check2 not exists for 5-th check, retry later
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_storage_test/tiflow/tests/integration_tests/changefeed_auto_stop/run.sh using Sink-Type: storage... <<=================
The 1 times to try to start tidb cluster...
table move_table.check2 exists
check diff successfully
start tidb cluster in /tmp/tidb_cdc_test/changefeed_auto_stop
Starting Upstream PD...
Release Version: v7.5.1-7-g7eb188c4f
Edition: Community
Git Commit Hash: 7eb188c4f8caba495a33eafedd4540afbc4ca6fc
Git Branch: release-7.5
UTC Build Time:  2024-05-13 04:29:07
Starting Downstream PD...
Release Version: v7.5.1-7-g7eb188c4f
Edition: Community
Git Commit Hash: 7eb188c4f8caba495a33eafedd4540afbc4ca6fc
Git Branch: release-7.5
UTC Build Time:  2024-05-13 04:29:07
Verifying upstream PD is started...
+ (( i++ ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connected to 127.0.0.1 (127.0.0.1) port 8300 (#0)
> GET /debug/info HTTP/1.1
> User-Agent: curl/7.29.0
> Host: 127.0.0.1:8300
> Accept: */*
> 
< HTTP/1.1 200 OK
< Date: Fri, 17 May 2024 07:31:54 GMT
< Content-Length: 613
< Content-Type: text/plain; charset=utf-8
< 
{ [data not shown]
* Connection #0 to host 127.0.0.1 left intact
+ res='

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/0a2367bb-87af-4d6d-a472-8cce3127ba3a
	{"id":"0a2367bb-87af-4d6d-a472-8cce3127ba3a","address":"127.0.0.1:8300","version":"v7.5.1-31-gc7ef293f7"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f8576a74df3
	0a2367bb-87af-4d6d-a472-8cce3127ba3a

/tidb/cdc/default/default/upstream/7369867945341863518
	{"id":7369867945341863518,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/0a2367bb-87af-4d6d-a472-8cce3127ba3a
	{"id":"0a2367bb-87af-4d6d-a472-8cce3127ba3a","address":"127.0.0.1:8300","version":"v7.5.1-31-gc7ef293f7"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f8576a74df3
	0a2367bb-87af-4d6d-a472-8cce3127ba3a

/tidb/cdc/default/default/upstream/7369867945341863518
	{"id":7369867945341863518,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'failed to get info:'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/0a2367bb-87af-4d6d-a472-8cce3127ba3a
	{"id":"0a2367bb-87af-4d6d-a472-8cce3127ba3a","address":"127.0.0.1:8300","version":"v7.5.1-31-gc7ef293f7"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f8576a74df3
	0a2367bb-87af-4d6d-a472-8cce3127ba3a

/tidb/cdc/default/default/upstream/7369867945341863518
	{"id":7369867945341863518,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'etcd info'
+ break
+ set +x
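The block above is the harness's readiness probe for a freshly started cdc server: start cdc.test, then poll /debug/info with curl up to 50 times, sleeping 3 seconds between attempts, until the response contains 'etcd info', and bail out early if it ever contains 'failed to get info:'. A minimal sketch of that loop, using an illustrative function name rather than the harness's own:

    # readiness-poll sketch; assumes curl is available and the 3s / 50-attempt budget seen above
    wait_for_cdc_ready() {
        local addr=$1
        for i in $(seq 1 50); do
            res=$(curl -vsL --max-time 20 "http://${addr}/debug/info" 2>&1)
            echo "$res" | grep -q 'failed to get info:' && return 1   # server answered with an error
            echo "$res" | grep -q 'etcd info' && return 0             # capture registered in etcd, ready
            sleep 3
        done
        return 1   # never became ready
    }
    # wait_for_cdc_ready 127.0.0.1:8300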
+ workdir=/tmp/tidb_cdc_test/sink_hang
+ sink_uri='file:///tmp/tidb_cdc_test/sink_hang/storage_test/ticdc-sink-hang-test-30100?protocol=canal-json&enable-tidb-extension=true'
+ consumer_replica_config=
+ log_suffix=
++ pwd
+ pwd=/tmp/tidb_cdc_test/sink_hang
++ date
+ echo '[Fri May 17 15:31:54 CST 2024] <<<<<< START storage consumer in sink_hang case >>>>>>'
[Fri May 17 15:31:54 CST 2024] <<<<<< START storage consumer in sink_hang case >>>>>>
+ cd /tmp/tidb_cdc_test/sink_hang
+ '[' '' '!=' '' ']'
+ cd /tmp/tidb_cdc_test/sink_hang
+ set +x
+ cdc_storage_consumer --log-file /tmp/tidb_cdc_test/sink_hang/cdc_storage_consumer.log --log-level debug --upstream-uri 'file:///tmp/tidb_cdc_test/sink_hang/storage_test/ticdc-sink-hang-test-30100?protocol=canal-json&enable-tidb-extension=true' --downstream-uri 'mysql://root@127.0.0.1:3306/?safe-mode=true&batch-dml-enable=false'
wait process cdc.test exit for 1-th time...
check_changefeed_status 127.0.0.1:8300 2c898032-5552-4d43-8f2f-c39fb682c9ec normal last_error null
+ endpoint=127.0.0.1:8300
+ changefeed_id=2c898032-5552-4d43-8f2f-c39fb682c9ec
+ expected_state=normal
+ field=last_error
+ error_pattern=null
++ curl 127.0.0.1:8300/api/v2/changefeeds/2c898032-5552-4d43-8f2f-c39fb682c9ec/status
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100    86  100    86    0     0    575      0 --:--:-- --:--:-- --:--:--   577
+ info='{"state":"normal","resolved_ts":449821046083944452,"checkpoint_ts":449821046083944452}'
+ echo '{"state":"normal","resolved_ts":449821046083944452,"checkpoint_ts":449821046083944452}'
{"state":"normal","resolved_ts":449821046083944452,"checkpoint_ts":449821046083944452}
++ echo '{"state":"normal","resolved_ts":449821046083944452,"checkpoint_ts":449821046083944452}'
++ jq -r .state
+ state=normal
+ [[ ! normal == \n\o\r\m\a\l ]]
+ [[ -z last_error ]]
++ echo '{"state":"normal","resolved_ts":449821046083944452,"checkpoint_ts":449821046083944452}'
++ jq -r .last_error.message
+ error_msg=null
+ [[ ! null =~ null ]]
run task successfully
check_changefeed_status 127.0.0.1:8300 2c898032-5552-4d43-8f2f-c39fb682c9ec normal last_warning null
+ endpoint=127.0.0.1:8300
+ changefeed_id=2c898032-5552-4d43-8f2f-c39fb682c9ec
+ expected_state=normal
+ field=last_warning
+ error_pattern=null
++ curl 127.0.0.1:8300/api/v2/changefeeds/2c898032-5552-4d43-8f2f-c39fb682c9ec/status
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
wait process cdc.test exit for 2-th time...

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100    86  100    86    0     0    664      0 --:--:-- --:--:-- --:--:--   661
100    86  100    86    0     0    664      0 --:--:-- --:--:-- --:--:--   661
+ info='{"state":"normal","resolved_ts":449821046083944452,"checkpoint_ts":449821046083944452}'
+ echo '{"state":"normal","resolved_ts":449821046083944452,"checkpoint_ts":449821046083944452}'
{"state":"normal","resolved_ts":449821046083944452,"checkpoint_ts":449821046083944452}
++ echo '{"state":"normal","resolved_ts":449821046083944452,"checkpoint_ts":449821046083944452}'
++ jq -r .state
+ state=normal
+ [[ ! normal == \n\o\r\m\a\l ]]
+ [[ -z last_warning ]]
++ echo '{"state":"normal","resolved_ts":449821046083944452,"checkpoint_ts":449821046083944452}'
++ jq -r .last_warning.message
+ error_msg=null
+ [[ ! null =~ null ]]
run task successfully
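check_changefeed_status, as traced above, is a thin assertion over the v2 status API: fetch /api/v2/changefeeds/<id>/status, require .state to be "normal", and require the given field (last_error or last_warning) to have no message. Condensed into one function (the endpoint and jq fields are exactly those in the trace; the function name is illustrative):

    # condensed status assertion; expects curl and jq on PATH
    assert_changefeed_status() {
        local endpoint=$1 changefeed_id=$2 field=$3   # field: last_error or last_warning
        local info state msg
        info=$(curl -s "${endpoint}/api/v2/changefeeds/${changefeed_id}/status")
        state=$(echo "$info" | jq -r .state)
        [[ "$state" == "normal" ]] || { echo "unexpected state: $state" >&2; return 1; }
        msg=$(echo "$info" | jq -r ".${field}.message")
        [[ "$msg" == "null" ]] || { echo "unexpected ${field}: $msg" >&2; return 1; }
    }
    # assert_changefeed_status 127.0.0.1:8300 2c898032-5552-4d43-8f2f-c39fb682c9ec last_warning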
table sink_hang.t1 not exists for 1-th check, retry later
cdc.test: no process found
wait process cdc.test exit for 3-th time...
process cdc.test already exit
[Fri May 17 15:31:55 CST 2024] <<<<<< run test case move_table success! >>>>>>
table sink_hang.t1 not exists for 2-th check, retry later
Verifying downstream PD is started...
Starting Upstream TiKV...
TiKV 
Release Version:   7.5.2
Edition:           Community
Git Commit Hash:   f2be3c0b9f0e60b619dade22410979ca91f4d85a
Git Commit Branch: release-7.5
UTC Build Time:    2024-05-14 11:07:23
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Enable Features:   pprof-fp jemalloc mem-profiling portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Profile:           dist_release
Starting Downstream TiKV...
TiKV 
Release Version:   7.5.2
Edition:           Community
Git Commit Hash:   f2be3c0b9f0e60b619dade22410979ca91f4d85a
Git Commit Branch: release-7.5
UTC Build Time:    2024-05-14 11:07:23
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Enable Features:   pprof-fp jemalloc mem-profiling portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Profile:           dist_release
table sink_hang.t1 not exists for 3-th check, retry later
Starting Upstream TiDB...
Release Version: v7.5.1-69-g19b7d4f8f3
Edition: Community
Git Commit Hash: 19b7d4f8f3d1492d7ace4da21a1cdc37a390b333
Git Branch: release-7.5
UTC Build Time: 2024-05-16 15:24:14
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Starting Downstream TiDB...
Release Version: v7.5.1-69-g19b7d4f8f3
Edition: Community
Git Commit Hash: 19b7d4f8f3d1492d7ace4da21a1cdc37a390b333
Git Branch: release-7.5
UTC Build Time: 2024-05-16 15:24:14
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Verifying Upstream TiDB is started...
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
check_changefeed_state http://127.0.0.1:2379 e0b05583-fe52-471b-ade2-a0fb04f071c8 finished null
+ endpoints=http://127.0.0.1:2379
+ changefeed_id=e0b05583-fe52-471b-ade2-a0fb04f071c8
+ expected_state=finished
+ error_msg=null
+ tls_dir=null
+ [[ http://127.0.0.1:2379 =~ https ]]
++ cdc cli changefeed query --pd=http://127.0.0.1:2379 -c e0b05583-fe52-471b-ade2-a0fb04f071c8 -s
+ info='{
  "upstream_id": 7369867515892756516,
  "namespace": "default",
  "id": "e0b05583-fe52-471b-ade2-a0fb04f071c8",
  "state": "finished",
  "checkpoint_tso": 449821042854592520,
  "checkpoint_time": "2024-05-17 15:31:42.198",
  "error": null
}'
+ echo '{
  "upstream_id": 7369867515892756516,
  "namespace": "default",
  "id": "e0b05583-fe52-471b-ade2-a0fb04f071c8",
  "state": "finished",
  "checkpoint_tso": 449821042854592520,
  "checkpoint_time": "2024-05-17 15:31:42.198",
  "error": null
}'
{
  "upstream_id": 7369867515892756516,
  "namespace": "default",
  "id": "e0b05583-fe52-471b-ade2-a0fb04f071c8",
  "state": "finished",
  "checkpoint_tso": 449821042854592520,
  "checkpoint_time": "2024-05-17 15:31:42.198",
  "error": null
}
++ echo '{' '"upstream_id":' 7369867515892756516, '"namespace":' '"default",' '"id":' '"e0b05583-fe52-471b-ade2-a0fb04f071c8",' '"state":' '"finished",' '"checkpoint_tso":' 449821042854592520, '"checkpoint_time":' '"2024-05-17' '15:31:42.198",' '"error":' null '}'
++ jq -r .state
+ state=finished
+ [[ ! finished == \f\i\n\i\s\h\e\d ]]
++ echo '{' '"upstream_id":' 7369867515892756516, '"namespace":' '"default",' '"id":' '"e0b05583-fe52-471b-ade2-a0fb04f071c8",' '"state":' '"finished",' '"checkpoint_tso":' 449821042854592520, '"checkpoint_time":' '"2024-05-17' '15:31:42.198",' '"error":' null '}'
++ jq -r .error.message
+ message=null
+ [[ ! null =~ null ]]
run task successfully
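check_changefeed_state is the CLI counterpart of the same check: it queries the changefeed with cdc cli changefeed query --pd=<pd> -c <id> -s and asserts the expected state ("finished" here) plus a null error message via jq. Roughly, under those assumptions:

    # rough sketch of the CLI-based state assertion (flags as in the trace; helper name illustrative)
    assert_changefeed_state() {
        local pd=$1 changefeed_id=$2 expected=$3
        local info state message
        info=$(cdc cli changefeed query --pd="$pd" -c "$changefeed_id" -s)
        state=$(echo "$info" | jq -r .state)
        [[ "$state" == "$expected" ]] || { echo "state=$state, want $expected" >&2; return 1; }
        message=$(echo "$info" | jq -r .error.message)
        [[ "$message" == "null" ]] || { echo "unexpected error: $message" >&2; return 1; }
    }
    # assert_changefeed_state http://127.0.0.1:2379 e0b05583-fe52-471b-ade2-a0fb04f071c8 finished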
wait process cdc.test exit for 1-th time...
wait process cdc.test exit for 2-th time...
cdc.test: no process found
wait process cdc.test exit for 3-th time...
process cdc.test already exit
[Fri May 17 15:31:56 CST 2024] <<<<<< run test case changefeed_finish success! >>>>>>
table sink_hang.t1 not exists for 4-th check, retry later
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
table sink_hang.t1 not exists for 5-th check, retry later
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
table sink_hang.t1 exists
table sink_hang.t2 exists
check diff failed 1-th time, retry later
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
check diff failed 2-th time, retry later
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_storage_test/tiflow/tests/integration_tests/autorandom/run.sh using Sink-Type: storage... <<=================
The 1 times to try to start tidb cluster...
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63e15dc62780012	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-361-xrbpb, pid:10109, start at 2024-05-17 15:32:05.939796335 +0800 CST m=+5.235354678	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240517-15:34:05.947 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240517-15:32:05.918 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240517-15:22:05.918 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63e15dc62780012	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-361-xrbpb, pid:10109, start at 2024-05-17 15:32:05.939796335 +0800 CST m=+5.235354678	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240517-15:34:05.947 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240517-15:32:05.918 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240517-15:22:05.918 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
Verifying Downstream TiDB is started...
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63e15dc63e00014	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-361-xrbpb, pid:10190, start at 2024-05-17 15:32:06.040616039 +0800 CST m=+5.289869971	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240517-15:34:06.048 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240517-15:32:06.008 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240517-15:22:06.008 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
Starting Upstream TiFlash...
TiFlash
Release Version: v7.5.1-22-gacdbe728f
Edition:         Community
Git Commit Hash: acdbe728f97e2f5e0625d44d24ddbd1cd90d7a59
Git Branch:      HEAD
UTC Build Time:  2024-05-16 14:18:59
Enable Features: jemalloc sm4(GmSSL) avx2 avx512 unwind thinlto
Profile:         RELWITHDEBINFO

Raft Proxy
Git Commit Hash:   521fd9dbc55e58646045d88f91c3c35db50b5981
Git Commit Branch: HEAD
UTC Build Time:    2024-05-16 14:22:45
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Storage Engine:    tiflash
Prometheus Prefix: tiflash_proxy_
Profile:           release
Enable Features:    portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Verifying Upstream TiFlash is started...
Logging trace to /tmp/tidb_cdc_test/changefeed_auto_stop/tiflash/log/server.log
Logging errors to /tmp/tidb_cdc_test/changefeed_auto_stop/tiflash/log/error.log
arg matches is ArgMatches { args: {"engine-label": MatchedArg { occurs: 1, indices: [14], vals: ["tiflash"] }, "log-file": MatchedArg { occurs: 1, indices: [18], vals: ["/tmp/tidb_cdc_test/changefeed_auto_stop/tiflash/log/proxy.log"] }, "engine-addr": MatchedArg { occurs: 1, indices: [2], vals: ["127.0.0.1:9500"] }, "engine-git-hash": MatchedArg { occurs: 1, indices: [10], vals: ["acdbe728f97e2f5e0625d44d24ddbd1cd90d7a59"] }, "engine-version": MatchedArg { occurs: 1, indices: [12], vals: ["v7.5.1-22-gacdbe728f"] }, "advertise-addr": MatchedArg { occurs: 1, indices: [4], vals: ["127.0.0.1:9000"] }, "data-dir": MatchedArg { occurs: 1, indices: [6], vals: ["/tmp/tidb_cdc_test/changefeed_auto_stop/tiflash/db/proxy"] }, "config": MatchedArg { occurs: 1, indices: [8], vals: ["/tmp/tidb_cdc_test/changefeed_auto_stop/tiflash-proxy.toml"] }, "pd-endpoints": MatchedArg { occurs: 1, indices: [16], vals: ["127.0.0.1:2379"] }, "addr": MatchedArg { occurs: 1, indices: [20], vals: ["127.0.0.1:9000"] }}, subcommand: None, usage: Some("USAGE:\n    TiFlash Proxy [FLAGS] [OPTIONS] --engine-git-hash <engine-git-hash> --engine-label <engine-label> --engine-version <engine-version>") }
check diff failed 3-th time, retry later
start tidb cluster in /tmp/tidb_cdc_test/autorandom
Starting Upstream PD...
Release Version: v7.5.1-7-g7eb188c4f
Edition: Community
Git Commit Hash: 7eb188c4f8caba495a33eafedd4540afbc4ca6fc
Git Branch: release-7.5
UTC Build Time:  2024-05-13 04:29:07
Starting Downstream PD...
Release Version: v7.5.1-7-g7eb188c4f
Edition: Community
Git Commit Hash: 7eb188c4f8caba495a33eafedd4540afbc4ca6fc
Git Branch: release-7.5
UTC Build Time:  2024-05-13 04:29:07
Verifying upstream PD is started...
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_storage_test/tiflow/tests/integration_tests/force_replicate_table/run.sh using Sink-Type: storage... <<=================
The 1 times to try to start tidb cluster...
***************** properties *****************
"dotransactions"="false"
"readproportion"="0"
"scanproportion"="0"
"mysql.port"="4000"
"insertproportion"="0"
"mysql.db"="changefeed_auto_stop_1"
"threadcount"="4"
"operationcount"="0"
"mysql.host"="127.0.0.1"
"workload"="core"
"mysql.user"="root"
"requestdistribution"="uniform"
"readallfields"="true"
"updateproportion"="0"
"recordcount"="20"
**********************************************
Run finished, takes 11.150008ms
INSERT - Takes(s): 0.0, Count: 20, OPS: 3555.6, Avg(us): 2075, Min(us): 982, Max(us): 5776, 95th(us): 6000, 99th(us): 6000
check diff failed 4-th time, retry later
Verifying downstream PD is started...
Starting Upstream TiKV...
TiKV 
Release Version:   7.5.2
Edition:           Community
Git Commit Hash:   f2be3c0b9f0e60b619dade22410979ca91f4d85a
Git Commit Branch: release-7.5
UTC Build Time:    2024-05-14 11:07:23
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Enable Features:   pprof-fp jemalloc mem-profiling portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Profile:           dist_release
Starting Downstream TiKV...
TiKV 
Release Version:   7.5.2
Edition:           Community
Git Commit Hash:   f2be3c0b9f0e60b619dade22410979ca91f4d85a
Git Commit Branch: release-7.5
UTC Build Time:    2024-05-14 11:07:23
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Enable Features:   pprof-fp jemalloc mem-profiling portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Profile:           dist_release
***************** properties *****************
"mysql.host"="127.0.0.1"
"readproportion"="0"
"dotransactions"="false"
"recordcount"="20"
"readallfields"="true"
"threadcount"="4"
"updateproportion"="0"
"mysql.db"="changefeed_auto_stop_2"
"workload"="core"
"mysql.user"="root"
"requestdistribution"="uniform"
"mysql.port"="4000"
"scanproportion"="0"
"insertproportion"="0"
"operationcount"="0"
**********************************************
Run finished, takes 11.727233ms
INSERT - Takes(s): 0.0, Count: 20, OPS: 3073.0, Avg(us): 2085, Min(us): 913, Max(us): 5420, 95th(us): 6000, 99th(us): 6000
start tidb cluster in /tmp/tidb_cdc_test/force_replicate_table
Starting Upstream PD...
Release Version: v7.5.1-7-g7eb188c4f
Edition: Community
Git Commit Hash: 7eb188c4f8caba495a33eafedd4540afbc4ca6fc
Git Branch: release-7.5
UTC Build Time:  2024-05-13 04:29:07
Starting Downstream PD...
Release Version: v7.5.1-7-g7eb188c4f
Edition: Community
Git Commit Hash: 7eb188c4f8caba495a33eafedd4540afbc4ca6fc
Git Branch: release-7.5
UTC Build Time:  2024-05-13 04:29:07
Verifying upstream PD is started...
check diff failed 5-th time, retry later
***************** properties *****************
"scanproportion"="0"
"updateproportion"="0"
"readallfields"="true"
"mysql.user"="root"
"recordcount"="20"
"mysql.port"="4000"
"readproportion"="0"
"mysql.host"="127.0.0.1"
"operationcount"="0"
"dotransactions"="false"
"insertproportion"="0"
"workload"="core"
"requestdistribution"="uniform"
"threadcount"="4"
"mysql.db"="changefeed_auto_stop_3"
**********************************************
Run finished, takes 11.273268ms
INSERT - Takes(s): 0.0, Count: 19, OPS: 3369.5, Avg(us): 1929, Min(us): 1013, Max(us): 5892, 95th(us): 6000, 99th(us): 6000
Starting Upstream TiDB...
Release Version: v7.5.1-69-g19b7d4f8f3
Edition: Community
Git Commit Hash: 19b7d4f8f3d1492d7ace4da21a1cdc37a390b333
Git Branch: release-7.5
UTC Build Time: 2024-05-16 15:24:14
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Starting Downstream TiDB...
Release Version: v7.5.1-69-g19b7d4f8f3
Edition: Community
Git Commit Hash: 19b7d4f8f3d1492d7ace4da21a1cdc37a390b333
Git Branch: release-7.5
UTC Build Time: 2024-05-16 15:24:14
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Verifying Upstream TiDB is started...
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
***************** properties *****************
"readallfields"="true"
"operationcount"="0"
"threadcount"="4"
"scanproportion"="0"
"mysql.port"="4000"
"readproportion"="0"
"workload"="core"
"updateproportion"="0"
"mysql.db"="changefeed_auto_stop_4"
"mysql.host"="127.0.0.1"
"recordcount"="20"
"insertproportion"="0"
"requestdistribution"="uniform"
"dotransactions"="false"
"mysql.user"="root"
**********************************************
Run finished, takes 10.931134ms
INSERT - Takes(s): 0.0, Count: 20, OPS: 3399.7, Avg(us): 2035, Min(us): 990, Max(us): 5413, 95th(us): 6000, 99th(us): 6000
[Fri May 17 15:32:15 CST 2024] <<<<<< START cdc server in changefeed_auto_stop case >>>>>>
+ [[ '' == \t\r\u\e ]]
+ set +e
+ get_info_fail_msg='failed to get info:'
+ etcd_info_msg='etcd info'
+ GO_FAILPOINTS=
+ '[' -z '' ']'
+ curl_status_cmd='curl -vsL --max-time 20 http://127.0.0.1:8301/debug/info'
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.changefeed_auto_stop.1157311575.out server --log-file /tmp/tidb_cdc_test/changefeed_auto_stop/cdc1.log --log-level debug --data-dir /tmp/tidb_cdc_test/changefeed_auto_stop/cdc_data1 --cluster-id default --addr 127.0.0.1:8301 --pd http://127.0.0.1:2379
+ [[ no != \n\o ]]
+ (( i = 0 ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8301/debug/info
* About to connect() to 127.0.0.1 port 8301 (#0)
*   Trying 127.0.0.1...
* Connection refused
* Failed connect to 127.0.0.1:8301; Connection refused
* Closing connection 0
+ res=
+ echo ''
+ grep -q 'failed to get info:'
+ echo ''
+ grep -q 'etcd info'
+ '[' 0 -eq 50 ']'
+ sleep 3
check diff successfully
wait process cdc.test exit for 1-th time...
Verifying downstream PD is started...
Starting Upstream TiKV...
TiKV 
Release Version:   7.5.2
Edition:           Community
Git Commit Hash:   f2be3c0b9f0e60b619dade22410979ca91f4d85a
Git Commit Branch: release-7.5
UTC Build Time:    2024-05-14 11:07:23
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Enable Features:   pprof-fp jemalloc mem-profiling portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Profile:           dist_release
Starting Downstream TiKV...
TiKV 
Release Version:   7.5.2
Edition:           Community
Git Commit Hash:   f2be3c0b9f0e60b619dade22410979ca91f4d85a
Git Commit Branch: release-7.5
UTC Build Time:    2024-05-14 11:07:23
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Enable Features:   pprof-fp jemalloc mem-profiling portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Profile:           dist_release
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
wait process cdc.test exit for 2-th time...
wait process cdc.test exit for 3-th time...
cdc.test: no process found
wait process cdc.test exit for 4-th time...
process cdc.test already exit
[Fri May 17 15:32:18 CST 2024] <<<<<< run test case sink_hang success! >>>>>>
+ (( i++ ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8301/debug/info
* About to connect() to 127.0.0.1 port 8301 (#0)
*   Trying 127.0.0.1...
* Connected to 127.0.0.1 (127.0.0.1) port 8301 (#0)
> GET /debug/info HTTP/1.1
> User-Agent: curl/7.29.0
> Host: 127.0.0.1:8301
> Accept: */*
> 
< HTTP/1.1 200 OK
< Date: Fri, 17 May 2024 07:32:18 GMT
< Content-Length: 613
< Content-Type: text/plain; charset=utf-8
< 
{ [data not shown]
* Connection #0 to host 127.0.0.1 left intact
+ res='

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/7a41be68-036b-41e8-a425-5dd70e823e80
	{"id":"7a41be68-036b-41e8-a425-5dd70e823e80","address":"127.0.0.1:8301","version":"v7.5.1-31-gc7ef293f7"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f8576ec744c
	7a41be68-036b-41e8-a425-5dd70e823e80

/tidb/cdc/default/default/upstream/7369868032519443222
	{"id":7369868032519443222,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/7a41be68-036b-41e8-a425-5dd70e823e80
	{"id":"7a41be68-036b-41e8-a425-5dd70e823e80","address":"127.0.0.1:8301","version":"v7.5.1-31-gc7ef293f7"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f8576ec744c
	7a41be68-036b-41e8-a425-5dd70e823e80

/tidb/cdc/default/default/upstream/7369868032519443222
	{"id":7369868032519443222,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'failed to get info:'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/7a41be68-036b-41e8-a425-5dd70e823e80
	{"id":"7a41be68-036b-41e8-a425-5dd70e823e80","address":"127.0.0.1:8301","version":"v7.5.1-31-gc7ef293f7"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f8576ec744c
	7a41be68-036b-41e8-a425-5dd70e823e80

/tidb/cdc/default/default/upstream/7369868032519443222
	{"id":7369868032519443222,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'etcd info'
+ break
+ set +x
[Fri May 17 15:32:18 CST 2024] <<<<<< START cdc server in changefeed_auto_stop case >>>>>>
+ [[ '' == \t\r\u\e ]]
+ set +e
+ get_info_fail_msg='failed to get info:'
+ etcd_info_msg='etcd info'
+ '[' -z '' ']'
+ GO_FAILPOINTS='github.com/pingcap/tiflow/cdc/processor/pipeline/ProcessorSyncResolvedError=1*return(true);github.com/pingcap/tiflow/cdc/processor/ProcessorUpdatePositionDelaying=sleep(1000)'
+ curl_status_cmd='curl -vsL --max-time 20 http://127.0.0.1:8302/debug/info'
+ [[ no != \n\o ]]
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.changefeed_auto_stop.1161911621.out server --log-file /tmp/tidb_cdc_test/changefeed_auto_stop/cdc2.log --log-level debug --data-dir /tmp/tidb_cdc_test/changefeed_auto_stop/cdc_data2 --cluster-id default --addr 127.0.0.1:8302 --pd http://127.0.0.1:2379
+ (( i = 0 ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8302/debug/info
* About to connect() to 127.0.0.1 port 8302 (#0)
*   Trying 127.0.0.1...
* Connection refused
* Failed connect to 127.0.0.1:8302; Connection refused
* Closing connection 0
+ res=
+ echo ''
+ grep -q 'failed to get info:'
+ echo ''
+ grep -q 'etcd info'
+ '[' 0 -eq 50 ']'
+ sleep 3
Starting Upstream TiDB...
Release Version: v7.5.1-69-g19b7d4f8f3
Edition: Community
Git Commit Hash: 19b7d4f8f3d1492d7ace4da21a1cdc37a390b333
Git Branch: release-7.5
UTC Build Time: 2024-05-16 15:24:14
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Starting Downstream TiDB...
Release Version: v7.5.1-69-g19b7d4f8f3
Edition: Community
Git Commit Hash: 19b7d4f8f3d1492d7ace4da21a1cdc37a390b333
Git Branch: release-7.5
UTC Build Time: 2024-05-16 15:24:14
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Verifying Upstream TiDB is started...
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
+ (( i++ ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8302/debug/info
* About to connect() to 127.0.0.1 port 8302 (#0)
*   Trying 127.0.0.1...
* Connected to 127.0.0.1 (127.0.0.1) port 8302 (#0)
> GET /debug/info HTTP/1.1
> User-Agent: curl/7.29.0
> Host: 127.0.0.1:8302
> Accept: */*
> 
< HTTP/1.1 200 OK
< Date: Fri, 17 May 2024 07:32:21 GMT
< Content-Length: 867
< Content-Type: text/plain; charset=utf-8
< 
{ [data not shown]
* Connection #0 to host 127.0.0.1 left intact
+ res='

*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/228086c1-58ac-4a72-ab1c-fc74c9bab343
	{"id":"228086c1-58ac-4a72-ab1c-fc74c9bab343","address":"127.0.0.1:8302","version":"v7.5.1-31-gc7ef293f7"}

/tidb/cdc/default/__cdc_meta__/capture/7a41be68-036b-41e8-a425-5dd70e823e80
	{"id":"7a41be68-036b-41e8-a425-5dd70e823e80","address":"127.0.0.1:8301","version":"v7.5.1-31-gc7ef293f7"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f8576ec744c
	7a41be68-036b-41e8-a425-5dd70e823e80

/tidb/cdc/default/__cdc_meta__/owner/22318f8576ec7478
	228086c1-58ac-4a72-ab1c-fc74c9bab343

/tidb/cdc/default/default/upstream/7369868032519443222
	{"id":7369868032519443222,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ echo '

*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/228086c1-58ac-4a72-ab1c-fc74c9bab343
	{"id":"228086c1-58ac-4a72-ab1c-fc74c9bab343","address":"127.0.0.1:8302","version":"v7.5.1-31-gc7ef293f7"}

/tidb/cdc/default/__cdc_meta__/capture/7a41be68-036b-41e8-a425-5dd70e823e80
	{"id":"7a41be68-036b-41e8-a425-5dd70e823e80","address":"127.0.0.1:8301","version":"v7.5.1-31-gc7ef293f7"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f8576ec744c
	7a41be68-036b-41e8-a425-5dd70e823e80

/tidb/cdc/default/__cdc_meta__/owner/22318f8576ec7478
	228086c1-58ac-4a72-ab1c-fc74c9bab343

/tidb/cdc/default/default/upstream/7369868032519443222
	{"id":7369868032519443222,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'failed to get info:'
+ echo '

*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/228086c1-58ac-4a72-ab1c-fc74c9bab343
	{"id":"228086c1-58ac-4a72-ab1c-fc74c9bab343","address":"127.0.0.1:8302","version":"v7.5.1-31-gc7ef293f7"}

/tidb/cdc/default/__cdc_meta__/capture/7a41be68-036b-41e8-a425-5dd70e823e80
	{"id":"7a41be68-036b-41e8-a425-5dd70e823e80","address":"127.0.0.1:8301","version":"v7.5.1-31-gc7ef293f7"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f8576ec744c
	7a41be68-036b-41e8-a425-5dd70e823e80

/tidb/cdc/default/__cdc_meta__/owner/22318f8576ec7478
	228086c1-58ac-4a72-ab1c-fc74c9bab343

/tidb/cdc/default/default/upstream/7369868032519443222
	{"id":7369868032519443222,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'etcd info'
+ break
+ set +x
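The GO_FAILPOINTS value set just before this second capture was launched appears to follow the pingcap/failpoint convention: a semicolon-separated list of <package-path>/<FailpointName>=<term>, where a count prefix such as 1* limits how many times the term fires, return(true) makes the failpoint evaluate to true when hit, and sleep(1000) delays each hit by 1000 ms. Purely as an illustration (failpoint names copied from the trace, other server flags omitted):

    # illustrative only; not the harness's own invocation
    export GO_FAILPOINTS='github.com/pingcap/tiflow/cdc/processor/pipeline/ProcessorSyncResolvedError=1*return(true);github.com/pingcap/tiflow/cdc/processor/ProcessorUpdatePositionDelaying=sleep(1000)'
    cdc.test server --addr 127.0.0.1:8302 --pd http://127.0.0.1:2379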
+ workdir=/tmp/tidb_cdc_test/changefeed_auto_stop
+ sink_uri='file:///tmp/tidb_cdc_test/changefeed_auto_stop/storage_test/ticdc-changefeed-auto-stop-test-30921?protocol=canal-json&enable-tidb-extension=true'
+ consumer_replica_config=
+ log_suffix=
++ pwd
+ pwd=/tmp/tidb_cdc_test/changefeed_auto_stop
++ date
+ echo '[Fri May 17 15:32:21 CST 2024] <<<<<< START storage consumer in changefeed_auto_stop case >>>>>>'
[Fri May 17 15:32:21 CST 2024] <<<<<< START storage consumer in changefeed_auto_stop case >>>>>>
+ cd /tmp/tidb_cdc_test/changefeed_auto_stop
+ '[' '' '!=' '' ']'
+ cd /tmp/tidb_cdc_test/changefeed_auto_stop
+ set +x
+ cdc_storage_consumer --log-file /tmp/tidb_cdc_test/changefeed_auto_stop/cdc_storage_consumer.log --log-level debug --upstream-uri 'file:///tmp/tidb_cdc_test/changefeed_auto_stop/storage_test/ticdc-changefeed-auto-stop-test-30921?protocol=canal-json&enable-tidb-extension=true' --downstream-uri 'mysql://root@127.0.0.1:3306/?safe-mode=true&batch-dml-enable=false'
check_changefeed_state http://127.0.0.1:2379 e6f36f44-a095-444d-a4f6-33fa9ede60d6 normal null
+ endpoints=http://127.0.0.1:2379
+ changefeed_id=e6f36f44-a095-444d-a4f6-33fa9ede60d6
+ expected_state=normal
+ error_msg=null
+ tls_dir=null
+ [[ http://127.0.0.1:2379 =~ https ]]
++ cdc cli changefeed query --pd=http://127.0.0.1:2379 -c e6f36f44-a095-444d-a4f6-33fa9ede60d6 -s
+ info='{
  "upstream_id": 7369868032519443222,
  "namespace": "default",
  "id": "e6f36f44-a095-444d-a4f6-33fa9ede60d6",
  "state": "normal",
  "checkpoint_tso": 449821050435796993,
  "checkpoint_time": "2024-05-17 15:32:11.118",
  "error": null
}'
+ echo '{
  "upstream_id": 7369868032519443222,
  "namespace": "default",
  "id": "e6f36f44-a095-444d-a4f6-33fa9ede60d6",
  "state": "normal",
  "checkpoint_tso": 449821050435796993,
  "checkpoint_time": "2024-05-17 15:32:11.118",
  "error": null
}'
{
  "upstream_id": 7369868032519443222,
  "namespace": "default",
  "id": "e6f36f44-a095-444d-a4f6-33fa9ede60d6",
  "state": "normal",
  "checkpoint_tso": 449821050435796993,
  "checkpoint_time": "2024-05-17 15:32:11.118",
  "error": null
}
++ echo '{' '"upstream_id":' 7369868032519443222, '"namespace":' '"default",' '"id":' '"e6f36f44-a095-444d-a4f6-33fa9ede60d6",' '"state":' '"normal",' '"checkpoint_tso":' 449821050435796993, '"checkpoint_time":' '"2024-05-17' '15:32:11.118",' '"error":' null '}'
++ jq -r .state
+ state=normal
+ [[ ! normal == \n\o\r\m\a\l ]]
++ echo '{' '"upstream_id":' 7369868032519443222, '"namespace":' '"default",' '"id":' '"e6f36f44-a095-444d-a4f6-33fa9ede60d6",' '"state":' '"normal",' '"checkpoint_tso":' 449821050435796993, '"checkpoint_time":' '"2024-05-17' '15:32:11.118",' '"error":' null '}'
++ jq -r .error.message
+ message=null
+ [[ ! null =~ null ]]
run task successfully
table changefeed_auto_stop_1.usertable not exists for 1-th check, retry later
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63e15dd419c0018	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-361-zb0cd, pid:25785, start at 2024-05-17 15:32:20.240647761 +0800 CST m=+5.243148630	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240517-15:34:20.247 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240517-15:32:20.249 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240517-15:22:20.249 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63e15dd419c0018	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-361-zb0cd, pid:25785, start at 2024-05-17 15:32:20.240647761 +0800 CST m=+5.243148630	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240517-15:34:20.247 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240517-15:32:20.249 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240517-15:22:20.249 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
Verifying Downstream TiDB is started...
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63e15dd40c40014	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-361-zb0cd, pid:25848, start at 2024-05-17 15:32:20.177255221 +0800 CST m=+5.116002268	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240517-15:34:20.187 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240517-15:32:20.145 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240517-15:22:20.145 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
Starting Upstream TiFlash...
TiFlash
Release Version: v7.5.1-22-gacdbe728f
Edition:         Community
Git Commit Hash: acdbe728f97e2f5e0625d44d24ddbd1cd90d7a59
Git Branch:      HEAD
UTC Build Time:  2024-05-16 14:18:59
Enable Features: jemalloc sm4(GmSSL) avx2 avx512 unwind thinlto
Profile:         RELWITHDEBINFO

Raft Proxy
Git Commit Hash:   521fd9dbc55e58646045d88f91c3c35db50b5981
Git Commit Branch: HEAD
UTC Build Time:    2024-05-16 14:22:45
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Storage Engine:    tiflash
Prometheus Prefix: tiflash_proxy_
Profile:           release
Enable Features:    portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Verifying Upstream TiFlash is started...
Logging trace to /tmp/tidb_cdc_test/autorandom/tiflash/log/server.log
Logging errors to /tmp/tidb_cdc_test/autorandom/tiflash/log/error.log
arg matches is ArgMatches { args: {"advertise-addr": MatchedArg { occurs: 1, indices: [4], vals: ["127.0.0.1:9000"] }, "engine-addr": MatchedArg { occurs: 1, indices: [2], vals: ["127.0.0.1:9500"] }, "engine-version": MatchedArg { occurs: 1, indices: [12], vals: ["v7.5.1-22-gacdbe728f"] }, "data-dir": MatchedArg { occurs: 1, indices: [6], vals: ["/tmp/tidb_cdc_test/autorandom/tiflash/db/proxy"] }, "engine-git-hash": MatchedArg { occurs: 1, indices: [10], vals: ["acdbe728f97e2f5e0625d44d24ddbd1cd90d7a59"] }, "config": MatchedArg { occurs: 1, indices: [8], vals: ["/tmp/tidb_cdc_test/autorandom/tiflash-proxy.toml"] }, "pd-endpoints": MatchedArg { occurs: 1, indices: [16], vals: ["127.0.0.1:2379"] }, "addr": MatchedArg { occurs: 1, indices: [20], vals: ["127.0.0.1:9000"] }, "engine-label": MatchedArg { occurs: 1, indices: [14], vals: ["tiflash"] }, "log-file": MatchedArg { occurs: 1, indices: [18], vals: ["/tmp/tidb_cdc_test/autorandom/tiflash/log/proxy.log"] }}, subcommand: None, usage: Some("USAGE:\n    TiFlash Proxy [FLAGS] [OPTIONS] --engine-git-hash <engine-git-hash> --engine-label <engine-label> --engine-version <engine-version>") }
table changefeed_auto_stop_1.usertable not exists for 2-th check, retry later
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63e15dd7d400017	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-361-cc4gl, pid:6843, start at 2024-05-17 15:32:24.056702271 +0800 CST m=+5.187710894	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240517-15:34:24.064 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240517-15:32:24.066 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240517-15:22:24.066 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63e15dd7d400017	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-361-cc4gl, pid:6843, start at 2024-05-17 15:32:24.056702271 +0800 CST m=+5.187710894	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240517-15:34:24.064 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240517-15:32:24.066 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240517-15:22:24.066 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
Verifying Downstream TiDB is started...
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63e15dd7d280013	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-361-cc4gl, pid:6925, start at 2024-05-17 15:32:24.035037041 +0800 CST m=+5.100905646	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240517-15:34:24.042 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240517-15:32:24.010 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240517-15:22:24.010 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
Starting Upstream TiFlash...
TiFlash
Release Version: v7.5.1-22-gacdbe728f
Edition:         Community
Git Commit Hash: acdbe728f97e2f5e0625d44d24ddbd1cd90d7a59
Git Branch:      HEAD
UTC Build Time:  2024-05-16 14:18:59
Enable Features: jemalloc sm4(GmSSL) avx2 avx512 unwind thinlto
Profile:         RELWITHDEBINFO

Raft Proxy
Git Commit Hash:   521fd9dbc55e58646045d88f91c3c35db50b5981
Git Commit Branch: HEAD
UTC Build Time:    2024-05-16 14:22:45
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Storage Engine:    tiflash
Prometheus Prefix: tiflash_proxy_
Profile:           release
Enable Features:    portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Verifying Upstream TiFlash is started...
Logging trace to /tmp/tidb_cdc_test/force_replicate_table/tiflash/log/server.log
Logging errors to /tmp/tidb_cdc_test/force_replicate_table/tiflash/log/error.log
arg matches is ArgMatches { args: {"config": MatchedArg { occurs: 1, indices: [8], vals: ["/tmp/tidb_cdc_test/force_replicate_table/tiflash-proxy.toml"] }, "engine-git-hash": MatchedArg { occurs: 1, indices: [10], vals: ["acdbe728f97e2f5e0625d44d24ddbd1cd90d7a59"] }, "engine-label": MatchedArg { occurs: 1, indices: [14], vals: ["tiflash"] }, "pd-endpoints": MatchedArg { occurs: 1, indices: [16], vals: ["127.0.0.1:2379"] }, "addr": MatchedArg { occurs: 1, indices: [20], vals: ["127.0.0.1:9000"] }, "data-dir": MatchedArg { occurs: 1, indices: [6], vals: ["/tmp/tidb_cdc_test/force_replicate_table/tiflash/db/proxy"] }, "advertise-addr": MatchedArg { occurs: 1, indices: [4], vals: ["127.0.0.1:9000"] }, "log-file": MatchedArg { occurs: 1, indices: [18], vals: ["/tmp/tidb_cdc_test/force_replicate_table/tiflash/log/proxy.log"] }, "engine-version": MatchedArg { occurs: 1, indices: [12], vals: ["v7.5.1-22-gacdbe728f"] }, "engine-addr": MatchedArg { occurs: 1, indices: [2], vals: ["127.0.0.1:9500"] }}, subcommand: None, usage: Some("USAGE:\n    TiFlash Proxy [FLAGS] [OPTIONS] --engine-git-hash <engine-git-hash> --engine-label <engine-label> --engine-version <engine-version>") }
[Fri May 17 15:32:25 CST 2024] <<<<<< START cdc server in autorandom case >>>>>>
+ [[ '' == \t\r\u\e ]]
+ set +e
+ get_info_fail_msg='failed to get info:'
+ etcd_info_msg='etcd info'
+ '[' -z '' ']'
+ curl_status_cmd='curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info'
+ GO_FAILPOINTS=
+ [[ no != \n\o ]]
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.autorandom.2708927091.out server --log-file /tmp/tidb_cdc_test/autorandom/cdc.log --log-level debug --data-dir /tmp/tidb_cdc_test/autorandom/cdc_data --cluster-id default
+ (( i = 0 ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connection refused
* Failed connect to 127.0.0.1:8300; Connection refused
* Closing connection 0
+ res=
+ echo ''
+ grep -q 'failed to get info:'
+ echo ''
+ grep -q 'etcd info'
+ '[' 0 -eq 50 ']'
+ sleep 3
table changefeed_auto_stop_1.usertable not exists for 3-th check, retry later
[Fri May 17 15:32:27 CST 2024] <<<<<< START cdc server in force_replicate_table case >>>>>>
+ [[ '' == \t\r\u\e ]]
+ set +e
+ get_info_fail_msg='failed to get info:'
+ etcd_info_msg='etcd info'
+ '[' -z '' ']'
+ curl_status_cmd='curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info'
+ [[ no != \n\o ]]
+ GO_FAILPOINTS=
+ (( i = 0 ))
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.force_replicate_table.82758277.out server --log-file /tmp/tidb_cdc_test/force_replicate_table/cdc.log --log-level debug --data-dir /tmp/tidb_cdc_test/force_replicate_table/cdc_data --cluster-id default
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connection refused
* Failed connect to 127.0.0.1:8300; Connection refused
* Closing connection 0
+ res=
+ echo ''
+ grep -q 'failed to get info:'
+ echo ''
+ grep -q 'etcd info'
+ '[' 0 -eq 50 ']'
+ sleep 3
table changefeed_auto_stop_1.usertable not exists for 4-th check, retry later
+ (( i++ ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connected to 127.0.0.1 (127.0.0.1) port 8300 (#0)
> GET /debug/info HTTP/1.1
> User-Agent: curl/7.29.0
> Host: 127.0.0.1:8300
> Accept: */*
> 
< HTTP/1.1 200 OK
< Date: Fri, 17 May 2024 07:32:28 GMT
< Content-Length: 613
< Content-Type: text/plain; charset=utf-8
< 
{ [data not shown]
* Connection #0 to host 127.0.0.1 left intact
+ res='

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/42ce50b5-7e8b-4246-abc5-be9675d899df
	{"id":"42ce50b5-7e8b-4246-abc5-be9675d899df","address":"127.0.0.1:8300","version":"v7.5.1-31-gc7ef293f7"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f85772a1bf3
	42ce50b5-7e8b-4246-abc5-be9675d899df

/tidb/cdc/default/default/upstream/7369868090429951295
	{"id":7369868090429951295,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/42ce50b5-7e8b-4246-abc5-be9675d899df
	{"id":"42ce50b5-7e8b-4246-abc5-be9675d899df","address":"127.0.0.1:8300","version":"v7.5.1-31-gc7ef293f7"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f85772a1bf3
	42ce50b5-7e8b-4246-abc5-be9675d899df

/tidb/cdc/default/default/upstream/7369868090429951295
	{"id":7369868090429951295,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'failed to get info:'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/42ce50b5-7e8b-4246-abc5-be9675d899df
	{"id":"42ce50b5-7e8b-4246-abc5-be9675d899df","address":"127.0.0.1:8300","version":"v7.5.1-31-gc7ef293f7"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f85772a1bf3
	42ce50b5-7e8b-4246-abc5-be9675d899df

/tidb/cdc/default/default/upstream/7369868090429951295
	{"id":7369868090429951295,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'etcd info'
+ break
+ set +x
Create changefeed successfully!
ID: a9157bc8-8ebd-45f1-8429-72b5647b2aa3
Info: {"upstream_id":7369868090429951295,"namespace":"default","id":"a9157bc8-8ebd-45f1-8429-72b5647b2aa3","sink_uri":"file:///tmp/tidb_cdc_test/autorandom/storage_test/ticdc-autorandom-test-12298?protocol=canal-json\u0026enable-tidb-extension=true","create_time":"2024-05-17T15:32:28.641571002+08:00","start_ts":449821054992384006,"config":{"memory_quota":1073741824,"case_sensitive":false,"force_replicate":false,"ignore_ineligible_table":false,"check_gc_safe_point":true,"enable_sync_point":false,"bdr_mode":false,"sync_point_interval":600000000000,"sync_point_retention":86400000000000,"filter":{"rules":["*.*"]},"mounter":{"worker_num":16},"sink":{"protocol":"canal-json","csv":{"delimiter":",","quote":"\"","null":"\\N","include_commit_ts":false,"binary_encoding_method":"base64"},"encoder_concurrency":32,"terminator":"\r\n","date_separator":"day","enable_partition_separator":true,"file_index_width":20,"enable_kafka_sink_v2":false,"only_output_updated_columns":false,"delete_only_output_handle_key_columns":false,"advance_timeout":150,"send_bootstrap_interval_in_sec":120,"send_bootstrap_in_msg_count":10000,"send_bootstrap_to_all_partition":true,"open":{"output_old_value":true}},"consistent":{"level":"none","max_log_size":64,"flush_interval":2000,"meta_flush_interval":200,"encoding_worker_num":16,"flush_worker_num":8,"use_file_backend":false,"memory_usage":{"memory_quota_percentage":50,"event_cache_percentage":0}},"scheduler":{"enable_table_across_nodes":false,"region_threshold":100000,"write_key_threshold":0},"integrity":{"integrity_check_level":"none","corruption_handle_level":"warn"},"changefeed_error_stuck_duration":1800000000000,"sql_mode":"ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION","synced_status":{"synced_check_interval":300,"checkpoint_interval":15}},"state":"normal","creator_version":"v7.5.1-31-gc7ef293f7","resolved_ts":449821054992384006,"checkpoint_ts":449821054992384006,"checkpoint_time":"2024-05-17 15:32:28.500"}
+ workdir=/tmp/tidb_cdc_test/autorandom
+ sink_uri='file:///tmp/tidb_cdc_test/autorandom/storage_test/ticdc-autorandom-test-12298?protocol=canal-json&enable-tidb-extension=true'
+ consumer_replica_config=
+ log_suffix=
++ pwd
+ pwd=/tmp/tidb_cdc_test/autorandom
++ date
+ echo '[Fri May 17 15:32:28 CST 2024] <<<<<< START storage consumer in autorandom case >>>>>>'
[Fri May 17 15:32:28 CST 2024] <<<<<< START storage consumer in autorandom case >>>>>>
+ cd /tmp/tidb_cdc_test/autorandom
+ '[' '' '!=' '' ']'
+ cd /tmp/tidb_cdc_test/autorandom
+ set +x
+ cdc_storage_consumer --log-file /tmp/tidb_cdc_test/autorandom/cdc_storage_consumer.log --log-level debug --upstream-uri 'file:///tmp/tidb_cdc_test/autorandom/storage_test/ticdc-autorandom-test-12298?protocol=canal-json&enable-tidb-extension=true' --downstream-uri 'mysql://root@127.0.0.1:3306/?safe-mode=true&batch-dml-enable=false'
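In these storage-sink cases the changefeed does not write to a database directly: it emits canal-json files under the local storage_test directory named in sink_uri, and cdc_storage_consumer reads that directory and replays the events into the downstream TiDB/MySQL. The wiring, with the URIs taken verbatim from the trace above:

    # schematic wiring of changefeed sink and storage consumer (URIs copied from the trace)
    SINK_URI='file:///tmp/tidb_cdc_test/autorandom/storage_test/ticdc-autorandom-test-12298?protocol=canal-json&enable-tidb-extension=true'
    # the changefeed is created with --sink-uri="$SINK_URI", so TiCDC writes canal-json files there;
    # the consumer picks those files up and applies them downstream:
    cdc_storage_consumer --log-file ./cdc_storage_consumer.log --log-level debug \
        --upstream-uri "$SINK_URI" \
        --downstream-uri 'mysql://root@127.0.0.1:3306/?safe-mode=true&batch-dml-enable=false'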
table autorandom_test.table_a not exists for 1-th check, retry later
table changefeed_auto_stop_1.usertable not exists for 5-th check, retry later
<<< Run all test success >>>
[Pipeline] }
Cache not saved (ws/jenkins-pingcap-tiflow-release-7.5-pull_cdc_integration_storage_test-361/tiflow-cdc already exists)
[Pipeline] // cache
[Pipeline] }
[Pipeline] // dir
[Pipeline] }
[Pipeline] // withCredentials
[Pipeline] }
[Pipeline] // timeout
[Pipeline] }
+ (( i++ ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connected to 127.0.0.1 (127.0.0.1) port 8300 (#0)
> GET /debug/info HTTP/1.1
> User-Agent: curl/7.29.0
> Host: 127.0.0.1:8300
> Accept: */*
> 
< HTTP/1.1 200 OK
< Date: Fri, 17 May 2024 07:32:30 GMT
< Content-Length: 613
< Content-Type: text/plain; charset=utf-8
< 
{ [data not shown]
* Connection #0 to host 127.0.0.1 left intact
+ res='

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/24852b08-267d-4ca5-bfc4-e658a1776a04
	{"id":"24852b08-267d-4ca5-bfc4-e658a1776a04","address":"127.0.0.1:8300","version":"v7.5.1-31-gc7ef293f7"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f857735e3f3
	24852b08-267d-4ca5-bfc4-e658a1776a04

/tidb/cdc/default/default/upstream/7369868108426514211
	{"id":7369868108426514211,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/24852b08-267d-4ca5-bfc4-e658a1776a04
	{"id":"24852b08-267d-4ca5-bfc4-e658a1776a04","address":"127.0.0.1:8300","version":"v7.5.1-31-gc7ef293f7"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f857735e3f3
	24852b08-267d-4ca5-bfc4-e658a1776a04

/tidb/cdc/default/default/upstream/7369868108426514211
	{"id":7369868108426514211,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'failed to get info:'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/24852b08-267d-4ca5-bfc4-e658a1776a04
	{"id":"24852b08-267d-4ca5-bfc4-e658a1776a04","address":"127.0.0.1:8300","version":"v7.5.1-31-gc7ef293f7"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f857735e3f3
	24852b08-267d-4ca5-bfc4-e658a1776a04

/tidb/cdc/default/default/upstream/7369868108426514211
	{"id":7369868108426514211,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'etcd info'
+ break
+ set +x
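
The readiness check traced above reduces to a small polling loop: hit the capture's /debug/info endpoint until the body contains the 'etcd info' marker, sleeping between attempts. A minimal sketch, assuming the endpoint and the 50-attempt limit shown in the trace (the function name is illustrative, not the harness's actual helper):

wait_for_cdc_server() {
    local addr=${1:-127.0.0.1:8300}   # assumed default capture address
    local i res
    for i in $(seq 0 50); do
        res=$(curl -sL --max-time 20 "http://${addr}/debug/info" || true)
        # the harness treats the "etcd info" marker in the response as "capture registered"
        if echo "$res" | grep -q 'etcd info'; then
            echo "cdc server at ${addr} is ready"
            return 0
        fi
        sleep 3
    done
    echo "cdc server at ${addr} never became ready" >&2
    return 1
}
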
Create changefeed successfully!
ID: ddea0680-c8a0-4579-a8a3-bb8e076962d4
Info: {"upstream_id":7369868108426514211,"namespace":"default","id":"ddea0680-c8a0-4579-a8a3-bb8e076962d4","sink_uri":"file:///tmp/tidb_cdc_test/force_replicate_table/storage_test/ticdc-force_replicate_table-test-18531?protocol=canal-json\u0026enable-tidb-extension=true","create_time":"2024-05-17T15:32:30.560616458+08:00","start_ts":449821054668898305,"config":{"memory_quota":1073741824,"case_sensitive":false,"force_replicate":true,"ignore_ineligible_table":false,"check_gc_safe_point":true,"enable_sync_point":false,"bdr_mode":false,"sync_point_interval":600000000000,"sync_point_retention":86400000000000,"filter":{"rules":["*.*"]},"mounter":{"worker_num":16},"sink":{"protocol":"canal-json","csv":{"delimiter":",","quote":"\"","null":"\\N","include_commit_ts":false,"binary_encoding_method":"base64"},"encoder_concurrency":32,"terminator":"\r\n","date_separator":"day","enable_partition_separator":true,"file_index_width":20,"enable_kafka_sink_v2":false,"only_output_updated_columns":false,"delete_only_output_handle_key_columns":false,"advance_timeout":150,"send_bootstrap_interval_in_sec":120,"send_bootstrap_in_msg_count":10000,"send_bootstrap_to_all_partition":true,"open":{"output_old_value":true}},"consistent":{"level":"none","max_log_size":64,"flush_interval":2000,"meta_flush_interval":200,"encoding_worker_num":16,"flush_worker_num":8,"use_file_backend":false,"memory_usage":{"memory_quota_percentage":50,"event_cache_percentage":0}},"scheduler":{"enable_table_across_nodes":false,"region_threshold":100000,"write_key_threshold":0},"integrity":{"integrity_check_level":"none","corruption_handle_level":"warn"},"changefeed_error_stuck_duration":1800000000000,"sql_mode":"ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION","synced_status":{"synced_check_interval":300,"checkpoint_interval":15}},"state":"normal","creator_version":"v7.5.1-31-gc7ef293f7","resolved_ts":449821054668898305,"checkpoint_ts":449821054668898305,"checkpoint_time":"2024-05-17 15:32:27.266"}
+ workdir=/tmp/tidb_cdc_test/force_replicate_table
+ sink_uri='file:///tmp/tidb_cdc_test/force_replicate_table/storage_test/ticdc-force_replicate_table-test-18531?protocol=canal-json&enable-tidb-extension=true'
+ consumer_replica_config=/home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_storage_test/tiflow/tests/integration_tests/force_replicate_table/conf/changefeed.toml
+ log_suffix=
++ pwd
+ pwd=/tmp/tidb_cdc_test/force_replicate_table
++ date
+ echo '[Fri May 17 15:32:30 CST 2024] <<<<<< START storage consumer in force_replicate_table case >>>>>>'
[Fri May 17 15:32:30 CST 2024] <<<<<< START storage consumer in force_replicate_table case >>>>>>
+ cd /tmp/tidb_cdc_test/force_replicate_table
+ '[' /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_storage_test/tiflow/tests/integration_tests/force_replicate_table/conf/changefeed.toml '!=' '' ']'
+ cd /tmp/tidb_cdc_test/force_replicate_table
+ set +x
+ cdc_storage_consumer --log-file /tmp/tidb_cdc_test/force_replicate_table/cdc_storage_consumer.log --log-level debug --upstream-uri 'file:///tmp/tidb_cdc_test/force_replicate_table/storage_test/ticdc-force_replicate_table-test-18531?protocol=canal-json&enable-tidb-extension=true' --downstream-uri 'mysql://root@127.0.0.1:3306/?safe-mode=true&batch-dml-enable=false' --config /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_storage_test/tiflow/tests/integration_tests/force_replicate_table/conf/changefeed.toml
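
The consumer launch above follows a fixed shape: cd into the case workdir, pass the sink URI as the upstream, and only add --config when a replica config file was given. A hedged sketch of such a wrapper, with flags and paths taken from the trace and the function name and backgrounding assumed:

start_storage_consumer() {
    local workdir=$1 sink_uri=$2 replica_config=$3 log_suffix=$4
    local extra_args=()
    # only pass --config when a changefeed/replica config file was provided
    if [ -n "$replica_config" ]; then
        extra_args+=(--config "$replica_config")
    fi
    cd "$workdir" || return 1
    # run the consumer in the background (assumed) so the test can continue
    cdc_storage_consumer \
        --log-file "${workdir}/cdc_storage_consumer${log_suffix}.log" \
        --log-level debug \
        --upstream-uri "$sink_uri" \
        --downstream-uri 'mysql://root@127.0.0.1:3306/?safe-mode=true&batch-dml-enable=false' \
        "${extra_args[@]}" &
}
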
[Pipeline] // stage
[Pipeline] }
[Pipeline] // container
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // podTemplate
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
table autorandom_test.table_a not exists for 2-th check, retry later
table changefeed_auto_stop_1.usertable exists
table changefeed_auto_stop_2.usertable not exists for 1-th check, retry later
table force_replicate_table.t0 not exists for 1-th check, retry later
table autorandom_test.table_a not exists for 3-th check, retry later
table changefeed_auto_stop_2.usertable exists
table changefeed_auto_stop_3.usertable not exists for 1-th check, retry later
table force_replicate_table.t0 not exists for 2-th check, retry later
table autorandom_test.table_a not exists for 4-th check, retry later
table changefeed_auto_stop_3.usertable not exists for 2-th check, retry later
table force_replicate_table.t0 not exists for 3-th check, retry later
table autorandom_test.table_a not exists for 5-th check, retry later
table changefeed_auto_stop_3.usertable not exists for 3-th check, retry later
table force_replicate_table.t0 not exists for 4-th check, retry later
table autorandom_test.table_a exists
check diff successfully
table changefeed_auto_stop_3.usertable not exists for 4-th check, retry later
wait process cdc.test exit for 1-th time...
table force_replicate_table.t0 not exists for 5-th check, retry later
wait process cdc.test exit for 2-th time...
cdc.test: no process found
wait process cdc.test exit for 3-th time...
process cdc.test already exit
[Fri May 17 15:32:41 CST 2024] <<<<<< run test case autorandom success! >>>>>>
table changefeed_auto_stop_3.usertable not exists for 5-th check, retry later
table force_replicate_table.t0 exists
table force_replicate_table.t1 exists
table force_replicate_table.t2 exists
table force_replicate_table.t3 exists
table force_replicate_table.t4 not exists for 1-th check, retry later
table force_replicate_table.t4 not exists for 2-th check, retry later
table changefeed_auto_stop_3.usertable exists
table changefeed_auto_stop_4.usertable exists
check diff failed 1-th time, retry later
check diff failed 2-th time, retry later
table force_replicate_table.t4 not exists for 3-th check, retry later
check diff failed 3-th time, retry later
table force_replicate_table.t4 not exists for 4-th check, retry later
table force_replicate_table.t4 not exists for 5-th check, retry later
check diff failed 4-th time, retry later
table force_replicate_table.t4 exists
table force_replicate_table.t5 exists
table force_replicate_table.t6 not exists for 1-th check, retry later
check diff successfully
wait process cdc.test exit for 1-th time...
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_storage_test/tiflow/tests/integration_tests/generate_column/run.sh using Sink-Type: storage... <<=================
[Fri May 17 15:32:52 CST 2024] <<<<<< run test case generate_column success! >>>>>>
wait process cdc.test exit for 2-th time...
cdc.test: no process found
wait process cdc.test exit for 3-th time...
process cdc.test already exit
[Fri May 17 15:32:54 CST 2024] <<<<<< run test case changefeed_auto_stop success! >>>>>>
table force_replicate_table.t6 not exists for 2-th check, retry later
<<< Run all test success >>>
[Pipeline] }
Cache not saved (ws/jenkins-pingcap-tiflow-release-7.5-pull_cdc_integration_storage_test-361/tiflow-cdc already exists)
[Pipeline] // cache
[Pipeline] }
[Pipeline] // dir
[Pipeline] }
[Pipeline] // withCredentials
[Pipeline] }
[Pipeline] // timeout
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // container
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
table force_replicate_table.t6 not exists for 3-th check, retry later
[Pipeline] // podTemplate
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
table force_replicate_table.t6 not exists for 4-th check, retry later
table force_replicate_table.t6 not exists for 5-th check, retry later
table force_replicate_table.t6 exists
check_data_subset force_replicate_table.t0 127.0.0.1 4000 127.0.0.1 3306
run task successfully
check_data_subset force_replicate_table.t1 127.0.0.1 4000 127.0.0.1 3306
run task successfully
check_data_subset force_replicate_table.t2 127.0.0.1 4000 127.0.0.1 3306
run task successfully
check_data_subset force_replicate_table.t3 127.0.0.1 4000 127.0.0.1 3306
run task successfully
check_data_subset force_replicate_table.t4 127.0.0.1 4000 127.0.0.1 3306
run task successfully
check_data_subset force_replicate_table.t5 127.0.0.1 4000 127.0.0.1 3306
<<< Run all test success >>>
[Pipeline] }
Cache not saved (ws/jenkins-pingcap-tiflow-release-7.5-pull_cdc_integration_storage_test-361/tiflow-cdc already exists)
[Pipeline] // cache
[Pipeline] }
[Pipeline] // dir
[Pipeline] }
[Pipeline] // withCredentials
[Pipeline] }
[Pipeline] // timeout
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // container
[Pipeline] }
run task successfully
check_data_subset force_replicate_table.t6 127.0.0.1 4000 127.0.0.1 3306
id=7,a=NULL doesn't exist in downstream table force_replicate_table.t6
run task failed 1-th time, retry later
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // podTemplate
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
check_data_subset force_replicate_table.t6 127.0.0.1 4000 127.0.0.1 3306
id=7,a=NULL doesn't exist in downstream table force_replicate_table.t6
run task failed 2-th time, retry later
check_data_subset force_replicate_table.t6 127.0.0.1 4000 127.0.0.1 3306
id=7,a=NULL doesn't exist in downstream table force_replicate_table.t6
run task failed 3-th time, retry later
check_data_subset force_replicate_table.t6 127.0.0.1 4000 127.0.0.1 3306
id=7,a=NULL doesn't exist in downstream table force_replicate_table.t6
run task failed 4-th time, retry later
check_data_subset force_replicate_table.t6 127.0.0.1 4000 127.0.0.1 3306
run task successfully
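
The "run task failed N-th time, retry later" lines above come from re-running the same subset check until it passes, which tolerates the sink still draining rows. A minimal sketch of that retry wrapper, assuming an illustrative retry count and sleep (not the real test-library code):

retry_task() {
    local max_retries=${MAX_RETRIES:-10}
    local i
    for ((i = 1; i <= max_retries; i++)); do
        if "$@"; then
            echo "run task successfully"
            return 0
        fi
        echo "run task failed $i-th time, retry later"
        sleep 3
    done
    return 1
}
# usage mirroring the check above:
# retry_task check_data_subset force_replicate_table.t6 127.0.0.1 4000 127.0.0.1 3306
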
wait process cdc.test exit for 1-th time...
wait process cdc.test exit for 2-th time...
cdc.test: no process found
wait process cdc.test exit for 3-th time...
process cdc.test already exit
[Fri May 17 15:33:29 CST 2024] <<<<<< run test case force_replicate_table success! >>>>>>
++ curl -X GET http://127.0.0.1:8300/api/v2/changefeeds/test-1/synced
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100   221  100   221    0     0   2706      0 --:--:-- --:--:-- --:--:--  2728
+ synced_status='{"synced":true,"sink_checkpoint_ts":"2024-05-17 15:33:27.682","puller_resolved_ts":"2024-05-17 15:33:19.682","last_synced_ts":"2024-05-17 15:31:11.432","now_ts":"2024-05-17 15:33:28.000","info":"Data syncing is finished"}'
++ echo '{"synced":true,"sink_checkpoint_ts":"2024-05-17' '15:33:27.682","puller_resolved_ts":"2024-05-17' '15:33:19.682","last_synced_ts":"2024-05-17' '15:31:11.432","now_ts":"2024-05-17' '15:33:28.000","info":"Data' syncing is 'finished"}'
++ jq .synced
+ status=true
+ '[' true '!=' true ']'
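
The synced-status probe above is a plain HTTP GET against the v2 API followed by jq field extraction. A sketch of the same check, with the endpoint and changefeed id taken from the log and the surrounding script assumed:

# query the synced status of changefeed test-1 and pull out the fields the test asserts on
synced_status=$(curl -s -X GET http://127.0.0.1:8300/api/v2/changefeeds/test-1/synced)
status=$(echo "$synced_status" | jq .synced)   # boolean: true once the sink has caught up
info=$(echo "$synced_status" | jq -r .info)    # human-readable explanation
if [ "$status" != "true" ]; then
    echo "changefeed test-1 not synced yet: $info"
fi
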
+ kill_pd
++ ps aux
++ grep pd-server
++ grep /tmp/tidb_cdc_test/synced_status
+ info='jenkins    16581  7.4  0.0 13323028 142128 ?     Sl   15:30   0:12 pd-server --advertise-client-urls http://127.0.0.1:2379 --client-urls http://0.0.0.0:2379 --advertise-peer-urls http://127.0.0.1:2380 --peer-urls http://0.0.0.0:2380 --config /tmp/tidb_cdc_test/synced_status/pd-config.toml --log-file /tmp/tidb_cdc_test/synced_status/pd1.log --data-dir /tmp/tidb_cdc_test/synced_status/pd1 --name=pd1 --initial-cluster=pd1=http://127.0.0.1:2380
jenkins    16646  5.2  0.0 13518804 133080 ?     Sl   15:30   0:08 pd-server --advertise-client-urls http://127.0.0.1:2479 --client-urls http://0.0.0.0:2479 --advertise-peer-urls http://127.0.0.1:2480 --peer-urls http://0.0.0.0:2480 --config /tmp/tidb_cdc_test/synced_status/pd-config.toml --log-file /tmp/tidb_cdc_test/synced_status/down_pd.log --data-dir /tmp/tidb_cdc_test/synced_status/down_pd'
++ ps aux
++ grep pd-server
++ grep /tmp/tidb_cdc_test/synced_status
++ awk '{print $2}'
++ xargs kill -9
+ sleep 20
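
The kill_pd step above selects only the pd-server processes belonging to this case's working directory before sending SIGKILL. A plausible reconstruction of that helper (the name comes from the trace, the body is a sketch, and -r is an added GNU xargs safeguard):

kill_pd() {
    ps aux \
        | grep pd-server \
        | grep /tmp/tidb_cdc_test/synced_status \
        | awk '{print $2}' \
        | xargs -r kill -9   # -r: skip kill when no PID matched
}
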
{"level":"warn","ts":1715931208.7734127,"caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc00332ce00/127.0.0.1:2379","attempt":0,"error":"rpc error: code = Unavailable desc = error reading from server: read tcp 127.0.0.1:48238->127.0.0.1:2379: read: connection reset by peer"}
{"level":"warn","ts":1715931213.7728875,"caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc00332ce00/127.0.0.1:2379","attempt":1,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"info","ts":1715931213.7729394,"caller":"v3@v3.5.10/client.go:210","msg":"Auto sync endpoints failed.","error":"context deadline exceeded"}
{"level":"warn","ts":1715931213.8688235,"caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc0026f1dc0/127.0.0.1:2479","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2479: connect: connection refused\""}
{"level":"info","ts":1715931213.8688786,"caller":"v3@v3.5.10/client.go:210","msg":"Auto sync endpoints failed.","error":"context deadline exceeded"}
{"level":"warn","ts":1715931214.7435417,"caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc001420a80/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"info","ts":1715931214.7436035,"caller":"v3@v3.5.10/client.go:210","msg":"Auto sync endpoints failed.","error":"context deadline exceeded"}
{"level":"warn","ts":"2024-05-17T15:33:38.582329+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000e14700/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-05-17T15:33:38.583108+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000b64fc0/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-05-17T15:33:38.664313+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000efe380/127.0.0.1:2479","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2479: connect: connection refused\""}
<<< Run all test success >>>
[Pipeline] }
Cache not saved (ws/jenkins-pingcap-tiflow-release-7.5-pull_cdc_integration_storage_test-361/tiflow-cdc already exists)
[Pipeline] // cache
[Pipeline] }
[Pipeline] // dir
[Pipeline] }
[Pipeline] // withCredentials
[Pipeline] }
[Pipeline] // timeout
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // container
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // podTemplate
{"level":"warn","ts":"2024-05-17T15:33:44.583523+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000e14700/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-05-17T15:33:44.584832+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000b64fc0/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-05-17T15:33:44.665045+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000efe380/127.0.0.1:2479","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2479: connect: connection refused\""}
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
++ curl -X GET http://127.0.0.1:8300/api/v2/changefeeds/test-1/synced
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:01 --:--:--     0{"level":"warn","ts":"2024-05-17T15:33:50.58425+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000e14700/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-05-17T15:33:50.585852+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000b64fc0/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-05-17T15:33:50.666329+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000efe380/127.0.0.1:2479","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2479: connect: connection refused\""}

  0     0    0     0    0     0      0      0 --:--:--  0:00:02 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:03 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:04 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:05 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:06 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:07 --:--:--     0{"level":"warn","ts":"2024-05-17T15:33:56.585382+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000e14700/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-05-17T15:33:56.586972+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000b64fc0/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-05-17T15:33:56.668115+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000efe380/127.0.0.1:2479","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2479: connect: connection refused\""}

  0     0    0     0    0     0      0      0 --:--:--  0:00:08 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:09 --:--:--     0{"level":"warn","ts":"2024-05-17T15:33:58.572174+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000e14700/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"info","ts":"2024-05-17T15:33:58.572232+0800","logger":"etcd-client","caller":"v3@v3.5.10/client.go:210","msg":"Auto sync endpoints failed.","error":"context deadline exceeded"}
{"level":"warn","ts":"2024-05-17T15:33:58.572689+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000b64fc0/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"info","ts":"2024-05-17T15:33:58.572752+0800","logger":"etcd-client","caller":"v3@v3.5.10/client.go:210","msg":"Auto sync endpoints failed.","error":"context deadline exceeded"}
{"level":"warn","ts":"2024-05-17T15:33:58.657938+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000efe380/127.0.0.1:2479","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2479: connect: connection refused\""}
{"level":"info","ts":"2024-05-17T15:33:58.65798+0800","logger":"etcd-client","caller":"v3@v3.5.10/client.go:210","msg":"Auto sync endpoints failed.","error":"context deadline exceeded"}

  0     0    0     0    0     0      0      0 --:--:--  0:00:10 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:11 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:12 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:13 --:--:--     0{"level":"warn","ts":"2024-05-17T15:34:02.586249+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000e14700/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-05-17T15:34:02.588471+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000b64fc0/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-05-17T15:34:02.669126+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000efe380/127.0.0.1:2479","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2479: connect: connection refused\""}

  0     0    0     0    0     0      0      0 --:--:--  0:00:14 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:15 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:16 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:17 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:18 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:19 --:--:--     0{"level":"warn","ts":"2024-05-17T15:34:08.587999+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000e14700/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-05-17T15:34:08.588834+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000b64fc0/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-05-17T15:34:08.670493+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000efe380/127.0.0.1:2479","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2479: connect: connection refused\""}
{"level":"warn","ts":1715931248.7738066,"caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc00332ce00/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"info","ts":1715931248.773845,"caller":"v3@v3.5.10/client.go:210","msg":"Auto sync endpoints failed.","error":"context deadline exceeded"}

  0     0    0     0    0     0      0      0 --:--:--  0:00:20 --:--:--     0{"level":"warn","ts":1715931248.8704832,"caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc0026f1dc0/127.0.0.1:2479","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2479: connect: connection refused\""}
{"level":"info","ts":1715931248.8705206,"caller":"v3@v3.5.10/client.go:210","msg":"Auto sync endpoints failed.","error":"context deadline exceeded"}
{"level":"warn","ts":1715931249.7450116,"caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc001420a80/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"info","ts":1715931249.7450476,"caller":"v3@v3.5.10/client.go:210","msg":"Auto sync endpoints failed.","error":"context deadline exceeded"}

  0     0    0     0    0     0      0      0 --:--:--  0:00:21 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:22 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:23 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:24 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:25 --:--:--     0{"level":"warn","ts":"2024-05-17T15:34:14.589689+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000e14700/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-05-17T15:34:14.589883+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000b64fc0/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-05-17T15:34:14.67133+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000efe380/127.0.0.1:2479","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2479: connect: connection refused\""}

  0     0    0     0    0     0      0      0 --:--:--  0:00:26 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:27 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:28 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:29 --:--:--     0
100   135  100   135    0     0      4      0  0:00:33  0:00:30  0:00:03    27
100   135  100   135    0     0      4      0  0:00:33  0:00:30  0:00:03    33
+ synced_status='{
    "error_msg": "[CDC:ErrPDEtcdAPIError]etcd api call error: context deadline exceeded",
    "error_code": "CDC:ErrPDEtcdAPIError"
}'
++ echo '{' '"error_msg":' '"[CDC:ErrPDEtcdAPIError]etcd' api call error: context deadline 'exceeded",' '"error_code":' '"CDC:ErrPDEtcdAPIError"' '}'
++ jq -r .error_code
+ error_code=CDC:ErrPDEtcdAPIError
+ cleanup_process cdc.test
wait process cdc.test exit for 1-th time...
wait process cdc.test exit for 2-th time...
wait process cdc.test exit for 3-th time...
{"level":"warn","ts":"2024-05-17T15:34:20.590671+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000e14700/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-05-17T15:34:20.59161+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000b64fc0/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-05-17T15:34:20.672989+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000efe380/127.0.0.1:2479","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2479: connect: connection refused\""}
cdc.test: no process found
wait process cdc.test exit for 4-th time...
process cdc.test already exit
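
The cleanup_process trace above is a terminate-then-poll loop: ask the process to stop, then wait until no matching process remains. A hedged sketch, assuming pkill/pgrep rather than whatever the harness actually uses:

cleanup_process() {
    local name=$1
    pkill -f "$name" 2>/dev/null || true
    local i=1
    while pgrep -f "$name" >/dev/null; do
        echo "wait process $name exit for $i-th time..."
        sleep 1
        i=$((i + 1))
    done
    echo "process $name already exit"
}
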
+ stop_tidb_cluster
+ run_case_with_unavailable_tikv conf/changefeed.toml
+ rm -rf /tmp/tidb_cdc_test/synced_status
+ mkdir -p /tmp/tidb_cdc_test/synced_status
+ start_tidb_cluster --workdir /tmp/tidb_cdc_test/synced_status
shell-init: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
chdir: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
The 1 times to try to start tidb cluster...
shell-init: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
chdir: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
shell-init: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
chdir: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
chdir: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
start tidb cluster in /tmp/tidb_cdc_test/synced_status
Starting Upstream PD...
Release Version: v7.5.1-7-g7eb188c4f
Edition: Community
Git Commit Hash: 7eb188c4f8caba495a33eafedd4540afbc4ca6fc
Git Branch: release-7.5
UTC Build Time:  2024-05-13 04:29:07
Starting Downstream PD...
Release Version: v7.5.1-7-g7eb188c4f
Edition: Community
Git Commit Hash: 7eb188c4f8caba495a33eafedd4540afbc4ca6fc
Git Branch: release-7.5
UTC Build Time:  2024-05-13 04:29:07
Verifying upstream PD is started...
Verifying downstream PD is started...
Starting Upstream TiKV...
TiKV 
Release Version:   7.5.2
Edition:           Community
Git Commit Hash:   f2be3c0b9f0e60b619dade22410979ca91f4d85a
Git Commit Branch: release-7.5
UTC Build Time:    2024-05-14 11:07:23
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Enable Features:   pprof-fp jemalloc mem-profiling portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Profile:           dist_release
Starting Downstream TiKV...
TiKV 
Release Version:   7.5.2
Edition:           Community
Git Commit Hash:   f2be3c0b9f0e60b619dade22410979ca91f4d85a
Git Commit Branch: release-7.5
UTC Build Time:    2024-05-14 11:07:23
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Enable Features:   pprof-fp jemalloc mem-profiling portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Profile:           dist_release
Starting Upstream TiDB...
Release Version: v7.5.1-69-g19b7d4f8f3
Edition: Community
Git Commit Hash: 19b7d4f8f3d1492d7ace4da21a1cdc37a390b333
Git Branch: release-7.5
UTC Build Time: 2024-05-16 15:24:14
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Starting Downstream TiDB...
Release Version: v7.5.1-69-g19b7d4f8f3
Edition: Community
Git Commit Hash: 19b7d4f8f3d1492d7ace4da21a1cdc37a390b333
Git Branch: release-7.5
UTC Build Time: 2024-05-16 15:24:14
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Verifying Upstream TiDB is started...
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63e15e5e1fc0008	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-361-xz426, pid:19917, start at 2024-05-17 15:34:41.543821043 +0800 CST m=+5.193075338	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240517-15:36:41.551 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240517-15:34:41.535 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240517-15:24:41.535 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63e15e5e1fc0008	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-361-xz426, pid:19917, start at 2024-05-17 15:34:41.543821043 +0800 CST m=+5.193075338	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240517-15:36:41.551 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240517-15:34:41.535 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240517-15:24:41.535 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
Verifying Downstream TiDB is started...
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63e15e5e1c80010	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-361-xz426, pid:20000, start at 2024-05-17 15:34:41.538530281 +0800 CST m=+5.111605446	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240517-15:36:41.545 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240517-15:34:41.522 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240517-15:24:41.522 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
Starting Upstream TiFlash...
TiFlash
Release Version: v7.5.1-22-gacdbe728f
Edition:         Community
Git Commit Hash: acdbe728f97e2f5e0625d44d24ddbd1cd90d7a59
Git Branch:      HEAD
UTC Build Time:  2024-05-16 14:18:59
Enable Features: jemalloc sm4(GmSSL) avx2 avx512 unwind thinlto
Profile:         RELWITHDEBINFO

Raft Proxy
Git Commit Hash:   521fd9dbc55e58646045d88f91c3c35db50b5981
Git Commit Branch: HEAD
UTC Build Time:    2024-05-16 14:22:45
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Storage Engine:    tiflash
Prometheus Prefix: tiflash_proxy_
Profile:           release
Enable Features:    portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Verifying Upstream TiFlash is started...
Logging trace to /tmp/tidb_cdc_test/synced_status/tiflash/log/server.log
Logging errors to /tmp/tidb_cdc_test/synced_status/tiflash/log/error.log
arg matches is ArgMatches { args: {"data-dir": MatchedArg { occurs: 1, indices: [6], vals: ["/tmp/tidb_cdc_test/synced_status/tiflash/db/proxy"] }, "engine-git-hash": MatchedArg { occurs: 1, indices: [10], vals: ["acdbe728f97e2f5e0625d44d24ddbd1cd90d7a59"] }, "engine-addr": MatchedArg { occurs: 1, indices: [2], vals: ["127.0.0.1:9500"] }, "log-file": MatchedArg { occurs: 1, indices: [18], vals: ["/tmp/tidb_cdc_test/synced_status/tiflash/log/proxy.log"] }, "engine-version": MatchedArg { occurs: 1, indices: [12], vals: ["v7.5.1-22-gacdbe728f"] }, "pd-endpoints": MatchedArg { occurs: 1, indices: [16], vals: ["127.0.0.1:2379"] }, "config": MatchedArg { occurs: 1, indices: [8], vals: ["/tmp/tidb_cdc_test/synced_status/tiflash-proxy.toml"] }, "advertise-addr": MatchedArg { occurs: 1, indices: [4], vals: ["127.0.0.1:9000"] }, "engine-label": MatchedArg { occurs: 1, indices: [14], vals: ["tiflash"] }, "addr": MatchedArg { occurs: 1, indices: [20], vals: ["127.0.0.1:9000"] }}, subcommand: None, usage: Some("USAGE:\n    TiFlash Proxy [FLAGS] [OPTIONS] --engine-git-hash <engine-git-hash> --engine-label <engine-label> --engine-version <engine-version>") }
+ cd /tmp/tidb_cdc_test/synced_status
++ run_cdc_cli_tso_query 127.0.0.1 2379
+ pd_host=127.0.0.1
+ pd_port=2379
++ run_cdc_cli tso query --pd=http://127.0.0.1:2379
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.synced_status.cli.21251.out cli tso query --pd=http://127.0.0.1:2379
+ set +x
+ tso='449821091099049985
PASS
coverage: 1.8% of statements in github.com/pingcap/tiflow/...'
+ echo 449821091099049985 PASS coverage: 1.8% of statements in github.com/pingcap/tiflow/...
+ awk -F ' ' '{print $1}'
+ set +x
+ start_ts=449821091099049985
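
The start_ts above comes from asking PD for a current TSO through the cdc CLI and keeping only the first field of the output (the coverage-instrumented binary also prints PASS/coverage lines, which the awk discards). A sketch with a plain, non-instrumented invocation assumed:

run_cdc_cli_tso_query() {
    local pd_host=$1 pd_port=$2
    # keep only the TSO on the first output line
    cdc cli tso query --pd="http://${pd_host}:${pd_port}" | awk 'NR==1 {print $1}'
}
start_ts=$(run_cdc_cli_tso_query 127.0.0.1 2379)
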
+ run_cdc_server --workdir /tmp/tidb_cdc_test/synced_status --binary cdc.test
[Fri May 17 15:34:47 CST 2024] <<<<<< START cdc server in synced_status case >>>>>>
+ [[ '' == \t\r\u\e ]]
+ set +e
+ get_info_fail_msg='failed to get info:'
+ etcd_info_msg='etcd info'
+ '[' -z '' ']'
+ curl_status_cmd='curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info'
+ GO_FAILPOINTS=
+ [[ no != \n\o ]]
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.synced_status.2129621298.out server --log-file /tmp/tidb_cdc_test/synced_status/cdc.log --log-level debug --data-dir /tmp/tidb_cdc_test/synced_status/cdc_data --cluster-id default
+ (( i = 0 ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connection refused
* Failed connect to 127.0.0.1:8300; Connection refused
* Closing connection 0
+ res=
+ echo ''
+ grep -q 'failed to get info:'
+ echo ''
+ grep -q 'etcd info'
+ '[' 0 -eq 50 ']'
+ sleep 3
+ (( i++ ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connected to 127.0.0.1 (127.0.0.1) port 8300 (#0)
> GET /debug/info HTTP/1.1
> User-Agent: curl/7.29.0
> Host: 127.0.0.1:8300
> Accept: */*
> 
< HTTP/1.1 200 OK
< Date: Fri, 17 May 2024 07:34:50 GMT
< Content-Length: 613
< Content-Type: text/plain; charset=utf-8
< 
{ [data not shown]
* Connection #0 to host 127.0.0.1 left intact
+ res='

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/7cdf82e9-e47b-419b-8343-e309b28cb3c7
	{"id":"7cdf82e9-e47b-419b-8343-e309b28cb3c7","address":"127.0.0.1:8300","version":"v7.5.1-31-gc7ef293f7"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f85794e48f5
	7cdf82e9-e47b-419b-8343-e309b28cb3c7

/tidb/cdc/default/default/upstream/7369868691605374537
	{"id":7369868691605374537,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/7cdf82e9-e47b-419b-8343-e309b28cb3c7
	{"id":"7cdf82e9-e47b-419b-8343-e309b28cb3c7","address":"127.0.0.1:8300","version":"v7.5.1-31-gc7ef293f7"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f85794e48f5
	7cdf82e9-e47b-419b-8343-e309b28cb3c7

/tidb/cdc/default/default/upstream/7369868691605374537
	{"id":7369868691605374537,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'failed to get info:'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/7cdf82e9-e47b-419b-8343-e309b28cb3c7
	{"id":"7cdf82e9-e47b-419b-8343-e309b28cb3c7","address":"127.0.0.1:8300","version":"v7.5.1-31-gc7ef293f7"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f85794e48f5
	7cdf82e9-e47b-419b-8343-e309b28cb3c7

/tidb/cdc/default/default/upstream/7369868691605374537
	{"id":7369868691605374537,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'etcd info'
+ break
+ set +x
+ config_path=conf/changefeed.toml
+ SINK_URI='mysql://root@127.0.0.1:3306/?max-txn-row=1'
+ run_cdc_cli changefeed create --start-ts=449821091099049985 '--sink-uri=mysql://root@127.0.0.1:3306/?max-txn-row=1' --changefeed-id=test-1 --config=/home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_storage_test/tiflow/tests/integration_tests/synced_status/conf/changefeed.toml
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.synced_status.cli.21348.out cli changefeed create --start-ts=449821091099049985 '--sink-uri=mysql://root@127.0.0.1:3306/?max-txn-row=1' --changefeed-id=test-1 --config=/home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_storage_test/tiflow/tests/integration_tests/synced_status/conf/changefeed.toml
Create changefeed successfully!
ID: test-1
Info: {"upstream_id":7369868691605374537,"namespace":"default","id":"test-1","sink_uri":"mysql://root@127.0.0.1:3306/?max-txn-row=1","create_time":"2024-05-17T15:34:51.138970927+08:00","start_ts":449821091099049985,"config":{"memory_quota":1073741824,"case_sensitive":false,"force_replicate":false,"ignore_ineligible_table":false,"check_gc_safe_point":true,"enable_sync_point":false,"bdr_mode":false,"sync_point_interval":600000000000,"sync_point_retention":86400000000000,"filter":{"rules":["*.*"]},"mounter":{"worker_num":16},"sink":{"csv":{"delimiter":",","quote":"\"","null":"\\N","include_commit_ts":false,"binary_encoding_method":"base64"},"encoder_concurrency":32,"terminator":"\r\n","date_separator":"day","enable_partition_separator":true,"enable_kafka_sink_v2":false,"only_output_updated_columns":false,"delete_only_output_handle_key_columns":false,"advance_timeout":150,"send_bootstrap_interval_in_sec":120,"send_bootstrap_in_msg_count":10000,"send_bootstrap_to_all_partition":true,"open":{"output_old_value":true}},"consistent":{"level":"none","max_log_size":64,"flush_interval":2000,"meta_flush_interval":200,"encoding_worker_num":16,"flush_worker_num":8,"use_file_backend":false,"memory_usage":{"memory_quota_percentage":50,"event_cache_percentage":0}},"scheduler":{"enable_table_across_nodes":false,"region_threshold":100000,"write_key_threshold":0},"integrity":{"integrity_check_level":"none","corruption_handle_level":"warn"},"changefeed_error_stuck_duration":1800000000000,"sql_mode":"ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION","synced_status":{"synced_check_interval":120,"checkpoint_interval":20}},"state":"normal","creator_version":"v7.5.1-31-gc7ef293f7","resolved_ts":449821091099049985,"checkpoint_ts":449821091099049985,"checkpoint_time":"2024-05-17 15:34:46.236"}
PASS
coverage: 2.4% of statements in github.com/pingcap/tiflow/...
+ set +x
+ run_sql 'USE TEST;Create table t1(a int primary key, b int);insert into t1 values(1,2);insert into t1 values(2,3);'
+ check_table_exists test.t1 127.0.0.1 3306
table test.t1 not exists for 1-th check, retry later
table test.t1 exists
+ sleep 5
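
The "table test.t1 not exists for N-th check" lines above come from polling the downstream MySQL catalog until the table appears. A sketch of such a check, with credentials, retry count, and interval assumed for illustration:

check_table_exists() {
    local table=$1 host=$2 port=$3
    local schema=${table%%.*} name=${table##*.}
    local i
    for ((i = 1; i <= 60; i++)); do
        if mysql -h"$host" -P"$port" -uroot -N -e \
            "SELECT 1 FROM information_schema.tables WHERE table_schema='${schema}' AND table_name='${name}'" \
            | grep -q 1; then
            echo "table $table exists"
            return 0
        fi
        echo "table $table not exists for $i-th check, retry later"
        sleep 2
    done
    return 1
}
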
+ kill_tikv
++ ps aux
++ grep tikv-server
++ grep /tmp/tidb_cdc_test/synced_status
+ info='jenkins    19306 16.6  0.4 3769320 1617600 ?     Sl   15:34   0:04 tikv-server --pd 127.0.0.1:2379 -A 127.0.0.1:20160 --status-addr 127.0.0.1:20181 --log-file /tmp/tidb_cdc_test/synced_status/tikv1.log --log-level debug -C /tmp/tidb_cdc_test/synced_status/tikv-config.toml -s /tmp/tidb_cdc_test/synced_status/tikv1
jenkins    19307 16.3  0.4 3769320 1614236 ?     Sl   15:34   0:04 tikv-server --pd 127.0.0.1:2379 -A 127.0.0.1:20161 --status-addr 127.0.0.1:20182 --log-file /tmp/tidb_cdc_test/synced_status/tikv2.log --log-level debug -C /tmp/tidb_cdc_test/synced_status/tikv-config.toml -s /tmp/tidb_cdc_test/synced_status/tikv2
jenkins    19308 22.6  0.4 3816936 1683144 ?     Sl   15:34   0:05 tikv-server --pd 127.0.0.1:2379 -A 127.0.0.1:20162 --status-addr 127.0.0.1:20183 --log-file /tmp/tidb_cdc_test/synced_status/tikv3.log --log-level debug -C /tmp/tidb_cdc_test/synced_status/tikv-config.toml -s /tmp/tidb_cdc_test/synced_status/tikv3
jenkins    19310 22.0  0.4 3810788 1675156 ?     Sl   15:34   0:05 tikv-server --pd 127.0.0.1:2479 -A 127.0.0.1:21160 --status-addr 127.0.0.1:21180 --log-file /tmp/tidb_cdc_test/synced_status/tikv_down.log --log-level debug -C /tmp/tidb_cdc_test/synced_status/tikv-config.toml -s /tmp/tidb_cdc_test/synced_status/tikv_down'
++ ps aux
++ grep tikv-server
++ grep /tmp/tidb_cdc_test/synced_status
++ awk '{print $2}'
++ xargs kill -9
++ curl -X GET http://127.0.0.1:8300/api/v2/changefeeds/test-1/synced
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100   243  100   243    0     0   2399      0 --:--:-- --:--:-- --:--:--  2405
+ synced_status='{"synced":false,"sink_checkpoint_ts":"2024-05-17 15:34:59.435","puller_resolved_ts":"1970-01-01 08:00:00.000","last_synced_ts":"2024-05-17 15:34:52.635","now_ts":"2024-05-17 15:34:59.000","info":"The data syncing is not finished, please wait"}'
++ echo '{"synced":false,"sink_checkpoint_ts":"2024-05-17' '15:34:59.435","puller_resolved_ts":"1970-01-01' '08:00:00.000","last_synced_ts":"2024-05-17' '15:34:52.635","now_ts":"2024-05-17' '15:34:59.000","info":"The' data syncing is not finished, please 'wait"}'
++ jq .synced
+ status=false
+ '[' false '!=' false ']'
++ echo '{"synced":false,"sink_checkpoint_ts":"2024-05-17' '15:34:59.435","puller_resolved_ts":"1970-01-01' '08:00:00.000","last_synced_ts":"2024-05-17' '15:34:52.635","now_ts":"2024-05-17' '15:34:59.000","info":"The' data syncing is not finished, please 'wait"}'
++ jq -r .info
+ info='The data syncing is not finished, please wait'
+ target_message='The data syncing is not finished, please wait'
+ '[' 'The data syncing is not finished, please wait' '!=' 'The data syncing is not finished, please wait' ']'
+ sleep 130
ERROR: Failed to launch xtiflow-release-7-5-pull-cdc-integration-storage-test-361-0v3j1
java.lang.IllegalStateException: Node was deleted, computer is null
	at org.csanchez.jenkins.plugins.kubernetes.KubernetesLauncher.launch(KubernetesLauncher.java:203)
	at hudson.slaves.SlaveComputer.lambda$_connect$0(SlaveComputer.java:297)
	at jenkins.util.ContextResettingExecutorService$2.call(ContextResettingExecutorService.java:46)
	at jenkins.security.ImpersonatingExecutorService$2.call(ImpersonatingExecutorService.java:80)
	at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:829)
Cancelling nested steps due to timeout
Sending interrupt signal to process
Killing processes
kill finished with exit code 0
Sending interrupt signal to process
Killing processes
kill finished with exit code 0
++ stop_tidb_cluster
/home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_storage_test/tiflow/tests/integration_tests/synced_status/run.sh: line 1: 21421 Terminated              sleep 130
/home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_storage_test/tiflow/tests/integration_tests/synced_status/run.sh: line 1: 21505 Terminated              sleep 130
{"level":"warn","ts":"2024-05-17T15:36:21.43184+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc00086cfc0/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-05-17T15:36:21.439503+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc001094380/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-05-17T15:36:21.490994+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc00102c1c0/127.0.0.1:2479","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2479: connect: connection refused\""}
script returned exit code 143
[Pipeline] }
Cache not saved (inner-step execution failed)
[Pipeline] // cache
[Pipeline] }
[Pipeline] // dir
[Pipeline] }
[Pipeline] // withCredentials
[Pipeline] }
[Pipeline] // timeout
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // container
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // podTemplate
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
Failed in branch Matrix - TEST_GROUP = 'G09'
[Pipeline] // parallel
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // timeout
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // container
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // podTemplate
[Pipeline] End of Pipeline
Timeout has been exceeded
org.jenkinsci.plugins.workflow.actions.ErrorAction$ErrorId: c77e9679-7548-4b83-8fb6-cf5d33afd7dd
Finished: ABORTED