Skipping 1,852 KB..
/tidb/cdc/default/default/changefeed/status/ddl-only-block-related-table
	{"checkpoint-ts":449409905116053517,"min-table-barrier-ts":449409923820027911,"admin-job-type":0}

/tidb/cdc/default/default/task/position/0ccbd4c6-ea2f-4336-91ad-c0dd22326477/ddl-only-block-related-table
	{"checkpoint-ts":0,"resolved-ts":0,"count":0,"error":null,"warning":null}

/tidb/cdc/default/default/upstream/7363131731462806829
	{"id":7363131731462806829,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ echo '

*** owner info ***:



*** processors info ***:

changefeedID: default/ddl-only-block-related-table
{UpstreamID:7363131731462806829 Namespace:default ID:ddl-only-block-related-table SinkURI:file:///tmp/tidb_cdc_test/ddl_only_block_related_table/storage_test/ticdc-common-1-test-1723?protocol=canal-json&enable-tidb-extension=true CreateTime:2024-04-29 11:51:57.288544303 +0800 CST StartTs:449409900135317506 TargetTs:0 AdminJobType:noop Engine:unified SortDir: Config:0xc003e3a360 State:normal Error:<nil> Warning:<nil> CreatorVersion:v7.5.1-19-gd6783fb76 Epoch:449409900148424710}
{CheckpointTs:449409905116053517 MinTableBarrierTs:449409923570991111 AdminJobType:noop}
span: {table_id:104,start_key:7480000000000000ff685f720000000000fa,end_key:7480000000000000ff685f730000000000fa}, resolvedTs: 449409905116053517, checkpointTs: 449409905116053517, state: Preparing
span: {table_id:106,start_key:7480000000000000ff6a5f720000000000fa,end_key:7480000000000000ff6a5f730000000000fa}, resolvedTs: 449409905116053517, checkpointTs: 449409905116053517, state: Preparing
span: {table_id:108,start_key:7480000000000000ff6c5f720000000000fa,end_key:7480000000000000ff6c5f730000000000fa}, resolvedTs: 449409905116053517, checkpointTs: 449409905116053517, state: Preparing
span: {table_id:110,start_key:7480000000000000ff6e5f720000000000fa,end_key:7480000000000000ff6e5f730000000000fa}, resolvedTs: 449409905116053517, checkpointTs: 449409905116053517, state: Preparing



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/0ccbd4c6-ea2f-4336-91ad-c0dd22326477
	{"id":"0ccbd4c6-ea2f-4336-91ad-c0dd22326477","address":"127.0.0.1:8300","version":"v7.5.1-19-gd6783fb76"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f27facc548e
	0ccbd4c6-ea2f-4336-91ad-c0dd22326477

/tidb/cdc/default/default/changefeed/info/ddl-only-block-related-table
	{"upstream-id":7363131731462806829,"namespace":"default","changefeed-id":"ddl-only-block-related-table","sink-uri":"file:///tmp/tidb_cdc_test/ddl_only_block_related_table/storage_test/ticdc-common-1-test-1723?protocol=canal-json\u0026enable-tidb-extension=true","create-time":"2024-04-29T11:51:57.288544303+08:00","start-ts":449409900135317506,"target-ts":0,"admin-job-type":0,"sort-engine":"","sort-dir":"","config":{"memory-quota":1073741824,"case-sensitive":false,"force-replicate":false,"check-gc-safe-point":true,"enable-sync-point":false,"ignore-ineligible-table":false,"bdr-mode":false,"sync-point-interval":600000000000,"sync-point-retention":86400000000000,"filter":{"rules":["*.*"],"ignore-txn-start-ts":null,"event-filters":null},"mounter":{"worker-num":16},"sink":{"protocol":"canal-json","csv":{"delimiter":",","quote":"\"","null":"\\N","include-commit-ts":false,"binary-encoding-method":"base64"},"encoder-concurrency":32,"terminator":"\r\n","date-separator":"day","enable-partition-separator":true,"file-index-digit":20,"enable-kafka-sink-v2":false,"only-output-updated-columns":false,"delete-only-output-handle-key-columns":false,"advance-timeout-in-sec":150,"send-bootstrap-interval-in-sec":120,"send-bootstrap-in-msg-count":10000,"send-bootstrap-to-all-partition":true},"consistent":{"level":"none","max-log-size":64,"flush-interval":2000,"meta-flush-interval":200,"encoding-worker-num":16,"flush-worker-num":8,"storage":"","use-file-backend":false,"compression":"","memory-usage":{"memory-quota-percentage":50,"event-cache-percentage":0}},"scheduler":{"enable-table-across-nodes":false,"region-threshold":100000,"write-key-threshold":0,"region-per-span":0},"integrity":{"integrity-check-level":"none","corruption-handle-level":"warn"},"changefeed-error-stuck-duration":1800000000000,"sql-mode":"ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION","synced-status":{"synced-check-interval":300,"checkpoint-interval":15}},"state":"normal","error":null,"warning":null,"creator-version":"v7.5.1-19-gd6783fb76","epoch":449409900148424710}

+ grep -q 'etcd info'
/tidb/cdc/default/default/changefeed/status/ddl-only-block-related-table
	{"checkpoint-ts":449409905116053517,"min-table-barrier-ts":449409923820027911,"admin-job-type":0}

/tidb/cdc/default/default/task/position/0ccbd4c6-ea2f-4336-91ad-c0dd22326477/ddl-only-block-related-table
	{"checkpoint-ts":0,"resolved-ts":0,"count":0,"error":null,"warning":null}

/tidb/cdc/default/default/upstream/7363131731462806829
	{"id":7363131731462806829,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ break
+ set +x
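The trace above is the harness's readiness probe: it polls the capture's /debug/info endpoint until the dump contains 'etcd info' (server ready) or 'failed to get info:' (server up but unhealthy). A minimal standalone sketch of that loop, assuming the same 50-attempt/3-second budget seen in the trace (the function name wait_cdc_ready is ours, not the harness's):

#!/usr/bin/env bash
# Sketch only: poll a TiCDC capture until /debug/info dumps its etcd state.
wait_cdc_ready() {
    local addr=$1
    local res
    for i in $(seq 0 50); do
        res=$(curl -vsL --max-time 20 "http://${addr}/debug/info" 2>&1)
        # "failed to get info:" means the HTTP server answered but is unhealthy.
        echo "$res" | grep -q 'failed to get info:' && return 1
        # "etcd info" marks a complete owner/processors/etcd dump.
        echo "$res" | grep -q 'etcd info' && return 0
        [ "$i" -eq 50 ] && return 1   # ~150s budget exhausted
        sleep 3
    done
    return 1
}
wait_cdc_ready 127.0.0.1:8300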
check diff failed 1-th time, retry later
check diff successfully
check_ts_forward ddl-only-block-related-table
table partition_table.finish_mark not exists for 2-th check, retry later
table force_replicate_table.t0 exists
table force_replicate_table.t1 exists
table force_replicate_table.t2 exists
table force_replicate_table.t3 exists
table force_replicate_table.t4 exists
table force_replicate_table.t5 not exists for 1-th check, retry later
table sink_hang.t1 not exists for 4-th check, retry later
changefeed is working normally rts: 449409924344315900->449409924619304960 checkpoint: 449409924344315900->449409924619304960
run task successfully
wait process cdc.test exit for 1-th time...
table partition_table.finish_mark not exists for 3-th check, retry later
wait process cdc.test exit for 2-th time...
cdc.test: no process found
wait process cdc.test exit for 3-th time...
process cdc.test already exit
[Mon Apr 29 11:53:33 CST 2024] <<<<<< run test case ddl_only_block_related_table success! >>>>>>
table force_replicate_table.t5 not exists for 2-th check, retry later
table sink_hang.t1 exists
table sink_hang.t2 exists
check diff successfully
wait process cdc.test exit for 1-th time...

  0     0    0     0    0     0      0      0 --:--:--  0:00:05 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:06 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:07 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:08 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:09 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:10 --:--:--     0
{"level":"warn","ts":"2024-04-29T11:53:34.038965+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000ef8380/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-04-29T11:53:34.048017+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000e56e00/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-04-29T11:53:34.140978+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000e44e00/127.0.0.1:2479","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2479: connect: connection refused\""}
wait process cdc.test exit for 2-th time...
table partition_table.finish_mark not exists for 4-th check, retry later
cdc.test: no process found
wait process cdc.test exit for 3-th time...
process cdc.test already exit
[Mon Apr 29 11:53:35 CST 2024] <<<<<< run test case sink_hang success! >>>>>>
table force_replicate_table.t5 not exists for 3-th check, retry later

  0     0    0     0    0     0      0      0 --:--:--  0:00:11 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:12 --:--:--     0
{"level":"warn","ts":"2024-04-29T11:53:36.026822+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000ef8380/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"info","ts":"2024-04-29T11:53:36.026883+0800","logger":"etcd-client","caller":"v3@v3.5.10/client.go:210","msg":"Auto sync endpoints failed.","error":"context deadline exceeded"}
{"level":"warn","ts":"2024-04-29T11:53:36.035594+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000e56e00/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"info","ts":"2024-04-29T11:53:36.035627+0800","logger":"etcd-client","caller":"v3@v3.5.10/client.go:210","msg":"Auto sync endpoints failed.","error":"context deadline exceeded"}
{"level":"warn","ts":"2024-04-29T11:53:36.126213+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000e44e00/127.0.0.1:2479","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2479: connect: connection refused\""}
{"level":"info","ts":"2024-04-29T11:53:36.126268+0800","logger":"etcd-client","caller":"v3@v3.5.10/client.go:210","msg":"Auto sync endpoints failed.","error":"context deadline exceeded"}
table partition_table.finish_mark not exists for 5-th check, retry later
table force_replicate_table.t5 not exists for 4-th check, retry later
table partition_table.finish_mark not exists for 6-th check, retry later
table force_replicate_table.t5 not exists for 5-th check, retry later

  0     0    0     0    0     0      0      0 --:--:--  0:00:13 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:14 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:15 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:16 --:--:--     0
{"level":"warn","ts":"2024-04-29T11:53:40.040972+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000ef8380/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-04-29T11:53:40.049128+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000e56e00/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-04-29T11:53:40.142534+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000e44e00/127.0.0.1:2479","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2479: connect: connection refused\""}
table partition_table.finish_mark not exists for 7-th check, retry later
table force_replicate_table.t5 exists
table force_replicate_table.t6 not exists for 1-th check, retry later
table partition_table.finish_mark not exists for 8-th check, retry later
table force_replicate_table.t6 not exists for 2-th check, retry later
table partition_table.finish_mark not exists for 9-th check, retry later
table force_replicate_table.t6 not exists for 3-th check, retry later
=================>> Running test /home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_storage_test/tiflow/tests/integration_tests/changefeed_auto_stop/run.sh using Sink-Type: storage... <<=================
The 1 times to try to start tidb cluster...

  0     0    0     0    0     0      0      0 --:--:--  0:00:17 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:18 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:19 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:20 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:21 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:22 --:--:--     0
{"level":"warn","ts":"2024-04-29T11:53:46.042372+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000ef8380/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-04-29T11:53:46.04988+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000e56e00/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-04-29T11:53:46.144554+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000e44e00/127.0.0.1:2479","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2479: connect: connection refused\""}
{"level":"warn","ts":1714362826.209288,"caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc002f9a540/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"info","ts":1714362826.2093532,"caller":"v3@v3.5.10/client.go:210","msg":"Auto sync endpoints failed.","error":"context deadline exceeded"}
{"level":"warn","ts":1714362826.2290344,"caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc00205b500/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"info","ts":1714362826.2290854,"caller":"v3@v3.5.10/client.go:210","msg":"Auto sync endpoints failed.","error":"context deadline exceeded"}
{"level":"warn","ts":1714362826.2871907,"caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc002fbca80/127.0.0.1:2479","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2479: connect: connection refused\""}
{"level":"info","ts":1714362826.2872462,"caller":"v3@v3.5.10/client.go:210","msg":"Auto sync endpoints failed.","error":"context deadline exceeded"}
table partition_table.finish_mark not exists for 10-th check, retry later
<<< Run all test success >>>
[Pipeline] }
Cache not saved (ws/jenkins-pingcap-tiflow-release-7.5-pull_cdc_integration_storage_test-304/tiflow-cdc already exists)
[Pipeline] // cache
[Pipeline] }
[Pipeline] // dir
[Pipeline] }
[Pipeline] // withCredentials
[Pipeline] }
[Pipeline] // timeout
[Pipeline] }
start tidb cluster in /tmp/tidb_cdc_test/changefeed_auto_stop
Starting Upstream PD...
Release Version: v7.5.1-5-g584533652
Edition: Community
Git Commit Hash: 58453365285465cd90bc4472cff2bad7ce4d764b
Git Branch: release-7.5
UTC Build Time:  2024-04-03 10:04:14
Starting Downstream PD...
Release Version: v7.5.1-5-g584533652
Edition: Community
Git Commit Hash: 58453365285465cd90bc4472cff2bad7ce4d764b
Git Branch: release-7.5
UTC Build Time:  2024-04-03 10:04:14
Verifying upstream PD is started...
[Pipeline] // stage
[Pipeline] }
[Pipeline] // container
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // podTemplate
[Pipeline] }
table force_replicate_table.t6 not exists for 4-th check, retry later
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
table partition_table.finish_mark not exists for 11-th check, retry later
Verifying downstream PD is started...
table force_replicate_table.t6 not exists for 5-th check, retry later
Starting Upstream TiKV...
TiKV 
Release Version:   7.5.2
Edition:           Community
Git Commit Hash:   3478895c2a700e4824bb41940260b6b28013275e
Git Commit Branch: release-7.5
UTC Build Time:    2024-04-28 08:20:54
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Enable Features:   pprof-fp jemalloc mem-profiling portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Profile:           dist_release
Starting Downstream TiKV...
TiKV 
Release Version:   7.5.2
Edition:           Community
Git Commit Hash:   3478895c2a700e4824bb41940260b6b28013275e
Git Commit Branch: release-7.5
UTC Build Time:    2024-04-28 08:20:54
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Enable Features:   pprof-fp jemalloc mem-profiling portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Profile:           dist_release
table partition_table.finish_mark exists
check diff successfully
wait process cdc.test exit for 1-th time...
table force_replicate_table.t6 exists
check_data_subset force_replicate_table.t0 127.0.0.1 4000 127.0.0.1 3306
wait process cdc.test exit for 2-th time...
wait process cdc.test exit for 3-th time...

  0     0    0     0    0     0      0      0 --:--:--  0:00:23 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:24 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:25 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:26 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:27 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:28 --:--:--     0
{"level":"warn","ts":"2024-04-29T11:53:52.044279+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000ef8380/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-04-29T11:53:52.05198+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000e56e00/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused\""}
{"level":"warn","ts":"2024-04-29T11:53:52.145573+0800","logger":"etcd-client","caller":"v3@v3.5.10/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000e44e00/127.0.0.1:2479","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \"transport: Error while dialing: dial tcp 127.0.0.1:2479: connect: connection refused\""}
Starting Upstream TiDB...
Release Version: v7.5.1-45-gbf84e231e6
Edition: Community
Git Commit Hash: bf84e231e6ef26891d0cb524d938345f43aa047c
Git Branch: release-7.5
UTC Build Time: 2024-04-29 02:05:15
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Starting Downstream TiDB...
run task successfully
check_data_subset force_replicate_table.t1 127.0.0.1 4000 127.0.0.1 3306
Release Version: v7.5.1-45-gbf84e231e6
Edition: Community
Git Commit Hash: bf84e231e6ef26891d0cb524d938345f43aa047c
Git Branch: release-7.5
UTC Build Time: 2024-04-29 02:05:15
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Verifying Upstream TiDB is started...
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
cdc.test: no process found
wait process cdc.test exit for 4-th time...
process cdc.test already exit
[Mon Apr 29 11:53:52 CST 2024] <<<<<< run test case csv_storage_partition_table success! >>>>>>
run task successfully
check_data_subset force_replicate_table.t2 127.0.0.1 4000 127.0.0.1 3306

  0     0    0     0    0     0      0      0 --:--:--  0:00:29 --:--:--     0
100   135  100   135    0     0      4      0  0:00:33  0:00:30  0:00:03    27
100   135  100   135    0     0      4      0  0:00:33  0:00:30  0:00:03    33
+ synced_status='{
    "error_msg": "[CDC:ErrPDEtcdAPIError]etcd api call error: context deadline exceeded",
    "error_code": "CDC:ErrPDEtcdAPIError"
}'
++ jq -r .error_code
++ echo '{' '"error_msg":' '"[CDC:ErrPDEtcdAPIError]etcd' api call error: context deadline 'exceeded",' '"error_code":' '"CDC:ErrPDEtcdAPIError"' '}'
+ error_code=CDC:ErrPDEtcdAPIError
+ cleanup_process cdc.test
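The synced-status check above boils down to a single jq extraction over the JSON that curl finally returned after roughly 30 seconds of connection refusals. A sketch of that step (the $synced_status payload is the one shown in the trace; the API call that produced it is outside this excerpt):

# Sketch: assert that the synced-status API surfaced the expected PD error.
synced_status='{
    "error_msg": "[CDC:ErrPDEtcdAPIError]etcd api call error: context deadline exceeded",
    "error_code": "CDC:ErrPDEtcdAPIError"
}'
error_code=$(echo "$synced_status" | jq -r .error_code)
if [ "$error_code" != "CDC:ErrPDEtcdAPIError" ]; then
    echo "unexpected error code: $error_code" >&2
    exit 1
fi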
run task successfully
check_data_subset force_replicate_table.t3 127.0.0.1 4000 127.0.0.1 3306
wait process cdc.test exit for 1-th time...
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
wait process cdc.test exit for 2-th time...
run task successfully
check_data_subset force_replicate_table.t4 127.0.0.1 4000 127.0.0.1 3306
cdc.test: no process found
wait process cdc.test exit for 3-th time...
process cdc.test already exit
+ stop_tidb_cluster
run task successfully
check_data_subset force_replicate_table.t5 127.0.0.1 4000 127.0.0.1 3306
run task successfully
check_data_subset force_replicate_table.t6 127.0.0.1 4000 127.0.0.1 3306
id=7,a=NULL doesn't exist in downstream table force_replicate_table.t6
run task failed 1-th time, retry later
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
check_data_subset force_replicate_table.t6 127.0.0.1 4000 127.0.0.1 3306
id=7,a=NULL doesn't exist in downstream table force_replicate_table.t6
run task failed 2-th time, retry later
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63c9ff3a99c0009	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-304-w68xk, pid:9658, start at 2024-04-29 11:53:57.617853718 +0800 CST m=+5.338392619	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240429-11:55:57.626 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240429-11:53:57.607 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240429-11:43:57.607 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
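The VARIABLE_NAME/VARIABLE_VALUE/COMMENT dump above is the contents of TiDB's mysql.tidb bookkeeping table, printed while the harness waits for the server to come up. A sketch of a query that yields the same three columns (the exact statement and client flags the harness uses are assumptions):

# Sketch: dump TiDB bootstrap/GC bookkeeping, as seen in the log above.
mysql -h 127.0.0.1 -P 4000 -u root \
      -e 'SELECT VARIABLE_NAME, VARIABLE_VALUE, COMMENT FROM mysql.tidb;'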
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63c9ff3a99c0009	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-304-w68xk, pid:9658, start at 2024-04-29 11:53:57.617853718 +0800 CST m=+5.338392619	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240429-11:55:57.626 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240429-11:53:57.607 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240429-11:43:57.607 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
Verifying Downstream TiDB is started...
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63c9ff3ab00000b	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-304-w68xk, pid:9717, start at 2024-04-29 11:53:57.707166564 +0800 CST m=+5.349244398	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240429-11:55:57.714 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240429-11:53:57.696 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240429-11:43:57.696 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
Starting Upstream TiFlash...
TiFlash
Release Version: v7.5.1-12-g9002cc34d
Edition:         Community
Git Commit Hash: 9002cc34d3b593a718b6c5260ba18f30a45ab314
Git Branch:      HEAD
UTC Build Time:  2024-04-18 07:24:48
Enable Features: jemalloc sm4(GmSSL) avx2 avx512 unwind thinlto
Profile:         RELWITHDEBINFO

Raft Proxy
Git Commit Hash:   521fd9dbc55e58646045d88f91c3c35db50b5981
Git Commit Branch: HEAD
UTC Build Time:    2024-04-18 07:28:40
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Storage Engine:    tiflash
Prometheus Prefix: tiflash_proxy_
Profile:           release
Enable Features:    portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Verifying Upstream TiFlash is started...
Logging trace to /tmp/tidb_cdc_test/changefeed_auto_stop/tiflash/log/server.log
Logging errors to /tmp/tidb_cdc_test/changefeed_auto_stop/tiflash/log/error.log
arg matches is ArgMatches { args: {"data-dir": MatchedArg { occurs: 1, indices: [6], vals: ["/tmp/tidb_cdc_test/changefeed_auto_stop/tiflash/db/proxy"] }, "config": MatchedArg { occurs: 1, indices: [8], vals: ["/tmp/tidb_cdc_test/changefeed_auto_stop/tiflash-proxy.toml"] }, "engine-addr": MatchedArg { occurs: 1, indices: [2], vals: ["127.0.0.1:9500"] }, "engine-version": MatchedArg { occurs: 1, indices: [12], vals: ["v7.5.1-12-g9002cc34d"] }, "log-file": MatchedArg { occurs: 1, indices: [18], vals: ["/tmp/tidb_cdc_test/changefeed_auto_stop/tiflash/log/proxy.log"] }, "engine-git-hash": MatchedArg { occurs: 1, indices: [10], vals: ["9002cc34d3b593a718b6c5260ba18f30a45ab314"] }, "pd-endpoints": MatchedArg { occurs: 1, indices: [16], vals: ["127.0.0.1:2379"] }, "advertise-addr": MatchedArg { occurs: 1, indices: [4], vals: ["127.0.0.1:9000"] }, "engine-label": MatchedArg { occurs: 1, indices: [14], vals: ["tiflash"] }, "addr": MatchedArg { occurs: 1, indices: [20], vals: ["127.0.0.1:9000"] }}, subcommand: None, usage: Some("USAGE:\n    TiFlash Proxy [FLAGS] [OPTIONS] --engine-git-hash <engine-git-hash> --engine-label <engine-label> --engine-version <engine-version>") }
check_data_subset force_replicate_table.t6 127.0.0.1 4000 127.0.0.1 3306
id=7,a=NULL doesn't exist in downstream table force_replicate_table.t6
run task failed 3-th time, retry later
***************** properties *****************
"readallfields"="true"
"insertproportion"="0"
"requestdistribution"="uniform"
"mysql.port"="4000"
"readproportion"="0"
"workload"="core"
"dotransactions"="false"
"mysql.host"="127.0.0.1"
"recordcount"="20"
"operationcount"="0"
"scanproportion"="0"
"mysql.user"="root"
"mysql.db"="changefeed_auto_stop_1"
"threadcount"="4"
"updateproportion"="0"
**********************************************
Run finished, takes 9.774168ms
INSERT - Takes(s): 0.0, Count: 20, OPS: 3645.2, Avg(us): 1831, Min(us): 978, Max(us): 4405, 95th(us): 5000, 99th(us): 5000
***************** properties *****************
"operationcount"="0"
"readproportion"="0"
"mysql.host"="127.0.0.1"
"mysql.port"="4000"
"insertproportion"="0"
"threadcount"="4"
"workload"="core"
"readallfields"="true"
"scanproportion"="0"
"mysql.user"="root"
"requestdistribution"="uniform"
"dotransactions"="false"
"recordcount"="20"
"mysql.db"="changefeed_auto_stop_2"
"updateproportion"="0"
**********************************************
Run finished, takes 10.82174ms
INSERT - Takes(s): 0.0, Count: 20, OPS: 3275.7, Avg(us): 2028, Min(us): 1148, Max(us): 4760, 95th(us): 5000, 99th(us): 5000
***************** properties *****************
"scanproportion"="0"
"mysql.user"="root"
"dotransactions"="false"
"readproportion"="0"
"recordcount"="20"
"requestdistribution"="uniform"
"insertproportion"="0"
"mysql.host"="127.0.0.1"
"mysql.db"="changefeed_auto_stop_3"
"workload"="core"
"threadcount"="4"
"mysql.port"="4000"
"operationcount"="0"
"updateproportion"="0"
"readallfields"="true"
**********************************************
Run finished, takes 10.485519ms
INSERT - Takes(s): 0.0, Count: 19, OPS: 3435.7, Avg(us): 1804, Min(us): 1002, Max(us): 5049, 95th(us): 6000, 99th(us): 6000
***************** properties *****************
"requestdistribution"="uniform"
"updateproportion"="0"
"mysql.user"="root"
"readallfields"="true"
"mysql.port"="4000"
"insertproportion"="0"
"threadcount"="4"
"mysql.host"="127.0.0.1"
"workload"="core"
"mysql.db"="changefeed_auto_stop_4"
"scanproportion"="0"
"dotransactions"="false"
"recordcount"="20"
"operationcount"="0"
"readproportion"="0"
**********************************************
Run finished, takes 11.693159ms
INSERT - Takes(s): 0.0, Count: 20, OPS: 3178.5, Avg(us): 2190, Min(us): 1112, Max(us): 5621, 95th(us): 6000, 99th(us): 6000
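The four property blocks and INSERT summaries above are go-ycsb load phases, one per changefeed_auto_stop_<i> database, each inserting 20 rows with 4 threads. A sketch of an equivalent invocation (the workload file path is an assumption; each -p value comes from the printed properties):

# Sketch: reproduce the loads above with go-ycsb.
for i in 1 2 3 4; do
    go-ycsb load mysql -P workload \
        -p mysql.host=127.0.0.1 -p mysql.port=4000 -p mysql.user=root \
        -p mysql.db=changefeed_auto_stop_${i} \
        -p recordcount=20 -p operationcount=0 -p threadcount=4 \
        -p workload=core -p requestdistribution=uniform
done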
[Mon Apr 29 11:54:03 CST 2024] <<<<<< START cdc server in changefeed_auto_stop case >>>>>>
+ [[ '' == \t\r\u\e ]]
+ set +e
+ get_info_fail_msg='failed to get info:'
+ etcd_info_msg='etcd info'
+ '[' -z '' ']'
+ curl_status_cmd='curl -vsL --max-time 20 http://127.0.0.1:8301/debug/info'
+ [[ no != \n\o ]]
+ GO_FAILPOINTS=
+ (( i = 0 ))
+ (( i <= 50 ))
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.changefeed_auto_stop.1109311095.out server --log-file /tmp/tidb_cdc_test/changefeed_auto_stop/cdc1.log --log-level debug --data-dir /tmp/tidb_cdc_test/changefeed_auto_stop/cdc_data1 --cluster-id default --addr 127.0.0.1:8301 --pd http://127.0.0.1:2379
++ curl -vsL --max-time 20 http://127.0.0.1:8301/debug/info
* About to connect() to 127.0.0.1 port 8301 (#0)
*   Trying 127.0.0.1...
* Connection refused
* Failed connect to 127.0.0.1:8301; Connection refused
* Closing connection 0
+ res=
+ echo ''
+ grep -q 'failed to get info:'
+ echo ''
+ grep -q 'etcd info'
+ '[' 0 -eq 50 ']'
+ sleep 3
<<< Run all test success >>>
[Pipeline] }
Cache not saved (ws/jenkins-pingcap-tiflow-release-7.5-pull_cdc_integration_storage_test-304/tiflow-cdc already exists)
[Pipeline] // cache
[Pipeline] }
[Pipeline] // dir
[Pipeline] }
[Pipeline] // withCredentials
[Pipeline] }
[Pipeline] // timeout
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // container
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // podTemplate
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
+ (( i++ ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8301/debug/info
* About to connect() to 127.0.0.1 port 8301 (#0)
*   Trying 127.0.0.1...
* Connected to 127.0.0.1 (127.0.0.1) port 8301 (#0)
> GET /debug/info HTTP/1.1
> User-Agent: curl/7.29.0
> Host: 127.0.0.1:8301
> Accept: */*
> 
< HTTP/1.1 200 OK
< Date: Mon, 29 Apr 2024 03:54:06 GMT
< Content-Length: 613
< Content-Type: text/plain; charset=utf-8
< 
{ [data not shown]
* Connection #0 to host 127.0.0.1 left intact
+ res='

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/ddceacd9-f3ad-49af-aabb-54a61d5612f0
	{"id":"ddceacd9-f3ad-49af-aabb-54a61d5612f0","address":"127.0.0.1:8301","version":"v7.5.1-19-gd6783fb76"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f27fcc38242
	ddceacd9-f3ad-49af-aabb-54a61d5612f0

/tidb/cdc/default/default/upstream/7363132282580286811
	{"id":7363132282580286811,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/ddceacd9-f3ad-49af-aabb-54a61d5612f0
	{"id":"ddceacd9-f3ad-49af-aabb-54a61d5612f0","address":"127.0.0.1:8301","version":"v7.5.1-19-gd6783fb76"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f27fcc38242
	ddceacd9-f3ad-49af-aabb-54a61d5612f0

/tidb/cdc/default/default/upstream/7363132282580286811
	{"id":7363132282580286811,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'failed to get info:'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/ddceacd9-f3ad-49af-aabb-54a61d5612f0
	{"id":"ddceacd9-f3ad-49af-aabb-54a61d5612f0","address":"127.0.0.1:8301","version":"v7.5.1-19-gd6783fb76"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f27fcc38242
	ddceacd9-f3ad-49af-aabb-54a61d5612f0

/tidb/cdc/default/default/upstream/7363132282580286811
	{"id":7363132282580286811,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'etcd info'
+ break
+ set +x
[Mon Apr 29 11:54:06 CST 2024] <<<<<< START cdc server in changefeed_auto_stop case >>>>>>
+ [[ '' == \t\r\u\e ]]
+ set +e
+ get_info_fail_msg='failed to get info:'
+ etcd_info_msg='etcd info'
+ '[' -z '' ']'
+ curl_status_cmd='curl -vsL --max-time 20 http://127.0.0.1:8302/debug/info'
+ [[ no != \n\o ]]
+ GO_FAILPOINTS='github.com/pingcap/tiflow/cdc/processor/pipeline/ProcessorSyncResolvedError=1*return(true);github.com/pingcap/tiflow/cdc/processor/ProcessorUpdatePositionDelaying=sleep(1000)'
+ (( i = 0 ))
+ (( i <= 50 ))
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.changefeed_auto_stop.1115111153.out server --log-file /tmp/tidb_cdc_test/changefeed_auto_stop/cdc2.log --log-level debug --data-dir /tmp/tidb_cdc_test/changefeed_auto_stop/cdc_data2 --cluster-id default --addr 127.0.0.1:8302 --pd http://127.0.0.1:2379
++ curl -vsL --max-time 20 http://127.0.0.1:8302/debug/info
* About to connect() to 127.0.0.1 port 8302 (#0)
*   Trying 127.0.0.1...
* Connection refused
* Failed connect to 127.0.0.1:8302; Connection refused
* Closing connection 0
+ res=
+ echo ''
+ grep -q 'failed to get info:'
+ echo ''
+ grep -q 'etcd info'
+ '[' 0 -eq 50 ']'
+ sleep 3
check_data_subset force_replicate_table.t6 127.0.0.1 4000 127.0.0.1 3306
id=7,a=NULL doesn't exist in downstream table force_replicate_table.t6
run task failed 4-th time, retry later
+ run_case_with_unavailable_tikv conf/changefeed-redo.toml
+ rm -rf /tmp/tidb_cdc_test/synced_status
+ mkdir -p /tmp/tidb_cdc_test/synced_status
+ start_tidb_cluster --workdir /tmp/tidb_cdc_test/synced_status
shell-init: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
chdir: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
The 1 times to try to start tidb cluster...
shell-init: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
chdir: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
shell-init: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
chdir: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
+ (( i++ ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8302/debug/info
* About to connect() to 127.0.0.1 port 8302 (#0)
*   Trying 127.0.0.1...
* Connected to 127.0.0.1 (127.0.0.1) port 8302 (#0)
> GET /debug/info HTTP/1.1
> User-Agent: curl/7.29.0
> Host: 127.0.0.1:8302
> Accept: */*
> 
< HTTP/1.1 200 OK
< Date: Mon, 29 Apr 2024 03:54:09 GMT
< Content-Length: 867
< Content-Type: text/plain; charset=utf-8
< 
{ [data not shown]
* Connection #0 to host 127.0.0.1 left intact
+ res='

*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/3e63d51c-f73f-41ca-b740-154fff542010
	{"id":"3e63d51c-f73f-41ca-b740-154fff542010","address":"127.0.0.1:8302","version":"v7.5.1-19-gd6783fb76"}

/tidb/cdc/default/__cdc_meta__/capture/ddceacd9-f3ad-49af-aabb-54a61d5612f0
	{"id":"ddceacd9-f3ad-49af-aabb-54a61d5612f0","address":"127.0.0.1:8301","version":"v7.5.1-19-gd6783fb76"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f27fcc38242
	ddceacd9-f3ad-49af-aabb-54a61d5612f0

/tidb/cdc/default/__cdc_meta__/owner/22318f27fcc38269
	3e63d51c-f73f-41ca-b740-154fff542010

/tidb/cdc/default/default/upstream/7363132282580286811
	{"id":7363132282580286811,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ echo '

*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/3e63d51c-f73f-41ca-b740-154fff542010
	{"id":"3e63d51c-f73f-41ca-b740-154fff542010","address":"127.0.0.1:8302","version":"v7.5.1-19-gd6783fb76"}

/tidb/cdc/default/__cdc_meta__/capture/ddceacd9-f3ad-49af-aabb-54a61d5612f0
	{"id":"ddceacd9-f3ad-49af-aabb-54a61d5612f0","address":"127.0.0.1:8301","version":"v7.5.1-19-gd6783fb76"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f27fcc38242
	ddceacd9-f3ad-49af-aabb-54a61d5612f0

/tidb/cdc/default/__cdc_meta__/owner/22318f27fcc38269
	3e63d51c-f73f-41ca-b740-154fff542010

/tidb/cdc/default/default/upstream/7363132282580286811
	{"id":7363132282580286811,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'failed to get info:'
+ echo '

*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/3e63d51c-f73f-41ca-b740-154fff542010
	{"id":"3e63d51c-f73f-41ca-b740-154fff542010","address":"127.0.0.1:8302","version":"v7.5.1-19-gd6783fb76"}

/tidb/cdc/default/__cdc_meta__/capture/ddceacd9-f3ad-49af-aabb-54a61d5612f0
	{"id":"ddceacd9-f3ad-49af-aabb-54a61d5612f0","address":"127.0.0.1:8301","version":"v7.5.1-19-gd6783fb76"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f27fcc38242
	ddceacd9-f3ad-49af-aabb-54a61d5612f0

/tidb/cdc/default/__cdc_meta__/owner/22318f27fcc38269
	3e63d51c-f73f-41ca-b740-154fff542010

/tidb/cdc/default/default/upstream/7363132282580286811
	{"id":7363132282580286811,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'etcd info'
+ break
+ set +x
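Note that the second capture (port 8302) was started with GO_FAILPOINTS set, which is how this test forces a processor error so the changefeed auto-stops. In pingcap/failpoint term syntax, '1*return(true)' fires the failpoint exactly once and 'sleep(1000)' injects a ~1s delay on each hit. A sketch of the pattern (failpoint paths copied from the trace; the server flags are abbreviated):

# Sketch: start a capture with failpoints injected via the environment.
export GO_FAILPOINTS='github.com/pingcap/tiflow/cdc/processor/pipeline/ProcessorSyncResolvedError=1*return(true);github.com/pingcap/tiflow/cdc/processor/ProcessorUpdatePositionDelaying=sleep(1000)'
cdc.test server --addr 127.0.0.1:8302 --pd http://127.0.0.1:2379 &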
+ workdir=/tmp/tidb_cdc_test/changefeed_auto_stop
+ sink_uri='file:///tmp/tidb_cdc_test/changefeed_auto_stop/storage_test/ticdc-changefeed-auto-stop-test-19486?protocol=canal-json&enable-tidb-extension=true'
+ consumer_replica_config=
+ log_suffix=
++ pwd
+ pwd=/tmp/tidb_cdc_test/changefeed_auto_stop
++ date
+ echo '[Mon Apr 29 11:54:09 CST 2024] <<<<<< START storage consumer in changefeed_auto_stop case >>>>>>'
[Mon Apr 29 11:54:09 CST 2024] <<<<<< START storage consumer in changefeed_auto_stop case >>>>>>
+ cd /tmp/tidb_cdc_test/changefeed_auto_stop
+ '[' '' '!=' '' ']'
+ cd /tmp/tidb_cdc_test/changefeed_auto_stop
+ set +x
+ cdc_storage_consumer --log-file /tmp/tidb_cdc_test/changefeed_auto_stop/cdc_storage_consumer.log --log-level debug --upstream-uri 'file:///tmp/tidb_cdc_test/changefeed_auto_stop/storage_test/ticdc-changefeed-auto-stop-test-19486?protocol=canal-json&enable-tidb-extension=true' --downstream-uri 'mysql://root@127.0.0.1:3306/?safe-mode=true&batch-dml-enable=false'
check_changefeed_state http://127.0.0.1:2379 2630c27e-ddfd-466f-9e6b-6992e81a3f48 normal null
+ endpoints=http://127.0.0.1:2379
+ changefeed_id=2630c27e-ddfd-466f-9e6b-6992e81a3f48
+ expected_state=normal
+ error_msg=null
+ tls_dir=null
+ [[ http://127.0.0.1:2379 =~ https ]]
++ cdc cli changefeed query --pd=http://127.0.0.1:2379 -c 2630c27e-ddfd-466f-9e6b-6992e81a3f48 -s
+ info='{
  "upstream_id": 7363132282580286811,
  "namespace": "default",
  "id": "2630c27e-ddfd-466f-9e6b-6992e81a3f48",
  "state": "normal",
  "checkpoint_tso": 449409933038845953,
  "checkpoint_time": "2024-04-29 11:54:02.708",
  "error": null
}'
+ echo '{
  "upstream_id": 7363132282580286811,
  "namespace": "default",
  "id": "2630c27e-ddfd-466f-9e6b-6992e81a3f48",
  "state": "normal",
  "checkpoint_tso": 449409933038845953,
  "checkpoint_time": "2024-04-29 11:54:02.708",
  "error": null
}'
{
  "upstream_id": 7363132282580286811,
  "namespace": "default",
  "id": "2630c27e-ddfd-466f-9e6b-6992e81a3f48",
  "state": "normal",
  "checkpoint_tso": 449409933038845953,
  "checkpoint_time": "2024-04-29 11:54:02.708",
  "error": null
}
++ echo '{' '"upstream_id":' 7363132282580286811, '"namespace":' '"default",' '"id":' '"2630c27e-ddfd-466f-9e6b-6992e81a3f48",' '"state":' '"normal",' '"checkpoint_tso":' 449409933038845953, '"checkpoint_time":' '"2024-04-29' '11:54:02.708",' '"error":' null '}'
++ jq -r .state
+ state=normal
+ [[ ! normal == \n\o\r\m\a\l ]]
++ echo '{' '"upstream_id":' 7363132282580286811, '"namespace":' '"default",' '"id":' '"2630c27e-ddfd-466f-9e6b-6992e81a3f48",' '"state":' '"normal",' '"checkpoint_tso":' 449409933038845953, '"checkpoint_time":' '"2024-04-29' '11:54:02.708",' '"error":' null '}'
++ jq -r .error.message
+ message=null
+ [[ ! null =~ null ]]
run task successfully
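check_changefeed_state above reduces to two jq probes over `cdc cli changefeed query` output: the state must be "normal" and error.message must be null. A sketch distilled from the trace (not the helper's actual source):

# Sketch: verify a changefeed is healthy via the cli + jq.
info=$(cdc cli changefeed query --pd=http://127.0.0.1:2379 \
       -c 2630c27e-ddfd-466f-9e6b-6992e81a3f48 -s)
state=$(echo "$info" | jq -r .state)            # expect "normal"
message=$(echo "$info" | jq -r .error.message)  # expect null when healthy
[ "$state" = "normal" ] && [[ "$message" =~ null ]] && echo "run task successfully"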
table changefeed_auto_stop_1.usertable not exists for 1-th check, retry later
chdir: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
start tidb cluster in /tmp/tidb_cdc_test/synced_status
Starting Upstream PD...
Release Version: v7.5.1-5-g584533652
Edition: Community
Git Commit Hash: 58453365285465cd90bc4472cff2bad7ce4d764b
Git Branch: release-7.5
UTC Build Time:  2024-04-03 10:04:14
Starting Downstream PD...
Release Version: v7.5.1-5-g584533652
Edition: Community
Git Commit Hash: 58453365285465cd90bc4472cff2bad7ce4d764b
Git Branch: release-7.5
UTC Build Time:  2024-04-03 10:04:14
Verifying upstream PD is started...
Verifying downstream PD is started...
table changefeed_auto_stop_1.usertable not exists for 2-th check, retry later
table changefeed_auto_stop_1.usertable not exists for 3-th check, retry later
Starting Upstream TiKV...
TiKV 
Release Version:   7.5.2
Edition:           Community
Git Commit Hash:   3478895c2a700e4824bb41940260b6b28013275e
Git Commit Branch: release-7.5
UTC Build Time:    2024-04-28 08:20:54
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Enable Features:   pprof-fp jemalloc mem-profiling portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Profile:           dist_release
Starting Downstream TiKV...
TiKV 
Release Version:   7.5.2
Edition:           Community
Git Commit Hash:   3478895c2a700e4824bb41940260b6b28013275e
Git Commit Branch: release-7.5
UTC Build Time:    2024-04-28 08:20:54
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Enable Features:   pprof-fp jemalloc mem-profiling portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Profile:           dist_release
table changefeed_auto_stop_1.usertable not exists for 4-th check, retry later
check_data_subset force_replicate_table.t6 127.0.0.1 4000 127.0.0.1 3306
Starting Upstream TiDB...
Release Version: v7.5.1-45-gbf84e231e6
Edition: Community
Git Commit Hash: bf84e231e6ef26891d0cb524d938345f43aa047c
Git Branch: release-7.5
UTC Build Time: 2024-04-29 02:05:15
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Starting Downstream TiDB...
Release Version: v7.5.1-45-gbf84e231e6
Edition: Community
Git Commit Hash: bf84e231e6ef26891d0cb524d938345f43aa047c
Git Branch: release-7.5
UTC Build Time: 2024-04-29 02:05:15
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Verifying Upstream TiDB is started...
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
run task successfully
wait process cdc.test exit for 1-th time...
wait process cdc.test exit for 2-th time...
table changefeed_auto_stop_1.usertable not exists for 5-th check, retry later
cdc.test: no process found
wait process cdc.test exit for 3-th time...
process cdc.test already exit
[Mon Apr 29 11:54:18 CST 2024] <<<<<< run test case force_replicate_table success! >>>>>>
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
table changefeed_auto_stop_1.usertable exists
table changefeed_auto_stop_2.usertable not exists for 1-th check, retry later
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
table changefeed_auto_stop_2.usertable exists
table changefeed_auto_stop_3.usertable not exists for 1-th check, retry later
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63c9ff520ec000e	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-304-c17q4, pid:29790, start at 2024-04-29 11:54:21.639892 +0800 CST m=+5.138872768	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240429-11:56:21.648 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240429-11:54:21.627 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240429-11:44:21.627 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63c9ff520ec000e	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-304-c17q4, pid:29790, start at 2024-04-29 11:54:21.639892 +0800 CST m=+5.138872768	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240429-11:56:21.648 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240429-11:54:21.627 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240429-11:44:21.627 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
Verifying Downstream TiDB is started...
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63c9ff5226c0017	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-304-c17q4, pid:29870, start at 2024-04-29 11:54:21.756328551 +0800 CST m=+5.205350914	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240429-11:56:21.765 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240429-11:54:21.723 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240429-11:44:21.723 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
Starting Upstream TiFlash...
TiFlash
Release Version: v7.5.1-12-g9002cc34d
Edition:         Community
Git Commit Hash: 9002cc34d3b593a718b6c5260ba18f30a45ab314
Git Branch:      HEAD
UTC Build Time:  2024-04-18 07:24:48
Enable Features: jemalloc sm4(GmSSL) avx2 avx512 unwind thinlto
Profile:         RELWITHDEBINFO

Raft Proxy
Git Commit Hash:   521fd9dbc55e58646045d88f91c3c35db50b5981
Git Commit Branch: HEAD
UTC Build Time:    2024-04-18 07:28:40
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Storage Engine:    tiflash
Prometheus Prefix: tiflash_proxy_
Profile:           release
Enable Features:    portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Verifying Upstream TiFlash is started...
Logging trace to /tmp/tidb_cdc_test/synced_status/tiflash/log/server.log
Logging errors to /tmp/tidb_cdc_test/synced_status/tiflash/log/error.log
arg matches is ArgMatches { args: {"engine-addr": MatchedArg { occurs: 1, indices: [2], vals: ["127.0.0.1:9500"] }, "config": MatchedArg { occurs: 1, indices: [8], vals: ["/tmp/tidb_cdc_test/synced_status/tiflash-proxy.toml"] }, "pd-endpoints": MatchedArg { occurs: 1, indices: [16], vals: ["127.0.0.1:2379"] }, "engine-version": MatchedArg { occurs: 1, indices: [12], vals: ["v7.5.1-12-g9002cc34d"] }, "addr": MatchedArg { occurs: 1, indices: [20], vals: ["127.0.0.1:9000"] }, "engine-git-hash": MatchedArg { occurs: 1, indices: [10], vals: ["9002cc34d3b593a718b6c5260ba18f30a45ab314"] }, "log-file": MatchedArg { occurs: 1, indices: [18], vals: ["/tmp/tidb_cdc_test/synced_status/tiflash/log/proxy.log"] }, "engine-label": MatchedArg { occurs: 1, indices: [14], vals: ["tiflash"] }, "data-dir": MatchedArg { occurs: 1, indices: [6], vals: ["/tmp/tidb_cdc_test/synced_status/tiflash/db/proxy"] }, "advertise-addr": MatchedArg { occurs: 1, indices: [4], vals: ["127.0.0.1:9000"] }}, subcommand: None, usage: Some("USAGE:\n    TiFlash Proxy [FLAGS] [OPTIONS] --engine-git-hash <engine-git-hash> --engine-label <engine-label> --engine-version <engine-version>") }
table changefeed_auto_stop_3.usertable not exists for 2-th check, retry later
+ cd /tmp/tidb_cdc_test/synced_status
++ run_cdc_cli_tso_query 127.0.0.1 2379
+ pd_host=127.0.0.1
+ pd_port=2379
++ run_cdc_cli tso query --pd=http://127.0.0.1:2379
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.synced_status.cli.31165.out cli tso query --pd=http://127.0.0.1:2379
table changefeed_auto_stop_3.usertable not exists for 3-th check, retry later
+ set +x
+ tso='449409938902745089
PASS
coverage: 1.8% of statements in github.com/pingcap/tiflow/...'
+ echo 449409938902745089 PASS coverage: 1.8% of statements in github.com/pingcap/tiflow/...
+ awk -F ' ' '{print $1}'
+ set +x
+ start_ts=449409938902745089
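The start_ts extraction above works because `cdc cli tso query` prints the timestamp first, followed by the test binary's PASS/coverage footer, so awk keeps the first whitespace-separated field. In one line (a sketch mirroring the trace):

# Sketch: grab a current TSO from PD to use as the changefeed start-ts.
start_ts=$(cdc cli tso query --pd=http://127.0.0.1:2379 | head -n1 | awk '{print $1}')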
+ run_cdc_server --workdir /tmp/tidb_cdc_test/synced_status --binary cdc.test
[Mon Apr 29 11:54:26 CST 2024] <<<<<< START cdc server in synced_status case >>>>>>
+ [[ '' == \t\r\u\e ]]
+ set +e
+ get_info_fail_msg='failed to get info:'
+ etcd_info_msg='etcd info'
+ '[' -z '' ']'
+ curl_status_cmd='curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info'
+ GO_FAILPOINTS=
+ [[ no != \n\o ]]
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.synced_status.3119931201.out server --log-file /tmp/tidb_cdc_test/synced_status/cdc.log --log-level debug --data-dir /tmp/tidb_cdc_test/synced_status/cdc_data --cluster-id default
+ (( i = 0 ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connection refused
* Failed connect to 127.0.0.1:8300; Connection refused
* Closing connection 0
+ res=
+ echo ''
+ grep -q 'failed to get info:'
+ echo ''
+ grep -q 'etcd info'
+ '[' 0 -eq 50 ']'
+ sleep 3
table changefeed_auto_stop_3.usertable not exists for 4-th check, retry later
+ (( i++ ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connected to 127.0.0.1 (127.0.0.1) port 8300 (#0)
> GET /debug/info HTTP/1.1
> User-Agent: curl/7.29.0
> Host: 127.0.0.1:8300
> Accept: */*
> 
< HTTP/1.1 200 OK
< Date: Mon, 29 Apr 2024 03:54:29 GMT
< Content-Length: 613
< Content-Type: text/plain; charset=utf-8
< 
{ [data not shown]
* Connection #0 to host 127.0.0.1 left intact
+ res='

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/e8dbbff6-111f-489d-a911-1829ca0b4618
	{"id":"e8dbbff6-111f-489d-a911-1829ca0b4618","address":"127.0.0.1:8300","version":"v7.5.1-19-gd6783fb76"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f27fd1e35f7
	e8dbbff6-111f-489d-a911-1829ca0b4618

/tidb/cdc/default/default/upstream/7363132378649491778
	{"id":7363132378649491778,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/e8dbbff6-111f-489d-a911-1829ca0b4618
	{"id":"e8dbbff6-111f-489d-a911-1829ca0b4618","address":"127.0.0.1:8300","version":"v7.5.1-19-gd6783fb76"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f27fd1e35f7
	e8dbbff6-111f-489d-a911-1829ca0b4618

/tidb/cdc/default/default/upstream/7363132378649491778
	{"id":7363132378649491778,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'failed to get info:'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/e8dbbff6-111f-489d-a911-1829ca0b4618
	{"id":"e8dbbff6-111f-489d-a911-1829ca0b4618","address":"127.0.0.1:8300","version":"v7.5.1-19-gd6783fb76"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f27fd1e35f7
	e8dbbff6-111f-489d-a911-1829ca0b4618

/tidb/cdc/default/default/upstream/7363132378649491778
	{"id":7363132378649491778,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'etcd info'
+ break
+ set +x
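The readiness probe traced above, which gates every "START cdc server" step in this log, reduces to the following loop (a minimal sketch; the function name wait_cdc_ready is assumed):

    wait_cdc_ready() {
        local probe='curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info'
        for ((i = 0; i <= 50; i++)); do
            res=$($probe)
            # healthy output must contain the etcd dump and no fetch error
            if ! echo "$res" | grep -q 'failed to get info:' &&
                 echo "$res" | grep -q 'etcd info'; then
                return 0
            fi
            [ "$i" -eq 50 ] && return 1   # ~150s budget, then give up
            sleep 3
        done
    }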
+ config_path=conf/changefeed-redo.toml
+ SINK_URI='mysql://root@127.0.0.1:3306/?max-txn-row=1'
+ run_cdc_cli changefeed create --start-ts=449409938902745089 '--sink-uri=mysql://root@127.0.0.1:3306/?max-txn-row=1' --changefeed-id=test-1 --config=/home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_storage_test/tiflow/tests/integration_tests/synced_status/conf/changefeed-redo.toml
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.synced_status.cli.31245.out cli changefeed create --start-ts=449409938902745089 '--sink-uri=mysql://root@127.0.0.1:3306/?max-txn-row=1' --changefeed-id=test-1 --config=/home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_storage_test/tiflow/tests/integration_tests/synced_status/conf/changefeed-redo.toml
Create changefeed successfully!
ID: test-1
Info: {"upstream_id":7363132378649491778,"namespace":"default","id":"test-1","sink_uri":"mysql://root@127.0.0.1:3306/?max-txn-row=1","create_time":"2024-04-29T11:54:29.966329573+08:00","start_ts":449409938902745089,"config":{"memory_quota":1073741824,"case_sensitive":false,"force_replicate":false,"ignore_ineligible_table":false,"check_gc_safe_point":true,"enable_sync_point":false,"bdr_mode":false,"sync_point_interval":600000000000,"sync_point_retention":86400000000000,"filter":{"rules":["*.*"]},"mounter":{"worker_num":16},"sink":{"csv":{"delimiter":",","quote":"\"","null":"\\N","include_commit_ts":false,"binary_encoding_method":"base64"},"encoder_concurrency":32,"terminator":"\r\n","date_separator":"day","enable_partition_separator":true,"enable_kafka_sink_v2":false,"only_output_updated_columns":false,"delete_only_output_handle_key_columns":false,"advance_timeout":150,"send_bootstrap_interval_in_sec":120,"send_bootstrap_in_msg_count":10000,"send_bootstrap_to_all_partition":true},"consistent":{"level":"eventual","max_log_size":64,"flush_interval":2000,"meta_flush_interval":200,"encoding_worker_num":16,"flush_worker_num":8,"storage":"file:///tmp/tidb_cdc_test/synced_status/redo","use_file_backend":false,"memory_usage":{"memory_quota_percentage":50,"event_cache_percentage":0}},"scheduler":{"enable_table_across_nodes":false,"region_threshold":100000,"write_key_threshold":0},"integrity":{"integrity_check_level":"none","corruption_handle_level":"warn"},"changefeed_error_stuck_duration":1800000000000,"sql_mode":"ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION","synced_status":{"synced_check_interval":120,"checkpoint_interval":20}},"state":"normal","creator_version":"v7.5.1-19-gd6783fb76","resolved_ts":449409938902745089,"checkpoint_ts":449409938902745089,"checkpoint_time":"2024-04-29 11:54:25.077"}
PASS
coverage: 2.5% of statements in github.com/pingcap/tiflow/...
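Stripped of the coverage wrapper, the changefeed created above is equivalent to the following invocation (a sketch; the CLI's --pd defaults to the upstream PD at 127.0.0.1:2379):

    cdc cli changefeed create \
        --start-ts=449409938902745089 \
        --sink-uri='mysql://root@127.0.0.1:3306/?max-txn-row=1' \
        --changefeed-id=test-1 \
        --config=conf/changefeed-redo.toml   # enables eventual-consistency redo

The redo config file is what produces the "consistent":{"level":"eventual",...,"storage":"file:///tmp/tidb_cdc_test/synced_status/redo"} section in the reported Info above.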
table changefeed_auto_stop_3.usertable not exists for 5-th check, retry later
+ set +x
+ run_sql 'USE TEST;Create table t1(a int primary key, b int);insert into t1 values(1,2);insert into t1 values(2,3);'
+ check_table_exists test.t1 127.0.0.1 3306
table test.t1 not exists for 1-th check, retry later
table changefeed_auto_stop_3.usertable exists
table changefeed_auto_stop_4.usertable exists
check diff failed 1-th time, retry later
<<< Run all test success >>>
[Pipeline] }
Cache not saved (ws/jenkins-pingcap-tiflow-release-7.5-pull_cdc_integration_storage_test-304/tiflow-cdc already exists)
[Pipeline] // cache
[Pipeline] }
[Pipeline] // dir
[Pipeline] }
[Pipeline] // withCredentials
[Pipeline] }
[Pipeline] // timeout
[Pipeline] }
table test.t1 exists
+ sleep 5
[Pipeline] // stage
[Pipeline] }
[Pipeline] // container
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
check diff failed 2-th time, retry later
[Pipeline] // podTemplate
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
check diff failed 3-th time, retry later
check diff failed 4-th time, retry later
+ kill_tikv
++ ps aux
++ grep tikv-server
++ grep /tmp/tidb_cdc_test/synced_status
+ info='jenkins    29238 17.0  0.4 3781552 1639188 ?     Sl   11:54   0:04 tikv-server --pd 127.0.0.1:2379 -A 127.0.0.1:20160 --status-addr 127.0.0.1:20181 --log-file /tmp/tidb_cdc_test/synced_status/tikv1.log --log-level debug -C /tmp/tidb_cdc_test/synced_status/tikv-config.toml -s /tmp/tidb_cdc_test/synced_status/tikv1
jenkins    29239 17.2  0.4 3779504 1634796 ?     Sl   11:54   0:04 tikv-server --pd 127.0.0.1:2379 -A 127.0.0.1:20161 --status-addr 127.0.0.1:20182 --log-file /tmp/tidb_cdc_test/synced_status/tikv2.log --log-level debug -C /tmp/tidb_cdc_test/synced_status/tikv-config.toml -s /tmp/tidb_cdc_test/synced_status/tikv2
jenkins    29240 23.7  0.4 3814836 1683784 ?     Sl   11:54   0:05 tikv-server --pd 127.0.0.1:2379 -A 127.0.0.1:20162 --status-addr 127.0.0.1:20183 --log-file /tmp/tidb_cdc_test/synced_status/tikv3.log --log-level debug -C /tmp/tidb_cdc_test/synced_status/tikv-config.toml -s /tmp/tidb_cdc_test/synced_status/tikv3
jenkins    29242 23.2  0.4 3806648 1676272 ?     Sl   11:54   0:05 tikv-server --pd 127.0.0.1:2479 -A 127.0.0.1:21160 --status-addr 127.0.0.1:21180 --log-file /tmp/tidb_cdc_test/synced_status/tikv_down.log --log-level debug -C /tmp/tidb_cdc_test/synced_status/tikv-config.toml -s /tmp/tidb_cdc_test/synced_status/tikv_down'
++ ps aux
++ grep tikv-server
++ grep /tmp/tidb_cdc_test/synced_status
++ awk '{print $2}'
++ xargs kill -9
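The kill step traced above is a plain ps/grep/awk/xargs pipeline scoped to this test's workdir. Its shape, reconstructed from the trace:

    kill_tikv() {
        # column 2 of `ps aux` is the PID; SIGKILL every tikv-server process
        # belonging to this test's workdir (upstream tikv1..3 plus the
        # downstream tikv_down); the harness does not bother excluding the
        # grep process itself from the match
        ps aux | grep tikv-server |
            grep /tmp/tidb_cdc_test/synced_status |
            awk '{print $2}' | xargs kill -9
    }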
++ curl -X GET http://127.0.0.1:8300/api/v2/changefeeds/test-1/synced
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100   243  100   243    0     0   2557      0 --:--:-- --:--:-- --:--:--  2585
+ synced_status='{"synced":false,"sink_checkpoint_ts":"2024-04-29 11:54:37.527","puller_resolved_ts":"1970-01-01 08:00:00.000","last_synced_ts":"2024-04-29 11:54:31.477","now_ts":"2024-04-29 11:54:38.000","info":"The data syncing is not finished, please wait"}'
++ echo '{"synced":false,"sink_checkpoint_ts":"2024-04-29' '11:54:37.527","puller_resolved_ts":"1970-01-01' '08:00:00.000","last_synced_ts":"2024-04-29' '11:54:31.477","now_ts":"2024-04-29' '11:54:38.000","info":"The' data syncing is not finished, please 'wait"}'
++ jq .synced
+ status=false
+ '[' false '!=' false ']'
++ echo '{"synced":false,"sink_checkpoint_ts":"2024-04-29' '11:54:37.527","puller_resolved_ts":"1970-01-01' '08:00:00.000","last_synced_ts":"2024-04-29' '11:54:31.477","now_ts":"2024-04-29' '11:54:38.000","info":"The' data syncing is not finished, please 'wait"}'
++ jq -r .info
+ info='The data syncing is not finished, please wait'
+ target_message='The data syncing is not finished, please wait'
+ '[' 'The data syncing is not finished, please wait' '!=' 'The data syncing is not finished, please wait' ']'
+ sleep 130
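The assertion pattern repeated throughout this case boils down to fetching the synced API and comparing .synced and .info against expectations (a minimal sketch; the function name and argument order are assumptions):

    check_synced() {
        local changefeed=$1 want_status=$2 want_info=$3
        local resp status info
        resp=$(curl -s -X GET "http://127.0.0.1:8300/api/v2/changefeeds/${changefeed}/synced")
        status=$(echo "$resp" | jq .synced)   # true / false, unquoted
        info=$(echo "$resp" | jq -r .info)    # -r strips the JSON quotes
        [ "$status" = "$want_status" ] || { echo "synced=$status, want $want_status"; exit 1; }
        [ "$info" = "$want_info" ]     || { echo "unexpected info: $info"; exit 1; }
    }

    # e.g. right after killing TiKV the data cannot be fully synced yet:
    check_synced test-1 false 'The data syncing is not finished, please wait'

The unquoted $synced_status in the traced echo lines causes the visible word-splitting, but the JSON survives because the splitting happens only at whitespace.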
check diff successfully
wait process cdc.test exit for 1-th time...
wait process cdc.test exit for 2-th time...
wait process cdc.test exit for 3-th time...
cdc.test: no process found
wait process cdc.test exit for 4-th time...
process cdc.test already exit
[Mon Apr 29 11:54:42 CST 2024] <<<<<< run test case changefeed_auto_stop success! >>>>>>
<<< Run all test success >>>
[Pipeline] }
Cache not saved (ws/jenkins-pingcap-tiflow-release-7.5-pull_cdc_integration_storage_test-304/tiflow-cdc already exists)
[Pipeline] // cache
[Pipeline] }
[Pipeline] // dir
[Pipeline] }
[Pipeline] // withCredentials
[Pipeline] }
[Pipeline] // timeout
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // container
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // podTemplate
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
++ curl -X GET http://127.0.0.1:8300/api/v2/changefeeds/test-1/synced
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100   723  100   723    0     0   9356      0 --:--:-- --:--:-- --:--:--  9389
+ synced_status='{"synced":false,"sink_checkpoint_ts":"2024-04-29 11:54:37.527","puller_resolved_ts":"2024-04-29 11:54:37.577","last_synced_ts":"2024-04-29 11:54:31.477","now_ts":"2024-04-29 11:56:48.000","info":"Please check whether PD is online and TiKV Regions are all available. If PD is offline or some TiKV regions are not available, it means that the data syncing process is complete. To check whether TiKV regions are all available, you can view '\''TiKV-Details'\'' \u003e '\''Resolved-Ts'\'' \u003e '\''Max Leader Resolved TS gap'\'' on Grafana. If the gap is large, such as a few minutes, it means that some regions in TiKV are unavailable. Otherwise, if the gap is small and PD is online, it means the data syncing is incomplete, so please wait"}'
++ echo '{"synced":false,"sink_checkpoint_ts":"2024-04-29' '11:54:37.527","puller_resolved_ts":"2024-04-29' '11:54:37.577","last_synced_ts":"2024-04-29' '11:54:31.477","now_ts":"2024-04-29' '11:56:48.000","info":"Please' check whether PD is online and TiKV Regions are all available. If PD is offline or some TiKV regions are not available, it means that the data syncing process is complete. To check whether TiKV regions are all available, you can view ''\''TiKV-Details'\''' '\u003e' ''\''Resolved-Ts'\''' '\u003e' ''\''Max' Leader Resolved TS 'gap'\''' on Grafana. If the gap is large, such as a few minutes, it means that some regions in TiKV are unavailable. Otherwise, if the gap is small and PD is online, it means the data syncing is incomplete, so please 'wait"}'
++ jq .synced
+ status=false
+ '[' false '!=' false ']'
++ echo '{"synced":false,"sink_checkpoint_ts":"2024-04-29' '11:54:37.527","puller_resolved_ts":"2024-04-29' '11:54:37.577","last_synced_ts":"2024-04-29' '11:54:31.477","now_ts":"2024-04-29' '11:56:48.000","info":"Please' check whether PD is online and TiKV Regions are all available. If PD is offline or some TiKV regions are not available, it means that the data syncing process is complete. To check whether TiKV regions are all available, you can view ''\''TiKV-Details'\''' '\u003e' ''\''Resolved-Ts'\''' '\u003e' ''\''Max' Leader Resolved TS 'gap'\''' on Grafana. If the gap is large, such as a few minutes, it means that some regions in TiKV are unavailable. Otherwise, if the gap is small and PD is online, it means the data syncing is incomplete, so please 'wait"}'
++ jq -r .info
+ info='Please check whether PD is online and TiKV Regions are all available. If PD is offline or some TiKV regions are not available, it means that the data syncing process is complete. To check whether TiKV regions are all available, you can view '\''TiKV-Details'\'' > '\''Resolved-Ts'\'' > '\''Max Leader Resolved TS gap'\'' on Grafana. If the gap is large, such as a few minutes, it means that some regions in TiKV are unavailable. Otherwise, if the gap is small and PD is online, it means the data syncing is incomplete, so please wait'
+ target_message='Please check whether PD is online and TiKV Regions are all available. If PD is offline or some TiKV regions are not available, it means that the data syncing process is complete. To check whether TiKV regions are all available, you can view '\''TiKV-Details'\'' > '\''Resolved-Ts'\'' > '\''Max Leader Resolved TS gap'\'' on Grafana. If the gap is large, such as a few minutes, it means that some regions in TiKV are unavailable. Otherwise, if the gap is small and PD is online, it means the data syncing is incomplete, so please wait'
+ '[' 'Please check whether PD is online and TiKV Regions are all available. If PD is offline or some TiKV regions are not available, it means that the data syncing process is complete. To check whether TiKV regions are all available, you can view '\''TiKV-Details'\'' > '\''Resolved-Ts'\'' > '\''Max Leader Resolved TS gap'\'' on Grafana. If the gap is large, such as a few minutes, it means that some regions in TiKV are unavailable. Otherwise, if the gap is small and PD is online, it means the data syncing is incomplete, so please wait' '!=' 'Please check whether PD is online and TiKV Regions are all available. If PD is offline or some TiKV regions are not available, it means that the data syncing process is complete. To check whether TiKV regions are all available, you can view '\''TiKV-Details'\'' > '\''Resolved-Ts'\'' > '\''Max Leader Resolved TS gap'\'' on Grafana. If the gap is large, such as a few minutes, it means that some regions in TiKV are unavailable. Otherwise, if the gap is small and PD is online, it means the data syncing is incomplete, so please wait' ']'
+ cleanup_process cdc.test
wait process cdc.test exit for 1-th time...
wait process cdc.test exit for 2-th time...
cdc.test: no process found
wait process cdc.test exit for 3-th time...
process cdc.test already exit
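cleanup_process, seen here and after every case, combines a kill with a wait loop; the "cdc.test: no process found" line earlier in the log is killall's own message. A rough sketch under those assumptions:

    cleanup_process() {
        local proc=$1 i=0
        killall "$proc" 2>&1 || true    # prints "<proc>: no process found" once gone
        while pgrep -x "$proc" >/dev/null; do
            i=$((i + 1))
            echo "wait process ${proc} exit for ${i}-th time..."
            sleep 1
        done
        echo "process ${proc} already exit"
    }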
+ stop_tidb_cluster
+ run_case_with_unavailable_tidb conf/changefeed-redo.toml
+ rm -rf /tmp/tidb_cdc_test/synced_status
+ mkdir -p /tmp/tidb_cdc_test/synced_status
+ start_tidb_cluster --workdir /tmp/tidb_cdc_test/synced_status
shell-init: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
chdir: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
The 1 times to try to start tidb cluster...
shell-init: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
chdir: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
shell-init: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
chdir: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
chdir: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
start tidb cluster in /tmp/tidb_cdc_test/synced_status
Starting Upstream PD...
Release Version: v7.5.1-5-g584533652
Edition: Community
Git Commit Hash: 58453365285465cd90bc4472cff2bad7ce4d764b
Git Branch: release-7.5
UTC Build Time:  2024-04-03 10:04:14
Starting Downstream PD...
Release Version: v7.5.1-5-g584533652
Edition: Community
Git Commit Hash: 58453365285465cd90bc4472cff2bad7ce4d764b
Git Branch: release-7.5
UTC Build Time:  2024-04-03 10:04:14
Verifying upstream PD is started...
Verifying downstream PD is started...
Starting Upstream TiKV...
TiKV 
Release Version:   7.5.2
Edition:           Community
Git Commit Hash:   3478895c2a700e4824bb41940260b6b28013275e
Git Commit Branch: release-7.5
UTC Build Time:    2024-04-28 08:20:54
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Enable Features:   pprof-fp jemalloc mem-profiling portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Profile:           dist_release
Starting Downstream TiKV...
TiKV 
Release Version:   7.5.2
Edition:           Community
Git Commit Hash:   3478895c2a700e4824bb41940260b6b28013275e
Git Commit Branch: release-7.5
UTC Build Time:    2024-04-28 08:20:54
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Enable Features:   pprof-fp jemalloc mem-profiling portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Profile:           dist_release
Starting Upstream TiDB...
Release Version: v7.5.1-45-gbf84e231e6
Edition: Community
Git Commit Hash: bf84e231e6ef26891d0cb524d938345f43aa047c
Git Branch: release-7.5
UTC Build Time: 2024-04-29 02:05:15
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Starting Downstream TiDB...
Release Version: v7.5.1-45-gbf84e231e6
Edition: Community
Git Commit Hash: bf84e231e6ef26891d0cb524d938345f43aa047c
Git Branch: release-7.5
UTC Build Time: 2024-04-29 02:05:15
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Verifying Upstream TiDB is started...
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63c9fff8d000014	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-304-c17q4, pid:32390, start at 2024-04-29 11:57:12.429986153 +0800 CST m=+6.234576382	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240429-11:59:12.437 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240429-11:57:12.434 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240429-11:47:12.434 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63c9fff8d000014	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-304-c17q4, pid:32390, start at 2024-04-29 11:57:12.429986153 +0800 CST m=+6.234576382	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240429-11:59:12.437 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240429-11:57:12.434 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240429-11:47:12.434 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
Verifying Downstream TiDB is started...
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63c9fff807c0010	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-304-c17q4, pid:32456, start at 2024-04-29 11:57:11.607366921 +0800 CST m=+5.347240304	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240429-11:59:11.617 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240429-11:57:11.583 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240429-11:47:11.583 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
Starting Upstream TiFlash...
TiFlash
Release Version: v7.5.1-12-g9002cc34d
Edition:         Community
Git Commit Hash: 9002cc34d3b593a718b6c5260ba18f30a45ab314
Git Branch:      HEAD
UTC Build Time:  2024-04-18 07:24:48
Enable Features: jemalloc sm4(GmSSL) avx2 avx512 unwind thinlto
Profile:         RELWITHDEBINFO

Raft Proxy
Git Commit Hash:   521fd9dbc55e58646045d88f91c3c35db50b5981
Git Commit Branch: HEAD
UTC Build Time:    2024-04-18 07:28:40
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Storage Engine:    tiflash
Prometheus Prefix: tiflash_proxy_
Profile:           release
Enable Features:    portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Verifying Upstream TiFlash is started...
Logging trace to /tmp/tidb_cdc_test/synced_status/tiflash/log/server.log
Logging errors to /tmp/tidb_cdc_test/synced_status/tiflash/log/error.log
arg matches is ArgMatches { args: {"config": MatchedArg { occurs: 1, indices: [8], vals: ["/tmp/tidb_cdc_test/synced_status/tiflash-proxy.toml"] }, "data-dir": MatchedArg { occurs: 1, indices: [6], vals: ["/tmp/tidb_cdc_test/synced_status/tiflash/db/proxy"] }, "engine-git-hash": MatchedArg { occurs: 1, indices: [10], vals: ["9002cc34d3b593a718b6c5260ba18f30a45ab314"] }, "log-file": MatchedArg { occurs: 1, indices: [18], vals: ["/tmp/tidb_cdc_test/synced_status/tiflash/log/proxy.log"] }, "advertise-addr": MatchedArg { occurs: 1, indices: [4], vals: ["127.0.0.1:9000"] }, "addr": MatchedArg { occurs: 1, indices: [20], vals: ["127.0.0.1:9000"] }, "pd-endpoints": MatchedArg { occurs: 1, indices: [16], vals: ["127.0.0.1:2379"] }, "engine-label": MatchedArg { occurs: 1, indices: [14], vals: ["tiflash"] }, "engine-addr": MatchedArg { occurs: 1, indices: [2], vals: ["127.0.0.1:9500"] }, "engine-version": MatchedArg { occurs: 1, indices: [12], vals: ["v7.5.1-12-g9002cc34d"] }}, subcommand: None, usage: Some("USAGE:\n    TiFlash Proxy [FLAGS] [OPTIONS] --engine-git-hash <engine-git-hash> --engine-label <engine-label> --engine-version <engine-version>") }
+ cd /tmp/tidb_cdc_test/synced_status
++ run_cdc_cli_tso_query 127.0.0.1 2379
+ pd_host=127.0.0.1
+ pd_port=2379
++ run_cdc_cli tso query --pd=http://127.0.0.1:2379
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.synced_status.cli.33703.out cli tso query --pd=http://127.0.0.1:2379
+ set +x
+ tso='449409984176848897
PASS
coverage: 1.8% of statements in github.com/pingcap/tiflow/...'
+ echo 449409984176848897 PASS coverage: 1.8% of statements in github.com/pingcap/tiflow/...
+ awk -F ' ' '{print $1}'
+ set +x
+ start_ts=449409984176848897
+ run_cdc_server --workdir /tmp/tidb_cdc_test/synced_status --binary cdc.test
[Mon Apr 29 11:57:19 CST 2024] <<<<<< START cdc server in synced_status case >>>>>>
+ [[ '' == \t\r\u\e ]]
+ set +e
+ get_info_fail_msg='failed to get info:'
+ etcd_info_msg='etcd info'
+ GO_FAILPOINTS=
+ '[' -z '' ']'
+ curl_status_cmd='curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info'
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.synced_status.3374633748.out server --log-file /tmp/tidb_cdc_test/synced_status/cdc.log --log-level debug --data-dir /tmp/tidb_cdc_test/synced_status/cdc_data --cluster-id default
+ [[ no != \n\o ]]
+ (( i = 0 ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connection refused
* Failed connect to 127.0.0.1:8300; Connection refused
* Closing connection 0
+ res=
+ echo ''
+ grep -q 'failed to get info:'
+ echo ''
+ grep -q 'etcd info'
+ '[' 0 -eq 50 ']'
+ sleep 3
+ (( i++ ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connected to 127.0.0.1 (127.0.0.1) port 8300 (#0)
> GET /debug/info HTTP/1.1
> User-Agent: curl/7.29.0
> Host: 127.0.0.1:8300
> Accept: */*
> 
< HTTP/1.1 200 OK
< Date: Mon, 29 Apr 2024 03:57:22 GMT
< Content-Length: 613
< Content-Type: text/plain; charset=utf-8
< 
{ [data not shown]
* Connection #0 to host 127.0.0.1 left intact
+ res='

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/5b038b4c-8e0c-4858-b0fd-55c92aa5229d
	{"id":"5b038b4c-8e0c-4858-b0fd-55c92aa5229d","address":"127.0.0.1:8300","version":"v7.5.1-19-gd6783fb76"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f27ffbcf3f5
	5b038b4c-8e0c-4858-b0fd-55c92aa5229d

/tidb/cdc/default/default/upstream/7363133118443522419
	{"id":7363133118443522419,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/5b038b4c-8e0c-4858-b0fd-55c92aa5229d
	{"id":"5b038b4c-8e0c-4858-b0fd-55c92aa5229d","address":"127.0.0.1:8300","version":"v7.5.1-19-gd6783fb76"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f27ffbcf3f5
	5b038b4c-8e0c-4858-b0fd-55c92aa5229d

/tidb/cdc/default/default/upstream/7363133118443522419
	{"id":7363133118443522419,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'failed to get info:'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/5b038b4c-8e0c-4858-b0fd-55c92aa5229d
	{"id":"5b038b4c-8e0c-4858-b0fd-55c92aa5229d","address":"127.0.0.1:8300","version":"v7.5.1-19-gd6783fb76"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f27ffbcf3f5
	5b038b4c-8e0c-4858-b0fd-55c92aa5229d

/tidb/cdc/default/default/upstream/7363133118443522419
	{"id":7363133118443522419,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'etcd info'
+ break
+ set +x
+ config_path=conf/changefeed-redo.toml
+ SINK_URI='mysql://root@127.0.0.1:3306/?max-txn-row=1'
+ run_cdc_cli changefeed create --start-ts=449409984176848897 '--sink-uri=mysql://root@127.0.0.1:3306/?max-txn-row=1' --changefeed-id=test-1 --config=/home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_storage_test/tiflow/tests/integration_tests/synced_status/conf/changefeed-redo.toml
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.synced_status.cli.33792.out cli changefeed create --start-ts=449409984176848897 '--sink-uri=mysql://root@127.0.0.1:3306/?max-txn-row=1' --changefeed-id=test-1 --config=/home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_storage_test/tiflow/tests/integration_tests/synced_status/conf/changefeed-redo.toml
Create changefeed successfully!
ID: test-1
Info: {"upstream_id":7363133118443522419,"namespace":"default","id":"test-1","sink_uri":"mysql://root@127.0.0.1:3306/?max-txn-row=1","create_time":"2024-04-29T11:57:22.708591727+08:00","start_ts":449409984176848897,"config":{"memory_quota":1073741824,"case_sensitive":false,"force_replicate":false,"ignore_ineligible_table":false,"check_gc_safe_point":true,"enable_sync_point":false,"bdr_mode":false,"sync_point_interval":600000000000,"sync_point_retention":86400000000000,"filter":{"rules":["*.*"]},"mounter":{"worker_num":16},"sink":{"csv":{"delimiter":",","quote":"\"","null":"\\N","include_commit_ts":false,"binary_encoding_method":"base64"},"encoder_concurrency":32,"terminator":"\r\n","date_separator":"day","enable_partition_separator":true,"enable_kafka_sink_v2":false,"only_output_updated_columns":false,"delete_only_output_handle_key_columns":false,"advance_timeout":150,"send_bootstrap_interval_in_sec":120,"send_bootstrap_in_msg_count":10000,"send_bootstrap_to_all_partition":true},"consistent":{"level":"eventual","max_log_size":64,"flush_interval":2000,"meta_flush_interval":200,"encoding_worker_num":16,"flush_worker_num":8,"storage":"file:///tmp/tidb_cdc_test/synced_status/redo","use_file_backend":false,"memory_usage":{"memory_quota_percentage":50,"event_cache_percentage":0}},"scheduler":{"enable_table_across_nodes":false,"region_threshold":100000,"write_key_threshold":0},"integrity":{"integrity_check_level":"none","corruption_handle_level":"warn"},"changefeed_error_stuck_duration":1800000000000,"sql_mode":"ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION","synced_status":{"synced_check_interval":120,"checkpoint_interval":20}},"state":"normal","creator_version":"v7.5.1-19-gd6783fb76","resolved_ts":449409984176848897,"checkpoint_ts":449409984176848897,"checkpoint_time":"2024-04-29 11:57:17.784"}
PASS
coverage: 2.5% of statements in github.com/pingcap/tiflow/...
+ set +x
+ run_sql 'USE TEST;Create table t1(a int primary key, b int);insert into t1 values(1,2);insert into t1 values(2,3);'
+ check_table_exists test.t1 127.0.0.1 3306
table test.t1 not exists for 1-th check, retry later
table test.t1 exists
+ sleep 5
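check_table_exists, used after every run_sql above, polls the given endpoint until the table is queryable (a minimal sketch of the assumed shape; arguments are table, host, port, and root has no password, matching the sink URI):

    check_table_exists() {
        local table=$1 host=$2 port=$3
        for i in $(seq 1 60); do
            if mysql -h"$host" -P"$port" -uroot \
                 -e "SELECT 1 FROM ${table} LIMIT 1;" >/dev/null 2>&1; then
                echo "table ${table} exists"
                return 0
            fi
            echo "table ${table} not exists for ${i}-th check, retry later"
            sleep 2
        done
        return 1
    }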
+ kill_tidb
++ ps aux
++ grep tidb-server
++ grep /tmp/tidb_cdc_test/synced_status
+ info='jenkins    32386  3.3  0.0 2409276 162472 ?      Sl   11:57   0:00 tidb-server -P 4000 -config /tmp/tidb_cdc_test/synced_status/tidb-config-1714363026183526113.toml --store tikv --path 127.0.0.1:2379 --status=10080 --log-file /tmp/tidb_cdc_test/synced_status/tidb.log
jenkins    32390 10.2  0.0 2278812 190832 ?      Sl   11:57   0:02 tidb-server -P 4001 -config /tmp/tidb_cdc_test/synced_status/tidb-config-1714363026187223809.toml --store tikv --path 127.0.0.1:2379 --status=10081 --log-file /tmp/tidb_cdc_test/synced_status/tidb_other.log
jenkins    32456 11.0  0.0 2403804 249068 ?      Sl   11:57   0:02 tidb-server -P 3306 -config /tmp/tidb_cdc_test/synced_status/tidb-config-1714363026252779192.toml --store tikv --path 127.0.0.1:2479 --status=20080 --log-file /tmp/tidb_cdc_test/synced_status/tidb_down.log'
++ ps aux
++ grep tidb-server
++ grep /tmp/tidb_cdc_test/synced_status
++ awk '{print $2}'
++ xargs kill -9
++ curl -X GET http://127.0.0.1:8300/api/v2/changefeeds/test-1/synced
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100   243  100   243    0     0   2901      0 --:--:-- --:--:-- --:--:--  2927
+ synced_status='{"synced":false,"sink_checkpoint_ts":"2024-04-29 11:57:30.235","puller_resolved_ts":"1970-01-01 08:00:00.000","last_synced_ts":"2024-04-29 11:57:24.634","now_ts":"2024-04-29 11:57:31.000","info":"The data syncing is not finished, please wait"}'
++ echo '{"synced":false,"sink_checkpoint_ts":"2024-04-29' '11:57:30.235","puller_resolved_ts":"1970-01-01' '08:00:00.000","last_synced_ts":"2024-04-29' '11:57:24.634","now_ts":"2024-04-29' '11:57:31.000","info":"The' data syncing is not finished, please 'wait"}'
++ jq .synced
+ status=false
+ '[' false '!=' false ']'
++ echo '{"synced":false,"sink_checkpoint_ts":"2024-04-29' '11:57:30.235","puller_resolved_ts":"1970-01-01' '08:00:00.000","last_synced_ts":"2024-04-29' '11:57:24.634","now_ts":"2024-04-29' '11:57:31.000","info":"The' data syncing is not finished, please 'wait"}'
++ jq -r .info
+ info='The data syncing is not finished, please wait'
+ target_message='The data syncing is not finished, please wait'
+ '[' 'The data syncing is not finished, please wait' '!=' 'The data syncing is not finished, please wait' ']'
+ sleep 130
++ curl -X GET http://127.0.0.1:8300/api/v2/changefeeds/test-1/synced
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100   221  100   221    0     0   2552      0 --:--:-- --:--:-- --:--:--  2569
+ synced_status='{"synced":true,"sink_checkpoint_ts":"2024-04-29 11:59:40.384","puller_resolved_ts":"2024-04-29 11:59:34.384","last_synced_ts":"2024-04-29 11:57:24.634","now_ts":"2024-04-29 11:59:41.000","info":"Data syncing is finished"}'
++ echo '{"synced":true,"sink_checkpoint_ts":"2024-04-29' '11:59:40.384","puller_resolved_ts":"2024-04-29' '11:59:34.384","last_synced_ts":"2024-04-29' '11:57:24.634","now_ts":"2024-04-29' '11:59:41.000","info":"Data' syncing is 'finished"}'
++ jq .synced
+ status=true
+ '[' true '!=' true ']'
++ echo '{"synced":true,"sink_checkpoint_ts":"2024-04-29' '11:59:40.384","puller_resolved_ts":"2024-04-29' '11:59:34.384","last_synced_ts":"2024-04-29' '11:57:24.634","now_ts":"2024-04-29' '11:59:41.000","info":"Data' syncing is 'finished"}'
++ jq -r .info
+ info='Data syncing is finished'
+ target_message='Data syncing is finished'
+ '[' 'Data syncing is finished' '!=' 'Data syncing is finished' ']'
+ cleanup_process cdc.test
wait process cdc.test exit for 1-th time...
wait process cdc.test exit for 2-th time...
wait process cdc.test exit for 3-th time...
cdc.test: no process found
wait process cdc.test exit for 4-th time...
process cdc.test already exit
+ stop_tidb_cluster
+ run_case_with_failpoint conf/changefeed-redo.toml
+ rm -rf /tmp/tidb_cdc_test/synced_status
+ mkdir -p /tmp/tidb_cdc_test/synced_status
+ start_tidb_cluster --workdir /tmp/tidb_cdc_test/synced_status
shell-init: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
chdir: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
The 1 times to try to start tidb cluster...
shell-init: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
chdir: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
shell-init: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
chdir: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
chdir: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
start tidb cluster in /tmp/tidb_cdc_test/synced_status
Starting Upstream PD...
Release Version: v7.5.1-5-g584533652
Edition: Community
Git Commit Hash: 58453365285465cd90bc4472cff2bad7ce4d764b
Git Branch: release-7.5
UTC Build Time:  2024-04-03 10:04:14
Starting Downstream PD...
Release Version: v7.5.1-5-g584533652
Edition: Community
Git Commit Hash: 58453365285465cd90bc4472cff2bad7ce4d764b
Git Branch: release-7.5
UTC Build Time:  2024-04-03 10:04:14
Verifying upstream PD is started...
Verifying downstream PD is started...
Starting Upstream TiKV...
TiKV 
Release Version:   7.5.2
Edition:           Community
Git Commit Hash:   3478895c2a700e4824bb41940260b6b28013275e
Git Commit Branch: release-7.5
UTC Build Time:    2024-04-28 08:20:54
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Enable Features:   pprof-fp jemalloc mem-profiling portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Profile:           dist_release
Starting Downstream TiKV...
TiKV 
Release Version:   7.5.2
Edition:           Community
Git Commit Hash:   3478895c2a700e4824bb41940260b6b28013275e
Git Commit Branch: release-7.5
UTC Build Time:    2024-04-28 08:20:54
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Enable Features:   pprof-fp jemalloc mem-profiling portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Profile:           dist_release
Starting Upstream TiDB...
Release Version: v7.5.1-45-gbf84e231e6
Edition: Community
Git Commit Hash: bf84e231e6ef26891d0cb524d938345f43aa047c
Git Branch: release-7.5
UTC Build Time: 2024-04-29 02:05:15
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Starting Downstream TiDB...
Release Version: v7.5.1-45-gbf84e231e6
Edition: Community
Git Commit Hash: bf84e231e6ef26891d0cb524d938345f43aa047c
Git Branch: release-7.5
UTC Build Time: 2024-04-29 02:05:15
GoVersion: go1.21.6
Race Enabled: false
Check Table Before Drop: false
Store: unistore
Verifying Upstream TiDB is started...
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
ERROR 2003 (HY000): Can't connect to MySQL server on '127.0.0.1' (111)
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63ca00a4a780013	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-304-c17q4, pid:34871, start at 2024-04-29 12:00:08.380157768 +0800 CST m=+5.348003100	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240429-12:02:08.386 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240429-12:00:08.350 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240429-11:50:08.350 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63ca00a4a780013	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-304-c17q4, pid:34871, start at 2024-04-29 12:00:08.380157768 +0800 CST m=+5.348003100	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240429-12:02:08.386 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240429-12:00:08.350 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240429-11:50:08.350 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
Verifying Downstream TiDB is started...
VARIABLE_NAME	VARIABLE_VALUE	COMMENT
bootstrapped	True	Bootstrap flag. Do not delete.
tidb_server_version	179	Bootstrap version. Do not delete.
system_tz	Asia/Shanghai	TiDB Global System Timezone.
new_collation_enabled	True	If the new collations are enabled. Do not edit it.
ddl_table_version	3	DDL Table Version. Do not delete.
tikv_gc_leader_uuid	63ca00a4a68000f	Current GC worker leader UUID. (DO NOT EDIT)
tikv_gc_leader_desc	host:xtiflow-release-7-5-pull-cdc-integration-storage-test-304-c17q4, pid:34945, start at 2024-04-29 12:00:08.365731941 +0800 CST m=+5.262602415	Host name and pid of current GC leader. (DO NOT EDIT)
tikv_gc_leader_lease	20240429-12:02:08.373 +0800	Current GC worker leader lease. (DO NOT EDIT)
tikv_gc_auto_concurrency	true	Let TiDB pick the concurrency automatically. If set false, tikv_gc_concurrency will be used
tikv_gc_enable	true	Current GC enable status
tikv_gc_run_interval	10m0s	GC run interval, at least 10m, in Go format.
tikv_gc_life_time	10m0s	All versions within life time will not be collected by GC, at least 10m, in Go format.
tikv_gc_last_run_time	20240429-12:00:08.346 +0800	The time when last GC starts. (DO NOT EDIT)
tikv_gc_safe_point	20240429-11:50:08.346 +0800	All versions after safe point can be accessed. (DO NOT EDIT)
Starting Upstream TiFlash...
TiFlash
Release Version: v7.5.1-12-g9002cc34d
Edition:         Community
Git Commit Hash: 9002cc34d3b593a718b6c5260ba18f30a45ab314
Git Branch:      HEAD
UTC Build Time:  2024-04-18 07:24:48
Enable Features: jemalloc sm4(GmSSL) avx2 avx512 unwind thinlto
Profile:         RELWITHDEBINFO

Raft Proxy
Git Commit Hash:   521fd9dbc55e58646045d88f91c3c35db50b5981
Git Commit Branch: HEAD
UTC Build Time:    2024-04-18 07:28:40
Rust Version:      rustc 1.67.0-nightly (96ddd32c4 2022-11-14)
Storage Engine:    tiflash
Prometheus Prefix: tiflash_proxy_
Profile:           release
Enable Features:    portable sse test-engine-kv-rocksdb test-engine-raft-raft-engine cloud-aws cloud-gcp cloud-azure
Verifying Upstream TiFlash is started...
Logging trace to /tmp/tidb_cdc_test/synced_status/tiflash/log/server.log
Logging errors to /tmp/tidb_cdc_test/synced_status/tiflash/log/error.log
arg matches is ArgMatches { args: {"data-dir": MatchedArg { occurs: 1, indices: [6], vals: ["/tmp/tidb_cdc_test/synced_status/tiflash/db/proxy"] }, "engine-git-hash": MatchedArg { occurs: 1, indices: [10], vals: ["9002cc34d3b593a718b6c5260ba18f30a45ab314"] }, "engine-label": MatchedArg { occurs: 1, indices: [14], vals: ["tiflash"] }, "log-file": MatchedArg { occurs: 1, indices: [18], vals: ["/tmp/tidb_cdc_test/synced_status/tiflash/log/proxy.log"] }, "pd-endpoints": MatchedArg { occurs: 1, indices: [16], vals: ["127.0.0.1:2379"] }, "engine-addr": MatchedArg { occurs: 1, indices: [2], vals: ["127.0.0.1:9500"] }, "addr": MatchedArg { occurs: 1, indices: [20], vals: ["127.0.0.1:9000"] }, "advertise-addr": MatchedArg { occurs: 1, indices: [4], vals: ["127.0.0.1:9000"] }, "config": MatchedArg { occurs: 1, indices: [8], vals: ["/tmp/tidb_cdc_test/synced_status/tiflash-proxy.toml"] }, "engine-version": MatchedArg { occurs: 1, indices: [12], vals: ["v7.5.1-12-g9002cc34d"] }}, subcommand: None, usage: Some("USAGE:\n    TiFlash Proxy [FLAGS] [OPTIONS] --engine-git-hash <engine-git-hash> --engine-label <engine-label> --engine-version <engine-version>") }
+ cd /tmp/tidb_cdc_test/synced_status
+ export 'GO_FAILPOINTS=github.com/pingcap/tiflow/cdc/owner/ChangefeedOwnerNotUpdateCheckpoint=return(true)'
+ GO_FAILPOINTS='github.com/pingcap/tiflow/cdc/owner/ChangefeedOwnerNotUpdateCheckpoint=return(true)'
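Failpoints in cdc.test are toggled through the GO_FAILPOINTS environment variable (the pingcap/failpoint convention); here the owner is prevented from advancing the checkpoint, so the synced API later reports the stuck-checkpoint diagnostic even though no component was killed. A minimal sketch of the pattern, using only values visible in this trace:

    # enable the failpoint for the server process only
    export GO_FAILPOINTS='github.com/pingcap/tiflow/cdc/owner/ChangefeedOwnerNotUpdateCheckpoint=return(true)'
    cdc.test server \
        --log-file /tmp/tidb_cdc_test/synced_status/cdc.log \
        --log-level debug \
        --data-dir /tmp/tidb_cdc_test/synced_status/cdc_data \
        --cluster-id default &

    # ... create the changefeed and assert on the synced API ...

    export GO_FAILPOINTS=''   # clear it so later cases run clean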
++ run_cdc_cli_tso_query 127.0.0.1 2379
+ pd_host=127.0.0.1
+ pd_port=2379
++ run_cdc_cli tso query --pd=http://127.0.0.1:2379
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.synced_status.cli.36264.out cli tso query --pd=http://127.0.0.1:2379
+ set +x
+ tso='449410030292172801
PASS
coverage: 1.8% of statements in github.com/pingcap/tiflow/...'
+ echo 449410030292172801 PASS coverage: 1.8% of statements in github.com/pingcap/tiflow/...
+ awk -F ' ' '{print $1}'
+ set +x
+ start_ts=449410030292172801
+ run_cdc_server --workdir /tmp/tidb_cdc_test/synced_status --binary cdc.test
[Mon Apr 29 12:00:15 CST 2024] <<<<<< START cdc server in synced_status case >>>>>>
+ [[ '' == \t\r\u\e ]]
+ set +e
+ get_info_fail_msg='failed to get info:'
+ etcd_info_msg='etcd info'
+ '[' -z '' ']'
+ GO_FAILPOINTS='github.com/pingcap/tiflow/cdc/owner/ChangefeedOwnerNotUpdateCheckpoint=return(true)'
+ curl_status_cmd='curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info'
+ [[ no != \n\o ]]
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.synced_status.3629436296.out server --log-file /tmp/tidb_cdc_test/synced_status/cdc.log --log-level debug --data-dir /tmp/tidb_cdc_test/synced_status/cdc_data --cluster-id default
+ (( i = 0 ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connection refused
* Failed connect to 127.0.0.1:8300; Connection refused
* Closing connection 0
+ res=
+ echo ''
+ grep -q 'failed to get info:'
+ echo ''
+ grep -q 'etcd info'
+ '[' 0 -eq 50 ']'
+ sleep 3
+ (( i++ ))
+ (( i <= 50 ))
++ curl -vsL --max-time 20 http://127.0.0.1:8300/debug/info
* About to connect() to 127.0.0.1 port 8300 (#0)
*   Trying 127.0.0.1...
* Connected to 127.0.0.1 (127.0.0.1) port 8300 (#0)
> GET /debug/info HTTP/1.1
> User-Agent: curl/7.29.0
> Host: 127.0.0.1:8300
> Accept: */*
> 
< HTTP/1.1 200 OK
< Date: Mon, 29 Apr 2024 04:00:18 GMT
< Content-Length: 613
< Content-Type: text/plain; charset=utf-8
< 
{ [data not shown]
* Connection #0 to host 127.0.0.1 left intact
+ res='

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/23a35c01-c901-46b1-ab9f-4bd6bcc8f1e6
	{"id":"23a35c01-c901-46b1-ab9f-4bd6bcc8f1e6","address":"127.0.0.1:8300","version":"v7.5.1-19-gd6783fb76"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f280267d0f6
	23a35c01-c901-46b1-ab9f-4bd6bcc8f1e6

/tidb/cdc/default/default/upstream/7363133871320584423
	{"id":7363133871320584423,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/23a35c01-c901-46b1-ab9f-4bd6bcc8f1e6
	{"id":"23a35c01-c901-46b1-ab9f-4bd6bcc8f1e6","address":"127.0.0.1:8300","version":"v7.5.1-19-gd6783fb76"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f280267d0f6
	23a35c01-c901-46b1-ab9f-4bd6bcc8f1e6

/tidb/cdc/default/default/upstream/7363133871320584423
	{"id":7363133871320584423,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'failed to get info:'
+ echo '

*** owner info ***:



*** processors info ***:



*** etcd info ***:

/tidb/cdc/default/__cdc_meta__/capture/23a35c01-c901-46b1-ab9f-4bd6bcc8f1e6
	{"id":"23a35c01-c901-46b1-ab9f-4bd6bcc8f1e6","address":"127.0.0.1:8300","version":"v7.5.1-19-gd6783fb76"}

/tidb/cdc/default/__cdc_meta__/meta/meta-version
	1

/tidb/cdc/default/__cdc_meta__/owner/22318f280267d0f6
	23a35c01-c901-46b1-ab9f-4bd6bcc8f1e6

/tidb/cdc/default/default/upstream/7363133871320584423
	{"id":7363133871320584423,"pd-endpoints":"http://127.0.0.1:2379,http://127.0.0.1:2379","key-path":"","cert-path":"","ca-path":"","cert-allowed-cn":null}'
+ grep -q 'etcd info'
+ break
+ set +x
+ config_path=conf/changefeed-redo.toml
+ SINK_URI='mysql://root@127.0.0.1:3306/?max-txn-row=1'
+ run_cdc_cli changefeed create --start-ts=449410030292172801 '--sink-uri=mysql://root@127.0.0.1:3306/?max-txn-row=1' --changefeed-id=test-1 --config=/home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_storage_test/tiflow/tests/integration_tests/synced_status/conf/changefeed-redo.toml
+ cdc.test -test.coverprofile=/tmp/tidb_cdc_test/cov.synced_status.cli.36345.out cli changefeed create --start-ts=449410030292172801 '--sink-uri=mysql://root@127.0.0.1:3306/?max-txn-row=1' --changefeed-id=test-1 --config=/home/jenkins/agent/workspace/pingcap/tiflow/release-7.5/pull_cdc_integration_storage_test/tiflow/tests/integration_tests/synced_status/conf/changefeed-redo.toml
Create changefeed successfully!
ID: test-1
Info: {"upstream_id":7363133871320584423,"namespace":"default","id":"test-1","sink_uri":"mysql://root@127.0.0.1:3306/?max-txn-row=1","create_time":"2024-04-29T12:00:18.62039785+08:00","start_ts":449410030292172801,"config":{"memory_quota":1073741824,"case_sensitive":false,"force_replicate":false,"ignore_ineligible_table":false,"check_gc_safe_point":true,"enable_sync_point":false,"bdr_mode":false,"sync_point_interval":600000000000,"sync_point_retention":86400000000000,"filter":{"rules":["*.*"]},"mounter":{"worker_num":16},"sink":{"csv":{"delimiter":",","quote":"\"","null":"\\N","include_commit_ts":false,"binary_encoding_method":"base64"},"encoder_concurrency":32,"terminator":"\r\n","date_separator":"day","enable_partition_separator":true,"enable_kafka_sink_v2":false,"only_output_updated_columns":false,"delete_only_output_handle_key_columns":false,"advance_timeout":150,"send_bootstrap_interval_in_sec":120,"send_bootstrap_in_msg_count":10000,"send_bootstrap_to_all_partition":true},"consistent":{"level":"eventual","max_log_size":64,"flush_interval":2000,"meta_flush_interval":200,"encoding_worker_num":16,"flush_worker_num":8,"storage":"file:///tmp/tidb_cdc_test/synced_status/redo","use_file_backend":false,"memory_usage":{"memory_quota_percentage":50,"event_cache_percentage":0}},"scheduler":{"enable_table_across_nodes":false,"region_threshold":100000,"write_key_threshold":0},"integrity":{"integrity_check_level":"none","corruption_handle_level":"warn"},"changefeed_error_stuck_duration":1800000000000,"sql_mode":"ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION","synced_status":{"synced_check_interval":120,"checkpoint_interval":20}},"state":"normal","creator_version":"v7.5.1-19-gd6783fb76","resolved_ts":449410030292172801,"checkpoint_ts":449410030292172801,"checkpoint_time":"2024-04-29 12:00:13.700"}
PASS
coverage: 2.5% of statements in github.com/pingcap/tiflow/...
+ set +x
+ sleep 20
++ curl -X GET http://127.0.0.1:8300/api/v2/changefeeds/test-1/synced
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100   723  100   723    0     0  11323      0 --:--:-- --:--:-- --:--:-- 11476
+ synced_status='{"synced":false,"sink_checkpoint_ts":"2024-04-29 12:00:13.700","puller_resolved_ts":"1970-01-01 08:00:00.000","last_synced_ts":"1970-01-01 08:00:00.000","now_ts":"2024-04-29 12:00:40.000","info":"Please check whether PD is online and TiKV Regions are all available. If PD is offline or some TiKV regions are not available, it means that the data syncing process is complete. To check whether TiKV regions are all available, you can view '\''TiKV-Details'\'' \u003e '\''Resolved-Ts'\'' \u003e '\''Max Leader Resolved TS gap'\'' on Grafana. If the gap is large, such as a few minutes, it means that some regions in TiKV are unavailable. Otherwise, if the gap is small and PD is online, it means the data syncing is incomplete, so please wait"}'
++ echo '{"synced":false,"sink_checkpoint_ts":"2024-04-29' '12:00:13.700","puller_resolved_ts":"1970-01-01' '08:00:00.000","last_synced_ts":"1970-01-01' '08:00:00.000","now_ts":"2024-04-29' '12:00:40.000","info":"Please' check whether PD is online and TiKV Regions are all available. If PD is offline or some TiKV regions are not available, it means that the data syncing process is complete. To check whether TiKV regions are all available, you can view ''\''TiKV-Details'\''' '\u003e' ''\''Resolved-Ts'\''' '\u003e' ''\''Max' Leader Resolved TS 'gap'\''' on Grafana. If the gap is large, such as a few minutes, it means that some regions in TiKV are unavailable. Otherwise, if the gap is small and PD is online, it means the data syncing is incomplete, so please 'wait"}'
++ jq .synced
+ status=false
+ '[' false '!=' false ']'
++ echo '{"synced":false,"sink_checkpoint_ts":"2024-04-29' '12:00:13.700","puller_resolved_ts":"1970-01-01' '08:00:00.000","last_synced_ts":"1970-01-01' '08:00:00.000","now_ts":"2024-04-29' '12:00:40.000","info":"Please' check whether PD is online and TiKV Regions are all available. If PD is offline or some TiKV regions are not available, it means that the data syncing process is complete. To check whether TiKV regions are all available, you can view ''\''TiKV-Details'\''' '\u003e' ''\''Resolved-Ts'\''' '\u003e' ''\''Max' Leader Resolved TS 'gap'\''' on Grafana. If the gap is large, such as a few minutes, it means that some regions in TiKV are unavailable. Otherwise, if the gap is small and PD is online, it means the data syncing is incomplete, so please 'wait"}'
++ jq -r .info
+ info='Please check whether PD is online and TiKV Regions are all available. If PD is offline or some TiKV regions are not available, it means that the data syncing process is complete. To check whether TiKV regions are all available, you can view '\''TiKV-Details'\'' > '\''Resolved-Ts'\'' > '\''Max Leader Resolved TS gap'\'' on Grafana. If the gap is large, such as a few minutes, it means that some regions in TiKV are unavailable. Otherwise, if the gap is small and PD is online, it means the data syncing is incomplete, so please wait'
+ target_message='Please check whether PD is online and TiKV Regions are all available. If PD is offline or some TiKV regions are not available, it means that the data syncing process is complete. To check whether TiKV regions are all available, you can view '\''TiKV-Details'\'' > '\''Resolved-Ts'\'' > '\''Max Leader Resolved TS gap'\'' on Grafana. If the gap is large, such as a few minutes, it means that some regions in TiKV are unavailable. Otherwise, if the gap is small and PD is online, it means the data syncing is incomplete, so please wait'
+ '[' 'Please check whether PD is online and TiKV Regions are all available. If PD is offline or some TiKV regions are not available, it means that the data syncing process is complete. To check whether TiKV regions are all available, you can view '\''TiKV-Details'\'' > '\''Resolved-Ts'\'' > '\''Max Leader Resolved TS gap'\'' on Grafana. If the gap is large, such as a few minutes, it means that some regions in TiKV are unavailable. Otherwise, if the gap is small and PD is online, it means the data syncing is incomplete, so please wait' '!=' 'Please check whether PD is online and TiKV Regions are all available. If PD is offline or some TiKV regions are not available, it means that the data syncing process is complete. To check whether TiKV regions are all available, you can view '\''TiKV-Details'\'' > '\''Resolved-Ts'\'' > '\''Max Leader Resolved TS gap'\'' on Grafana. If the gap is large, such as a few minutes, it means that some regions in TiKV are unavailable. Otherwise, if the gap is small and PD is online, it means the data syncing is incomplete, so please wait' ']'
+ export GO_FAILPOINTS=
+ GO_FAILPOINTS=
+ cleanup_process cdc.test
wait process cdc.test exit for 1-th time...
wait process cdc.test exit for 2-th time...
cdc.test: no process found
wait process cdc.test exit for 3-th time...
process cdc.test already exit
+ stop_tidb_cluster
+ check_logs /tmp/tidb_cdc_test/synced_status
++ date
+ echo '[Mon Apr 29 12:00:53 CST 2024] <<<<<< run test case synced_status success! >>>>>>'
[Mon Apr 29 12:00:53 CST 2024] <<<<<< run test case synced_status success! >>>>>>
+ stop_tidb_cluster
<<< Run all test success >>>
[Pipeline] }
Cache not saved (ws/jenkins-pingcap-tiflow-release-7.5-pull_cdc_integration_storage_test-304/tiflow-cdc already exists)
[Pipeline] // cache
[Pipeline] }
[Pipeline] // dir
[Pipeline] }
[Pipeline] // withCredentials
[Pipeline] }
[Pipeline] // timeout
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // container
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // podTemplate
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // parallel
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // timeout
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // container
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
[Pipeline] // node
[Pipeline] }
[Pipeline] // podTemplate
[Pipeline] End of Pipeline
Finished: SUCCESS