From ebe5fae3a9cef18da838bd7b48a1a78f8e47e5f7 Mon Sep 17 00:00:00 2001
From: Jacques Grove
Date: Tue, 22 Apr 2025 21:50:59 -0700
Subject: [PATCH] Fix all /media links to be relative, since absolute links tend to break pandoc conversion to PDF

---
 analyze-slow-queries.md | 2 +-
 ...line-workloads-and-add-index-operations.md | 28 ++++----
 best-practices-for-security-configuration.md | 4 +-
 best-practices-on-public-cloud.md | 8 +--
 .../grafana-monitor-best-practices.md | 32 ++++-----
 best-practices/haproxy-best-practices.md | 2 +-
 .../high-concurrency-best-practices.md | 22 +++----
 best-practices/java-app-best-practices.md | 2 +-
 .../massive-regions-best-practices.md | 10 +--
 .../three-nodes-hybrid-deployment.md | 10 +--
 best-practices/uuid.md | 2 +-
 br/br-auto-tune.md | 2 +-
 br/br-log-architecture.md | 6 +-
 br/br-snapshot-architecture.md | 6 +-
 clinic/clinic-user-guide-for-tiup.md | 2 +-
 clinic/quick-start-with-clinic.md | 2 +-
 configure-memory-usage.md | 6 +-
 configure-placement-rules.md | 2 +-
 cost-model.md | 2 +-
 daily-check.md | 18 ++---
 dashboard/continuous-profiling.md | 12 ++--
 dashboard/dashboard-access.md | 4 +-
 dashboard/dashboard-cluster-info.md | 6 +-
 dashboard/dashboard-diagnostics-access.md | 8 +--
 dashboard/dashboard-diagnostics-report.md | 36 +++++-----
 dashboard/dashboard-diagnostics-usage.md | 12 ++--
 dashboard/dashboard-faq.md | 4 +-
 dashboard/dashboard-intro.md | 2 +-
 dashboard/dashboard-key-visualizer.md | 30 ++++-----
 dashboard/dashboard-log-search.md | 10 +--
 dashboard/dashboard-metrics-relation.md | 14 ++--
 dashboard/dashboard-monitoring.md | 4 +-
 dashboard/dashboard-ops-deploy.md | 2 +-
 dashboard/dashboard-overview.md | 14 ++--
 dashboard/dashboard-profiling.md | 12 ++--
 dashboard/dashboard-resource-manager.md | 6 +-
 dashboard/dashboard-session-share.md | 12 ++--
 dashboard/dashboard-session-sso.md | 40 +++++------
 dashboard/dashboard-slow-query.md | 28 ++++----
 dashboard/dashboard-statement-details.md | 36 +++++-----
 dashboard/dashboard-statement-list.md | 12 ++--
 dashboard/dashboard-user.md | 2 +-
 dashboard/top-sql.md | 16 ++---
 ddl-introduction.md | 2 +-
 deploy-monitoring-services.md | 2 +-
 develop/dev-guide-aws-appflow-integration.md | 42 ++++++------
 develop/dev-guide-gui-datagrip.md | 28 ++++----
 develop/dev-guide-gui-dbeaver.md | 24 +++----
 develop/dev-guide-gui-mysql-workbench.md | 20 +++---
 develop/dev-guide-gui-navicat.md | 16 ++---
 develop/dev-guide-gui-vscode-sqltools.md | 18 ++---
 develop/dev-guide-join-tables.md | 6 +-
 develop/dev-guide-playground-gitpod.md | 6 +-
 develop/dev-guide-proxysql-integration.md | 8 +--
 develop/dev-guide-transaction-restraints.md | 6 +-
 dm/dm-arch.md | 2 +-
 dm/dm-continuous-data-validation.md | 4 +-
 dm/dm-manage-schema.md | 2 +-
 dm/dm-replication-logic.md | 2 +-
 dm/dm-webui-guide.md | 2 +-
 dm/feature-shard-merge-optimistic.md | 28 ++++----
 dm/feature-shard-merge-pessimistic.md | 6 +-
 dr-backup-restore.md | 2 +-
 dr-secondary-cluster.md | 4 +-
 dr-solution-introduction.md | 16 ++---
 exporting-grafana-snapshots.md | 4 +-
 grafana-overview-dashboard.md | 2 +-
 grafana-pd-dashboard.md | 24 +++----
 grafana-performance-overview-dashboard.md | 2 +-
 grafana-tikv-dashboard.md | 26 ++++----
 join-reorder.md | 8 +--
 migrate-from-vitess.md | 6 +-
 migrate-with-pt-ghost.md | 2 +-
 multi-data-centers-in-one-city-deployment.md | 8 +--
 optimistic-transaction.md | 4 +-
 performance-tuning-methods.md | 66 +++++++++----------
 performance-tuning-overview.md | 2 +-
 performance-tuning-practices.md | 60 ++++++++---------
 pessimistic-transaction.md | 6 +-
post-installation-check.md | 6 +- replicate-data-to-kafka.md | 2 +- sql-non-prepared-plan-cache.md | 4 +- sql-optimization-concepts.md | 2 +- sql-prepared-plan-cache.md | 4 +- sql-statements/sql-statement-explain.md | 2 +- sql-statements/sql-statement-trace.md | 4 +- sql-tuning-best-practice.md | 10 +-- statistics.md | 4 +- storage-engine/rocksdb-overview.md | 2 +- storage-engine/titan-overview.md | 14 ++-- sync-diff-inspector/shard-diff.md | 4 +- system-variables.md | 2 +- ...e-data-centers-in-two-cities-deployment.md | 6 +- ticdc-performance-tuning-methods.md | 6 +- ticdc/integrate-confluent-using-ticdc.md | 18 ++--- ticdc/monitor-ticdc.md | 22 +++---- ticdc/ticdc-architecture.md | 10 +-- ticdc/ticdc-bidirectional-replication.md | 2 +- ticdc/ticdc-changefeed-overview.md | 2 +- ticdc/ticdc-overview.md | 2 +- ticdc/ticdc-simple-protocol.md | 4 +- ticdc/ticdc-storage-consumer-dev-guide.md | 2 +- ticdc/ticdc-summary-monitor.md | 22 +++---- tidb-architecture.md | 2 +- .../changefeed-sink-to-cloud-storage.md | 18 ++--- tidb-cloud/config-s3-and-gcs-access.md | 18 ++--- tidb-cloud/csv-config-for-import-data.md | 2 +- tidb-cloud/data-service-integrations.md | 4 +- tidb-cloud/dev-guide-bi-looker-studio.md | 6 +- .../integrate-tidbcloud-with-airbyte.md | 6 +- tidb-cloud/integrate-tidbcloud-with-n8n.md | 2 +- tidb-cloud/integrate-tidbcloud-with-vercel.md | 16 ++--- tidb-cloud/integrate-tidbcloud-with-zapier.md | 18 ++--- .../migrate-from-mysql-using-aws-dms.md | 36 +++++----- ...migrate-from-mysql-using-data-migration.md | 4 +- tidb-cloud/migrate-from-op-tidb.md | 14 ++-- .../migrate-from-oracle-using-aws-dms.md | 26 ++++---- tidb-cloud/recovery-group-failover.md | 8 +-- tidb-cloud/recovery-group-overview.md | 2 +- tidb-cloud/serverless-external-storage.md | 10 +-- ...te-endpoint-connections-on-google-cloud.md | 2 +- ...private-endpoint-connections-serverless.md | 6 +- .../set-up-private-endpoint-connections.md | 8 +-- tidb-cloud/set-up-vpc-peering-connections.md | 16 ++--- tidb-cloud/tidb-cloud-billing-dm.md | 6 +- tidb-cloud/tidb-cloud-connect-aws-dms.md | 8 +-- tidb-cloud/tidb-cloud-intro.md | 4 +- tidb-cloud/tidb-cloud-poc.md | 2 +- tidb-cloud/tidb-cloud-sql-tuning-overview.md | 6 +- .../tidb-cloud-tune-performance-overview.md | 10 +-- ...-performance-benchmarking-with-sysbench.md | 10 +-- ...v6.5-performance-benchmarking-with-tpcc.md | 2 +- ...-performance-benchmarking-with-sysbench.md | 10 +-- ...v7.1-performance-benchmarking-with-tpcc.md | 2 +- ...-performance-benchmarking-with-sysbench.md | 10 +-- ...v7.5-performance-benchmarking-with-tpcc.md | 2 +- ...-performance-benchmarking-with-sysbench.md | 10 +-- ...v8.1-performance-benchmarking-with-tpcc.md | 2 +- tidb-computing.md | 6 +- tidb-distributed-execution-framework.md | 2 +- tidb-global-sort.md | 2 +- tidb-lightning/monitor-tidb-lightning.md | 14 ++-- tidb-lightning/tidb-lightning-faq.md | 6 +- tidb-lightning/tidb-lightning-overview.md | 2 +- .../tidb-lightning-web-interface.md | 8 +-- tidb-monitoring-framework.md | 6 +- tidb-performance-tuning-config.md | 4 +- tidb-scheduling.md | 2 +- tidb-storage.md | 8 +-- tiflash-performance-tuning-methods.md | 10 +-- tiflash/tiflash-disaggregated-and-s3.md | 2 +- tiflash/tiflash-mintso-scheduler.md | 4 +- tiflash/tiflash-overview.md | 2 +- tiflash/tiflash-pipeline-model.md | 2 +- tiflash/use-tiflash-mpp-mode.md | 2 +- tikv-in-memory-engine.md | 2 +- tikv-overview.md | 2 +- time-to-live.md | 6 +- tispark-overview.md | 2 +- troubleshoot-hot-spot-issues.md | 16 ++--- troubleshoot-lock-conflicts.md | 10 
+-- troubleshoot-stale-read.md | 10 +-- troubleshoot-write-conflicts.md | 6 +- two-data-centers-in-one-city-deployment.md | 2 +- vector-search/vector-search-overview.md | 2 +- 165 files changed, 780 insertions(+), 780 deletions(-) diff --git a/analyze-slow-queries.md b/analyze-slow-queries.md index 0547326e02ca9..0693b996d212b 100644 --- a/analyze-slow-queries.md +++ b/analyze-slow-queries.md @@ -31,7 +31,7 @@ The procedures above are explained in the following sections. ## Identify the performance bottleneck of the query -First, you need to have a general understanding of the query process. The key stages of the query execution process in TiDB are illustrated in [TiDB performance map](/media/performance-map.png). +First, you need to have a general understanding of the query process. The key stages of the query execution process in TiDB are illustrated in [TiDB performance map](./media/performance-map.png). You can get the duration information using the following methods: diff --git a/benchmark/online-workloads-and-add-index-operations.md b/benchmark/online-workloads-and-add-index-operations.md index 366cf3adb3fb7..37690f10e5c5b 100644 --- a/benchmark/online-workloads-and-add-index-operations.md +++ b/benchmark/online-workloads-and-add-index-operations.md @@ -112,7 +112,7 @@ sysbench $testname \ | 32 | 54 | 229.2 | 4583 | | 48 | 57 | 230.1 | 4601 | -![add-index-load-1-b32](/media/add-index-load-1-b32.png) +![add-index-load-1-b32](./media/add-index-load-1-b32.png) #### `tidb_ddl_reorg_batch_size = 64` @@ -126,7 +126,7 @@ sysbench $testname \ | 32 | 42 | 185.2 | 3715 | | 48 | 45 | 189.2 | 3794 | -![add-index-load-1-b64](/media/add-index-load-1-b64.png) +![add-index-load-1-b64](./media/add-index-load-1-b64.png) #### `tidb_ddl_reorg_batch_size = 128` @@ -140,7 +140,7 @@ sysbench $testname \ | 32 | 35 | 130.8 | 2629 | | 48 | 35 | 120.5 | 2425 | -![add-index-load-1-b128](/media/add-index-load-1-b128.png) +![add-index-load-1-b128](./media/add-index-load-1-b128.png) #### `tidb_ddl_reorg_batch_size = 256` @@ -154,7 +154,7 @@ sysbench $testname \ | 32 | 36 | 113.5 | 2268 | | 48 | 33 | 86.2 | 1715 | -![add-index-load-1-b256](/media/add-index-load-1-b256.png) +![add-index-load-1-b256](./media/add-index-load-1-b256.png) #### `tidb_ddl_reorg_batch_size = 512` @@ -168,7 +168,7 @@ sysbench $testname \ | 32 | 33 | 72.5 | 1503 | | 48 | 33 | 54.2 | 1318 | -![add-index-load-1-b512](/media/add-index-load-1-b512.png) +![add-index-load-1-b512](./media/add-index-load-1-b512.png) #### `tidb_ddl_reorg_batch_size = 1024` @@ -182,7 +182,7 @@ sysbench $testname \ | 32 | 42 | 93.2 | 1835 | | 48 | 51 | 115.7 | 2261 | -![add-index-load-1-b1024](/media/add-index-load-1-b1024.png) +![add-index-load-1-b1024](./media/add-index-load-1-b1024.png) #### `tidb_ddl_reorg_batch_size = 2048` @@ -196,7 +196,7 @@ sysbench $testname \ | 32 | 1130 | 26.69 | 547 | | 48 | 893 | 27.5 | 552 | -![add-index-load-1-b2048](/media/add-index-load-1-b2048.png) +![add-index-load-1-b2048](./media/add-index-load-1-b2048.png) #### `tidb_ddl_reorg_batch_size = 4096` @@ -210,7 +210,7 @@ sysbench $testname \ | 32 | 942 | 114 | 2267 | | 48 | 187 | 54.2 | 1416 | -![add-index-load-1-b4096](/media/add-index-load-1-b4096.png) +![add-index-load-1-b4096](./media/add-index-load-1-b4096.png) ### Test conclusion @@ -247,7 +247,7 @@ When you perform frequent write operations (this test involves `UPDATE`, `INSERT | 32 | 46 | 533.4 | 8103 | | 48 | 46 | 532.2 | 8074 | -![add-index-load-2-b32](/media/add-index-load-2-b32.png) 
+![add-index-load-2-b32](./media/add-index-load-2-b32.png) #### `tidb_ddl_reorg_batch_size = 1024` @@ -261,7 +261,7 @@ When you perform frequent write operations (this test involves `UPDATE`, `INSERT | 32 | 31 | 467.5 | 7516 | | 48 | 30 | 562.1 | 7442 | -![add-index-load-2-b1024](/media/add-index-load-2-b1024.png) +![add-index-load-2-b1024](./media/add-index-load-2-b1024.png) #### `tidb_ddl_reorg_batch_size = 4096` @@ -275,7 +275,7 @@ When you perform frequent write operations (this test involves `UPDATE`, `INSERT | 32 | 30 | 441.9 | 7057 | | 48 | 30 | 440.1 | 7004 | -![add-index-load-2-b4096](/media/add-index-load-2-b4096.png) +![add-index-load-2-b4096](./media/add-index-load-2-b4096.png) ### Test conclusion @@ -309,7 +309,7 @@ When you only perform query operations to the target column of the `ADD INDEX` s | 32 | 42 | 343.1 | 6695 | | 48 | 42 | 333.4 | 6454 | -![add-index-load-3-b32](/media/add-index-load-3-b32.png) +![add-index-load-3-b32](./media/add-index-load-3-b32.png) #### `tidb_ddl_reorg_batch_size = 1024` @@ -323,7 +323,7 @@ When you only perform query operations to the target column of the `ADD INDEX` s | 32 | 32 | 300.6 | 6017 | | 48 | 31 | 279.5 | 5612 | -![add-index-load-3-b1024](/media/add-index-load-3-b1024.png) +![add-index-load-3-b1024](./media/add-index-load-3-b1024.png) #### `tidb_ddl_reorg_batch_size = 4096` @@ -337,7 +337,7 @@ When you only perform query operations to the target column of the `ADD INDEX` s | 32 | 32 | 220.2 | 4924 | | 48 | 33 | 214.8 | 4544 | -![add-index-load-3-b4096](/media/add-index-load-3-b4096.png) +![add-index-load-3-b4096](./media/add-index-load-3-b4096.png) ### Test conclusion diff --git a/best-practices-for-security-configuration.md b/best-practices-for-security-configuration.md index 1b684ce86f96d..c7982551e65c6 100644 --- a/best-practices-for-security-configuration.md +++ b/best-practices-for-security-configuration.md @@ -34,11 +34,11 @@ It is recommended to immediately change the Grafana password to a strong one dur - Upon first login to Grafana, follow the prompts to change the password. - ![Grafana Password Reset Guide](/media/grafana-password-reset1.png) + ![Grafana Password Reset Guide](./media/grafana-password-reset1.png) - Access the Grafana personal configuration center to change the password. - ![Grafana Password Reset Guide](/media/grafana-password-reset2.png) + ![Grafana Password Reset Guide](./media/grafana-password-reset2.png) ## Enhance TiDB Dashboard security diff --git a/best-practices-on-public-cloud.md b/best-practices-on-public-cloud.md index 11873ab32abef..26ca49972b044 100644 --- a/best-practices-on-public-cloud.md +++ b/best-practices-on-public-cloud.md @@ -168,8 +168,8 @@ In a TiDB cluster, a single active Placement Driver (PD) server is used to handl The following diagrams show the symptoms of a large-scale TiDB cluster consisting of three PD servers, each equipped with 56 CPUs. From these diagrams, it is observed that when the query per second (QPS) exceeds 1 million and the TSO (Timestamp Oracle) requests per second exceed 162,000, the CPU utilization reaches approximately 4,600%. This high CPU utilization indicates that the PD leader is experiencing a significant load and is running out of available CPU resources. 
-![pd-server-cpu](/media/performance/public-cloud-best-practice/baseline_cpu.png) -![pd-server-metrics](/media/performance/public-cloud-best-practice/baseline_metrics.png) +![pd-server-cpu](./media/performance/public-cloud-best-practice/baseline_cpu.png) +![pd-server-metrics](./media/performance/public-cloud-best-practice/baseline_metrics.png) ### Tune PD performance @@ -210,5 +210,5 @@ After the tuning, the following effects can be observed: These improvements indicate that the tuning adjustments have successfully reduced the CPU utilization of the PD server while maintaining stable TSO handling performance. -![pd-server-cpu](/media/performance/public-cloud-best-practice/after_tuning_cpu.png) -![pd-server-metrics](/media/performance/public-cloud-best-practice/after_tuning_metrics.png) +![pd-server-cpu](./media/performance/public-cloud-best-practice/after_tuning_cpu.png) +![pd-server-metrics](./media/performance/public-cloud-best-practice/after_tuning_metrics.png) diff --git a/best-practices/grafana-monitor-best-practices.md b/best-practices/grafana-monitor-best-practices.md index 6792c6f4121db..a9d06d71a2d2f 100644 --- a/best-practices/grafana-monitor-best-practices.md +++ b/best-practices/grafana-monitor-best-practices.md @@ -12,7 +12,7 @@ When you [deploy a TiDB cluster using TiUP](/production-deployment-using-tiup.md [Prometheus](https://prometheus.io/) is a time series database with a multi-dimensional data model and a flexible query language. [Grafana](https://grafana.com/) is an open source monitoring system for analyzing and visualizing metrics. -![The monitoring architecture in the TiDB cluster](/media/prometheus-in-tidb.png) +![The monitoring architecture in the TiDB cluster](./media/prometheus-in-tidb.png) For TiDB 2.1.3 or later versions, TiDB monitoring supports the pull method. It is a good adjustment with the following benefits: @@ -51,7 +51,7 @@ tidb_executor_statement_total{type="Use"} 466016 The data above is stored in Prometheus and displayed on Grafana. Right-click the panel and then click the **Edit** button (or directly press the E key) shown in the following figure: -![The Edit entry for the Metrics tab](/media/best-practices/metric-board-edit-entry.png) +![The Edit entry for the Metrics tab](./media/best-practices/metric-board-edit-entry.png) After clicking the **Edit** button, you can see the query expression with the `tidb_executor_statement_total` metric name on the Metrics tab. The meanings of some items on the panel are as follows: @@ -63,7 +63,7 @@ After clicking the **Edit** button, you can see the query expression with the `t The query expression on the **Metrics** tab is as follows: -![The query expression on the Metrics tab](/media/best-practices/metric-board-expression.jpeg) +![The query expression on the Metrics tab](./media/best-practices/metric-board-expression.jpeg) Prometheus supports many query expressions and functions. For more details, refer to [Prometheus official website](https://prometheus.io/docs/prometheus/latest/querying). @@ -75,11 +75,11 @@ This section introduces seven tips for efficiently using Grafana to monitor and In the example shown in the [source and display of monitoring data](#source-and-display-of-monitoring-data) section, the data is grouped by type. If you want to know whether you can group by other dimensions and quickly check which dimensions are available, you can use the following method: **Only keep the metric name on the query expression, no calculation, and leave the `Legend format` field blank**. 
In this way, the original metrics are displayed. For example, the following figure shows that there are three dimensions (`instance`, `job` and `type`): -![Edit query expression and check all dimensions](/media/best-practices/edit-expression-check-dimensions.jpg) +![Edit query expression and check all dimensions](./media/best-practices/edit-expression-check-dimensions.jpg) Then you can modify the query expression by adding the `instance` dimension after `type`, and adding `{{instance}}` to the `Legend format` field. In this way, you can check the QPS of different types of SQL statements that are executed on each TiDB server: -![Add an instance dimension to the query expression](/media/best-practices/add-instance-dimension.jpeg) +![Add an instance dimension to the query expression](./media/best-practices/add-instance-dimension.jpeg) ### Tip 2: Switch the scale of the Y-axis @@ -89,11 +89,11 @@ Of course, a linear scale is not suitable for all situations. For example, if yo The Y-axis uses a binary logarithmic scale by default: -![The Y-axis uses a binary logarithmic scale](/media/best-practices/default-axes-scale.jpg) +![The Y-axis uses a binary logarithmic scale](./media/best-practices/default-axes-scale.jpg) Switch the Y-axis to a linear scale: -![Switch to a linear scale](/media/best-practices/axes-scale-linear.jpg) +![Switch to a linear scale](./media/best-practices/axes-scale-linear.jpg) > **Tip:** > @@ -105,33 +105,33 @@ You might still cannot see the trend after switching to the linear scale. For ex The baseline defaults to `0`: -![Baseline defaults to 0](/media/best-practices/default-y-min.jpeg) +![Baseline defaults to 0](./media/best-practices/default-y-min.jpeg) Change the baseline to `auto`: -![Change the baseline to auto](/media/best-practices/y-min-auto.jpg) +![Change the baseline to auto](./media/best-practices/y-min-auto.jpg) ### Tip 4: Use Shared crosshair or Tooltip In the **Settings** panel, there is a **Graph Tooltip** panel option which defaults to **Default**. -![Graphic presentation tools](/media/best-practices/graph-tooltip.jpeg) +![Graphic presentation tools](./media/best-practices/graph-tooltip.jpeg) You can use **Shared crosshair** and **Shared Tooltip** respectively to test the effect as shown in the following figures. Then, the scales are displayed in linkage, which is convenient to confirm the correlation of two metrics when diagnosing problems. Set the graphic presentation tool to **Shared crosshair**: -![Set the graphical presentation tool to Shared crosshair](/media/best-practices/graph-tooltip-shared-crosshair.jpeg) +![Set the graphical presentation tool to Shared crosshair](./media/best-practices/graph-tooltip-shared-crosshair.jpeg) Set the graphical presentation tool to **Shared Tooltip**: -![Set the graphic presentation tool to Shared Tooltip](/media/best-practices/graph-tooltip-shared-tooltip.jpg) +![Set the graphic presentation tool to Shared Tooltip](./media/best-practices/graph-tooltip-shared-tooltip.jpg) ### Tip 5: Enter `IP address:port number` to check the metrics in history PD's dashboard only shows the metrics of the current leader. If you want to check the status of a PD leader in history and it no longer exists in the drop-down list of the `instance` field, you can manually enter `IP address:2379` to check the data of the leader. 
-![Check the metrics in history](/media/best-practices/manually-input-check-metric.jpeg) +![Check the metrics in history](./media/best-practices/manually-input-check-metric.jpeg) ### Tip 6: Use the `Avg` function @@ -139,11 +139,11 @@ Generally, only `Max` and `Current` functions are available in the legend by def Add summary functions such as the `Avg` function: -![Add summary functions such as Avg](/media/best-practices/add-avg-function.jpeg) +![Add summary functions such as Avg](./media/best-practices/add-avg-function.jpeg) Then check the overall trend: -![Add Avg function to check the overall trend](/media/best-practices/add-avg-function-check-trend.jpg) +![Add Avg function to check the overall trend](./media/best-practices/add-avg-function-check-trend.jpg) ### Tip 7: Use the API of Prometheus to obtain the result of query expressions @@ -155,7 +155,7 @@ Grafana obtains data through the API of Prometheus and you can use this API to o The API of Prometheus is shown as follows: -![The API of Prometheus](/media/best-practices/prometheus-api-interface.jpg) +![The API of Prometheus](./media/best-practices/prometheus-api-interface.jpg) {{< copyable "shell-regular" >}} diff --git a/best-practices/haproxy-best-practices.md b/best-practices/haproxy-best-practices.md index 061a2bdac305c..a7102158cdecc 100644 --- a/best-practices/haproxy-best-practices.md +++ b/best-practices/haproxy-best-practices.md @@ -8,7 +8,7 @@ aliases: ['/docs/dev/best-practices/haproxy-best-practices/','/docs/dev/referenc This document describes best practices for configuration and usage of [HAProxy](https://github.com/haproxy/haproxy) in TiDB. HAProxy provides load balancing for TCP-based applications. From TiDB clients, you can manipulate data just by connecting to the floating virtual IP address provided by HAProxy, which helps to achieve load balance in the TiDB server layer. -![HAProxy Best Practices in TiDB](/media/haproxy.jpg) +![HAProxy Best Practices in TiDB](./media/haproxy.jpg) > **Note:** > diff --git a/best-practices/high-concurrency-best-practices.md b/best-practices/high-concurrency-best-practices.md index 23559639488ea..9efdb90a7e004 100644 --- a/best-practices/high-concurrency-best-practices.md +++ b/best-practices/high-concurrency-best-practices.md @@ -37,7 +37,7 @@ To address the above challenges, it is necessary to start with the data segmenta TiDB splits data into Regions, each representing a range of data with a size limit of 96M by default. Each Region has multiple replicas, and each group of replicas is called a Raft Group. In a Raft Group, the Region Leader executes the read and write tasks (TiDB supports [Follower-Read](/follower-read.md)) within the data range. The Region Leader is automatically scheduled by the Placement Driver (PD) component to different physical nodes evenly to distribute the read and write pressure. -![TiDB Data Overview](/media/best-practices/tidb-data-overview.png) +![TiDB Data Overview](./media/best-practices/tidb-data-overview.png) In theory, if an application has no write hotspot, TiDB, by the virtue of its architecture, can not only linearly scale its read and write capacities, but also make full use of the distributed resources. From this point of view, TiDB is especially suitable for the high-concurrent and write-intensive scenario. @@ -94,19 +94,19 @@ In theory, the above operation seems to comply with the TiDB best practices, and For the cluster topology, 2 TiDB nodes, 3 PD nodes and 6 TiKV nodes are deployed. 
Ignore the QPS performance, because this test is to clarify the principle rather than for benchmark. -![QPS1](/media/best-practices/QPS1.png) +![QPS1](./media/best-practices/QPS1.png) The client starts "intensive" write requests in a short time, which is 3K QPS received by TiDB. In theory, the load pressure should be evenly distributed to 6 TiKV nodes. However, from the CPU usage of each TiKV node, the load distribution is uneven. The `tikv-3` node is the write hotspot. -![QPS2](/media/best-practices/QPS2.png) +![QPS2](./media/best-practices/QPS2.png) -![QPS3](/media/best-practices/QPS3.png) +![QPS3](./media/best-practices/QPS3.png) [Raft store CPU](/grafana-tikv-dashboard.md) is the CPU usage rate for the `raftstore` thread, usually representing the write load. In this scenario, `tikv-3` is the Leader of this Raft Group; `tikv-0` and `tikv-1` are the followers. The loads of other nodes are almost empty. The monitoring metrics of PD also confirms that hotspot has been caused. -![QPS4](/media/best-practices/QPS4.png) +![QPS4](./media/best-practices/QPS4.png) ## Hotspot causes @@ -118,13 +118,13 @@ In the above test, the operation does not reach the ideal performance expected i In a short period of time, a huge volume of data is continuously written to the same Region. -![TiKV Region Split](/media/best-practices/tikv-Region-split.png) +![TiKV Region Split](./media/best-practices/tikv-Region-split.png) The above diagram illustrates the Region splitting process. As data is continuously written into TiKV, TiKV splits a Region into multiple Regions. Because the leader election is started on the original store where the Region Leader to be split is located, the leaders of the two newly split Regions might be still on the same store. This splitting process might also happen on the newly split Region 2 and Region 3. In this way, write pressure is concentrated on TiKV-Node 1. During the continuous write process, after finding that hotspot is caused on Node 1, PD evenly distributes the concentrated Leaders to other nodes. If the number of TiKV nodes is more than the number of Region replicas, TiKV will try to migrate these Regions to idle nodes. These two operations during the write process are also reflected in the PD's monitoring metrics: -![QPS5](/media/best-practices/QPS5.png) +![QPS5](./media/best-practices/QPS5.png) After a period of continuous writes, PD automatically schedules the entire TiKV cluster to a state where pressure is evenly distributed. By that time, the capacity of the whole cluster can be fully used. @@ -150,7 +150,7 @@ SPLIT TABLE table_name [INDEX index_name] BY (value_list) [, (value_list)] However, TiDB does not automatically perform this pre-split operation. The reason is related to the data distribution in TiDB. -![Table Region Range](/media/best-practices/table-Region-range.png) +![Table Region Range](./media/best-practices/table-Region-range.png) From the diagram above, according to the encoding rule of a row's key, the `rowID` is the only variable part. In TiDB, `rowID` is an `Int64` integer. However, you might not need to evenly split the `Int64` integer range to the desired number of ranges and then to distribute these ranges to different nodes, because Region split must also be based on the actual situation. 
@@ -192,11 +192,11 @@ ORDER BY Then operate the write load again: -![QPS6](/media/best-practices/QPS6.png) +![QPS6](./media/best-practices/QPS6.png) -![QPS7](/media/best-practices/QPS7.png) +![QPS7](./media/best-practices/QPS7.png) -![QPS8](/media/best-practices/QPS8.png) +![QPS8](./media/best-practices/QPS8.png) You can see that the apparent hotspot problem has been resolved now. diff --git a/best-practices/java-app-best-practices.md b/best-practices/java-app-best-practices.md index c864ecd120ffd..b4ae8cb665a86 100644 --- a/best-practices/java-app-best-practices.md +++ b/best-practices/java-app-best-practices.md @@ -18,7 +18,7 @@ Common components that interact with the TiDB database in Java applications incl - Data access framework: Applications usually use a data access framework such as [MyBatis](https://mybatis.org/mybatis-3/index.html) and [Hibernate](https://hibernate.org/) to further simplify and manage the database access operations. - Application implementation: The application logic controls when to send what commands to the database. Some applications use [Spring Transaction](https://docs.spring.io/spring/docs/4.2.x/spring-framework-reference/html/transaction.html) aspects to manage transactions' start and commit logics. -![Java application components](/media/best-practices/java-practice-1.png) +![Java application components](./media/best-practices/java-practice-1.png) From the above diagram, you can see that a Java application might do the following things: diff --git a/best-practices/massive-regions-best-practices.md b/best-practices/massive-regions-best-practices.md index 0744c4a39f3a8..312cb7df68e48 100644 --- a/best-practices/massive-regions-best-practices.md +++ b/best-practices/massive-regions-best-practices.md @@ -14,7 +14,7 @@ This document introduces the workflow of Raftstore (a core module of TiKV), expl A TiKV instance has multiple Regions on it. The Raftstore module drives the Raft state machine to process Region messages. These messages include processing read or write requests on Regions, persisting or replicating Raft logs, and processing Raft heartbeats. However, an increasing number of Regions can affect performance of the whole cluster. To understand this, it is necessary to learn the workflow of Raftstore shown as follows: -![Raftstore Workflow](/media/best-practices/raft-process.png) +![Raftstore Workflow](./media/best-practices/raft-process.png) > **Note:** > @@ -41,7 +41,7 @@ You can check the following monitoring metrics in Grafana's **TiKV Dashboard**: Reference value: lower than `raftstore.store-pool-size * 85%`. - ![Check Raftstore CPU](/media/best-practices/raft-store-cpu.png) + ![Check Raftstore CPU](./media/best-practices/raft-store-cpu.png) + `Propose wait duration` in the **Raft Propose** panel @@ -49,7 +49,7 @@ You can check the following monitoring metrics in Grafana's **TiKV Dashboard**: Reference value: lower than 50~100 ms according to the cluster size - ![Check Propose wait duration](/media/best-practices/propose-wait-duration.png) + ![Check Propose wait duration](./media/best-practices/propose-wait-duration.png) + `Commit log duration` in the **Raft IO** panel @@ -61,7 +61,7 @@ You can check the following monitoring metrics in Grafana's **TiKV Dashboard**: Reference value: lower than 200-500 ms. 
- ![Check Commit log duration](/media/best-practices/commit-log-duration.png) + ![Check Commit log duration](./media/best-practices/commit-log-duration.png) ## Performance tuning methods @@ -172,7 +172,7 @@ In TiKV, pd-worker regularly reports Region Meta information to PD. When TiKV is You can check **Worker pending tasks** under **Task** in the **TiKV Grafana** panel to determine whether pd-worker has tasks piled up. Generally, `pending tasks` should be kept at a relatively low value. -![Check pd-worker](/media/best-practices/pd-worker-metrics.png) +![Check pd-worker](./media/best-practices/pd-worker-metrics.png) pd-worker has been optimized for better performance since [v3.0.5](/releases/release-3.0.5.md#tikv). If you encounter a similar problem, it is recommended to upgrade to the latest version. diff --git a/best-practices/three-nodes-hybrid-deployment.md b/best-practices/three-nodes-hybrid-deployment.md index cbd698c50de28..4110b6de161bd 100644 --- a/best-practices/three-nodes-hybrid-deployment.md +++ b/best-practices/three-nodes-hybrid-deployment.md @@ -19,11 +19,11 @@ In this example, the TPC-C 5000 Warehouse data is used in TiUP bench and the tes The image below shows the QPS monitor of the cluster within 12 hours with the default parameter configuration. From the image, you can see an obvious performance jitter. -![QPS with default config](/media/best-practices/three-nodes-default-config-qps.png) +![QPS with default config](./media/best-practices/three-nodes-default-config-qps.png) After the parameter adjustment, the performance is improved. -![QPS with modified config](/media/best-practices/three-nodes-final-config-qps.png) +![QPS with modified config](./media/best-practices/three-nodes-final-config-qps.png) ## Parameter adjustment @@ -65,7 +65,7 @@ This parameter defaults to `4`. Because in the existing deployment plan, the CPU In this test, the value of this parameter is set to `2`. Observe the **gRPC poll CPU** panel and you can see that the usage rate is just around 80%. -![gRPC Pool CPU](/media/best-practices/three-nodes-grpc-pool-usage.png) +![gRPC Pool CPU](./media/best-practices/three-nodes-grpc-pool-usage.png) #### `storage.scheduler-worker-pool-size` @@ -73,7 +73,7 @@ When TiKV detects that the CPU core number of the machine is greater than or equ Ideally, the usage rate of the scheduler thread pool is kept between 50% and 75%. Similar to the gRPC thread pool, the `storage.scheduler-worker-pool-size` parameter defaults to a larger value during the hybrid deployment, which makes resource usage insufficient. In this test, the value of this parameter is set to `2`, which is in line with the best practices, a conclusion drawn by observing the corresponding metrics in the **Scheduler worker CPU** panel. -![Scheduler Worker CPU](/media/best-practices/three-nodes-scheduler-pool-usage.png) +![Scheduler Worker CPU](./media/best-practices/three-nodes-scheduler-pool-usage.png) ### Resource configuration for TiKV background tasks @@ -102,7 +102,7 @@ The method of optimizing the RocksDB thread pool is similar to that of optimizin Because TiDB uses the multi-version concurrency control (MVCC) model, TiKV periodically cleans old version data in the background. When the available resources are limited, this operation causes periodical performance jitter. You can use the `gc.max_write_bytes_per_sec` parameter to limit the resource usage of such an operation. 
-![GC Impact](/media/best-practices/three-nodes-gc-impact.png) +![GC Impact](./media/best-practices/three-nodes-gc-impact.png) In addition to setting this parameter value in the configuration file, you can also dynamically adjust this value in tikv-ctl. diff --git a/best-practices/uuid.md b/best-practices/uuid.md index 04b420b2973f6..ae81beab23c1d 100644 --- a/best-practices/uuid.md +++ b/best-practices/uuid.md @@ -51,7 +51,7 @@ In the screenshot of the [Key Visualizer](/tidb-cloud/tune-performance.md#key-vi -![Key Visualizer](/media/best-practices/uuid_keyviz.png) +![Key Visualizer](./media/best-practices/uuid_keyviz.png) ```sql CREATE TABLE `uuid_demo_1` ( diff --git a/br/br-auto-tune.md b/br/br-auto-tune.md index 479dee8c597b4..70dd5a0d28c51 100644 --- a/br/br-auto-tune.md +++ b/br/br-auto-tune.md @@ -82,6 +82,6 @@ The following is an example of how auto-tune works. `*` denotes a CPU core used In the **Backup CPU Utilization** panel, you can see the size of the thread pool adjusted by auto-tune: -![Grafana dashboard example of backup auto-tune metrics](/media/br/br-auto-throttle.png) +![Grafana dashboard example of backup auto-tune metrics](./media/br/br-auto-throttle.png) In the image above, the yellow semi-transparent area represents the threads available for backup tasks. You can see the CPU utilization of backup tasks does not go beyond the yellow area. diff --git a/br/br-log-architecture.md b/br/br-log-architecture.md index c3c7a38ab44fb..6f57253fe8b72 100644 --- a/br/br-log-architecture.md +++ b/br/br-log-architecture.md @@ -11,13 +11,13 @@ This document introduces the architecture and process of TiDB log backup and poi The log backup and PITR architecture is as follows: -![BR log backup and PITR architecture](/media/br/br-log-arch.png) +![BR log backup and PITR architecture](./media/br/br-log-arch.png) ## Process of log backup The process of a cluster log backup is as follows: -![BR log backup process design](/media/br/br-log-backup-ts.png) +![BR log backup process design](./media/br/br-log-backup-ts.png) System components and key concepts involved in the log backup process: @@ -57,7 +57,7 @@ The complete backup process is as follows: The process of PITR is as follows: -![Point-in-time recovery process design](/media/br/pitr-ts.png) +![Point-in-time recovery process design](./media/br/pitr-ts.png) The complete PITR process is as follows: diff --git a/br/br-snapshot-architecture.md b/br/br-snapshot-architecture.md index 5af26ce3112cf..6222d22a54f4f 100644 --- a/br/br-snapshot-architecture.md +++ b/br/br-snapshot-architecture.md @@ -11,13 +11,13 @@ This document introduces the architecture and process of TiDB snapshot backup an The TiDB snapshot backup and restore architecture is as follows: -![BR snapshot backup and restore architecture](/media/br/br-snapshot-arch.png) +![BR snapshot backup and restore architecture](./media/br/br-snapshot-arch.png) ## Process of backup The process of a cluster snapshot backup is as follows: -![snapshot backup process design](/media/br/br-snapshot-backup-ts.png) +![snapshot backup process design](./media/br/br-snapshot-backup-ts.png) The complete backup process is as follows: @@ -54,7 +54,7 @@ The complete backup process is as follows: The process of a cluster snapshot restore is as follows: -![snapshot restore process design](/media/br/br-snapshot-restore-ts.png) +![snapshot restore process design](./media/br/br-snapshot-restore-ts.png) The complete restore process is as follows: diff --git a/clinic/clinic-user-guide-for-tiup.md 
b/clinic/clinic-user-guide-for-tiup.md index 3776572e410b1..8560f3d48a24a 100644 --- a/clinic/clinic-user-guide-for-tiup.md +++ b/clinic/clinic-user-guide-for-tiup.md @@ -71,7 +71,7 @@ Before using PingCAP Clinic, you need to install Diag (a component to collect da - Click the icon in the lower-right corner of the Cluster page, select **Get Access Token For Diag Tool**, and click **+** in the pop-up window. Make sure that you have copied and saved the token that is displayed. - ![Get the Token](/media/clinic-get-token.png) + ![Get the Token](./media/clinic-get-token.png) > **Note:** > diff --git a/clinic/quick-start-with-clinic.md b/clinic/quick-start-with-clinic.md index 56538e1aeda43..1c166cc0a185f 100644 --- a/clinic/quick-start-with-clinic.md +++ b/clinic/quick-start-with-clinic.md @@ -55,7 +55,7 @@ Before using PingCAP Clinic, you need to install Diag and prepare an environment To get a token, click the icon in the lower-right corner of the Cluster page, select **Get Access Token For Diag Tool**, and click **+** in the pop-up window. Make sure that you have copied and saved the token that is displayed. - ![An example of a token](/media/clinic-get-token.png) + ![An example of a token](./media/clinic-get-token.png) > **Note:** > diff --git a/configure-memory-usage.md b/configure-memory-usage.md index 25dfbd93bc0f4..571ba2ca22929 100644 --- a/configure-memory-usage.md +++ b/configure-memory-usage.md @@ -216,14 +216,14 @@ GO 1.19 introduces an environment variable [`GOMEMLIMIT`](https://pkg.go.dev/run For v6.1.3 <= TiDB < v6.5.0, you can mitigate a typical category of OOM issues by manually setting `GOMEMLIMIT`. The typical category of OOM issues is: before OOM occurs, the estimated memory in use on Grafana occupies only half of the entire memory (TiDB-Runtime > Memory Usage > estimate-inuse), as shown in the following figure: -![normal OOM case example](/media/configure-memory-usage-oom-example.png) +![normal OOM case example](./media/configure-memory-usage-oom-example.png) To verify the performance of `GOMEMLIMIT`, a test is performed to compare the specific memory usage with and without `GOMEMLIMIT` configuration. - In TiDB v6.1.2, the TiDB server encounters OOM (system memory: about 48 GiB) after the simulated workload runs for several minutes: - ![v6.1.2 workload oom](/media/configure-memory-usage-612-oom.png) + ![v6.1.2 workload oom](./media/configure-memory-usage-612-oom.png) - In TiDB v6.1.3, `GOMEMLIMIT` is set to 40000 MiB. It is found that the simulated workload runs stably for a long time, OOM does not occur in the TiDB server, and the maximum memory usage of the process is stable at around 40.8 GiB: - ![v6.1.3 workload no oom with GOMEMLIMIT](/media/configure-memory-usage-613-no-oom.png) + ![v6.1.3 workload no oom with GOMEMLIMIT](./media/configure-memory-usage-613-no-oom.png) diff --git a/configure-placement-rules.md b/configure-placement-rules.md index b50cf64bc4f34..2532e458137aa 100644 --- a/configure-placement-rules.md +++ b/configure-placement-rules.md @@ -22,7 +22,7 @@ The key ranges of multiple rules can have overlapping parts, which means that a In addition, to meet the requirement that rules from different sources are isolated from each other, these rules can be organized in a more flexible way. Therefore, the concept of "Group" is introduced. Generally, users can place rules in different groups according to different sources. 
-![Placement rules overview](/media/placement-rules-1.png) +![Placement rules overview](./media/placement-rules-1.png) ### Rule fields diff --git a/cost-model.md b/cost-model.md index 71445fbdf91e7..0353c645dbd49 100644 --- a/cost-model.md +++ b/cost-model.md @@ -7,7 +7,7 @@ summary: Learn how the cost model used by TiDB works during physical optimizatio TiDB uses a cost model to choose an index and operator during [physical optimization](/sql-physical-optimization.md). The process is illustrated in the following diagram: -![CostModel](/media/cost-model.png) +![CostModel](./media/cost-model.png) TiDB calculates the access cost of each index and the execution cost of each physical operator in plans (such as HashJoin and IndexJoin) and chooses the minimum cost plan. diff --git a/daily-check.md b/daily-check.md index beda3f86b57ea..c66d116ce7d50 100644 --- a/daily-check.md +++ b/daily-check.md @@ -16,7 +16,7 @@ TiDB Dashboard simplifies the operation and maintenance of the TiDB database. Yo ### Instance panel -![Instance panel](/media/instance-status-panel.png) +![Instance panel](./media/instance-status-panel.png) + **Status**: This indicator is used to check whether the status is normal. For an online node, this can be ignored. + **Up Time**: The key indicator. If you find that the `Up Time` is changed, you need to locate the reason why the component is restarted. @@ -24,19 +24,19 @@ TiDB Dashboard simplifies the operation and maintenance of the TiDB database. Yo ### Host panel -![Host panel](/media/host-panel.png) +![Host panel](./media/host-panel.png) You can view the usage of CPU, memory, and disk. When the usage of any resource exceeds 80%, it is recommended to scale out the capacity accordingly. ### SQL analysis panel -![SQL analysis panel](/media/sql-analysis-panel.png) +![SQL analysis panel](./media/sql-analysis-panel.png) You can locate the slow SQL statement executed in the cluster. Then you can optimize the specific SQL statement. ### Region panel -![Region panel](/media/region-panel.png) +![Region panel](./media/region-panel.png) + `down-peer-region-count`: The number of Regions with an unresponsive peer reported by the Raft leader. + `empty-region-count`: The number of empty Regions, with a size of smaller than 1 MiB. These Regions are generated by executing the `TRUNCATE TABLE`/`DROP TABLE` statement. If this number is large, you can consider enabling `Region Merge` to merge Regions across tables. @@ -52,13 +52,13 @@ Generally, it is normal that these values are not `0`. However, it is not normal ### KV Request Duration -![TiKV request duration](/media/kv-duration-panel.png) +![TiKV request duration](./media/kv-duration-panel.png) The KV request duration 99 in TiKV. If you find nodes with a long duration, check whether there are hot spots, or whether there are nodes with poor performance. ### PD TSO Wait Duration -![TiDB TSO Wait Duration](/media/pd-duration-panel.png) +![TiDB TSO Wait Duration](./media/pd-duration-panel.png) The time it takes for TiDB to obtain TSO from PD. The following are reasons for the long wait duration: @@ -68,18 +68,18 @@ The time it takes for TiDB to obtain TSO from PD. The following are reasons for ### Overview panel -![Overview panel](/media/overview-panel.png) +![Overview panel](./media/overview-panel.png) You can view the load, memory available, network traffic, and I/O utilities. When a bottleneck is found, it is recommended to scale out the capacity, or to optimize the cluster topology, SQL, and cluster parameters. 
### Exceptions -![Exceptions](/media/failed-query-panel.png) +![Exceptions](./media/failed-query-panel.png) You can view the errors triggered by the execution of SQL statements on each TiDB instance. These include syntax error and primary key conflicts. ### GC status -![GC status](/media/garbage-collation-panel.png) +![GC status](./media/garbage-collation-panel.png) You can check whether the GC (Garbage Collection) status is normal by viewing the time when the last GC happens. If the GC is abnormal, it might lead to excessive historical data, thereby decreasing the access efficiency. diff --git a/dashboard/continuous-profiling.md b/dashboard/continuous-profiling.md index e68e7ac09af14..f384d3efcf258 100644 --- a/dashboard/continuous-profiling.md +++ b/dashboard/continuous-profiling.md @@ -40,7 +40,7 @@ You can access the Continuous Profiling page using either of the following metho * After logging in to TiDB Dashboard, click **Advanced Debugging** > **Profiling Instances** > **Continuous Profiling** in the left navigation menu. - ![Access page](/media/dashboard/dashboard-conprof-access.png) + ![Access page](./media/dashboard/dashboard-conprof-access.png) * Visit in your browser. Replace `127.0.0.1:2379` with the actual PD instance address and port. @@ -58,7 +58,7 @@ To enable this feature: 2. Click **Open Settings**. In the **Settings** area on the right, switch **Enable Feature** on, and modify the default value of **Retention Duration** if necessary. 3. Click **Save**. -![Enable feature](/media/dashboard/dashboard-conprof-start.png) +![Enable feature](./media/dashboard/dashboard-conprof-start.png) ## View current performance data @@ -68,17 +68,17 @@ Manual Profiling cannot be initiated on clusters that have Continuous Profiling On the list page, you can see all performance data collected since the enabling of this feature. -![History results](/media/dashboard/dashboard-conprof-history.png) +![History results](./media/dashboard/dashboard-conprof-history.png) ## Download performance data On the profiling result page, you can click **Download Profiling Result** in the upper-right corner to download all profiling results. -![Download profiling result](/media/dashboard/dashboard-conprof-download.png) +![Download profiling result](./media/dashboard/dashboard-conprof-download.png) You can also click an individual instance in the table to view its profiling result. Alternatively, you can hover on ... to download raw data. -![View profiling result](/media/dashboard/dashboard-conprof-single.png) +![View profiling result](./media/dashboard/dashboard-conprof-single.png) ## Disable Continuous Profiling @@ -87,7 +87,7 @@ You can also click an individual instance in the table to view its profiling res 3. Click **Save**. 4. In the popped-up dialog box, click **Disable**. -![Disable feature](/media/dashboard/dashboard-conprof-stop.png) +![Disable feature](./media/dashboard/dashboard-conprof-stop.png) ## Frequently asked questions diff --git a/dashboard/dashboard-access.md b/dashboard/dashboard-access.md index 35f6e74038eab..8e7469c96775a 100644 --- a/dashboard/dashboard-access.md +++ b/dashboard/dashboard-access.md @@ -58,10 +58,10 @@ The following languages are supported in TiDB Dashboard: In the **SQL User Sign In** page, you can click the **Switch Language** drop-down list to switch the interface language. 
-![Switch language](/media/dashboard/dashboard-access-switch-language.png) +![Switch language](./media/dashboard/dashboard-access-switch-language.png) ## Logout Once you have logged in, click the login user name in the left navigation bar to switch to the user page. Click the **Logout** button on the user page to log out the current user. After logging out, you need to re-enter your username and password. -![Logout](/media/dashboard/dashboard-access-logout.png) +![Logout](./media/dashboard/dashboard-access-logout.png) diff --git a/dashboard/dashboard-cluster-info.md b/dashboard/dashboard-cluster-info.md index 0862560f06488..08b6efff3a879 100644 --- a/dashboard/dashboard-cluster-info.md +++ b/dashboard/dashboard-cluster-info.md @@ -20,7 +20,7 @@ You can use one of the following two methods to access the cluster information p Click **Instances** to view the list of instances: -![Instance list](/media/dashboard/dashboard-cluster-info-instances-v650.png) +![Instance list](./media/dashboard/dashboard-cluster-info-instances-v650.png) This instance list shows the overview information of all instances of TiDB, TiKV, PD, and TiFlash components in the cluster. @@ -54,7 +54,7 @@ Instance status derives from the PD scheduling information. For more details, se Click **Hosts** to view the list of hosts: -![Host list](/media/dashboard/dashboard-cluster-info-hosts-v650.png) +![Host list](./media/dashboard/dashboard-cluster-info-hosts-v650.png) This host list shows the running status of hosts that correspond to all instances of TiDB, TiKV, PD, and TiFlash components in the cluster. @@ -74,7 +74,7 @@ The list includes the following information: Click **Disks** to view the list of disks: -![Disk list](/media/dashboard/dashboard-cluster-info-disks-v650.png) +![Disk list](./media/dashboard/dashboard-cluster-info-disks-v650.png) This disk list shows the status of disks on which the TiDB, TiKV, PD, and TiFlash instances run. diff --git a/dashboard/dashboard-diagnostics-access.md b/dashboard/dashboard-diagnostics-access.md index 42996efe71f12..b6daa5acfc4f7 100644 --- a/dashboard/dashboard-diagnostics-access.md +++ b/dashboard/dashboard-diagnostics-access.md @@ -18,7 +18,7 @@ You can use one of the following methods to access the cluster diagnostics page: * After logging in to TiDB Dashboard, click **Cluster Diagnostics** in the left navigation menu. - ![Access Cluster Diagnostics page](/media/dashboard/dashboard-diagnostics-access-v650.png) + ![Access Cluster Diagnostics page](./media/dashboard/dashboard-diagnostics-access-v650.png) * Visit `http://127.0.0.1:2379/dashboard/#/diagnose` in your browser. Replace `127.0.0.1:2379` with the actual PD address and port number. @@ -30,7 +30,7 @@ To diagnose a cluster within a specified time range and check the cluster load, 2. Set the **Range Duration**, such as `10 min`. 3. Click **Start**. -![Generate diagnostic report](/media/dashboard/dashboard-diagnostics-gen-report-v650.png) +![Generate diagnostic report](./media/dashboard/dashboard-diagnostics-gen-report-v650.png) > **Note:** > @@ -38,7 +38,7 @@ To diagnose a cluster within a specified time range and check the cluster load, The preceding steps generate a diagnostic report for the time range from `2022-05-21 14:40:00` to `2022-05-21 14:50:00`. After clicking **Start**, you can see the interface below. **Progress** is the progress bar of the diagnostic report. After the report is generated, click **View Full Report**. 
-![Report progress](/media/dashboard/dashboard-diagnostics-gen-process-v650.png) +![Report progress](./media/dashboard/dashboard-diagnostics-gen-process-v650.png) ## Generate comparison report @@ -55,7 +55,7 @@ You can take the following steps to generate a comparison report for the precedi 4. Set the **Baseline Range Start Time**, which is the start time of the range (to be compared with) in which the system is normal, such as `2022-05-21 14:30:00`. 5. Click **Start**. -![Generate comparison report](/media/dashboard/dashboard-diagnostics-gen-compare-report-v650.png) +![Generate comparison report](./media/dashboard/dashboard-diagnostics-gen-compare-report-v650.png) Then wait for the report to be generated and click **View Full Report**. diff --git a/dashboard/dashboard-diagnostics-report.md b/dashboard/dashboard-diagnostics-report.md index 85232f994ba58..e2dc1e4fb2136 100644 --- a/dashboard/dashboard-diagnostics-report.md +++ b/dashboard/dashboard-diagnostics-report.md @@ -21,7 +21,7 @@ The diagnostic report consists of the following parts: An example of the diagnostic report is as follows: -![Sample report](/media/dashboard/dashboard-diagnostics-example-table.png) +![Sample report](./media/dashboard/dashboard-diagnostics-example-table.png) In the image above, **Total Time Consume** in the top blue box is the report name. The information in the red box below explains the content of this report and the meaning of each field in the report. @@ -43,13 +43,13 @@ Each part of this report is introduced as follows. The time range for generating the diagnostics report includes the start time and end time. -![Report time range](/media/dashboard/dashboard-diagnostics-report-time-range.png) +![Report time range](./media/dashboard/dashboard-diagnostics-report-time-range.png) #### Cluster Hardware Info Cluster Hardware Info includes information such as CPU, memory, and disk of each server in the cluster. -![Cluster hardware report](/media/dashboard/dashboard-diagnostics-cluster-hardware.png) +![Cluster hardware report](./media/dashboard/dashboard-diagnostics-cluster-hardware.png) The fields in the table above are described as follows: @@ -64,7 +64,7 @@ The fields in the table above are described as follows: The `Cluster Info` table shows the cluster topology information. The information in this table are from TiDB [information_schema.cluster_info](/information-schema/information-schema-cluster-info.md) system table. -![Cluster info](/media/dashboard/dashboard-diagnostics-cluster-info.png) +![Cluster info](./media/dashboard/dashboard-diagnostics-cluster-info.png) The fields in the table above are described as follows: @@ -98,25 +98,25 @@ The `Node Load Info` table shows the load information of the server node, includ * The number of TCP connections in use by the node * The number of all TCP connections of the node -![Server Load Info report](/media/dashboard/dashboard-diagnostics-node-load-info.png) +![Server Load Info report](./media/dashboard/dashboard-diagnostics-node-load-info.png) #### Instance CPU Usage The `Instance CPU Usage` table shows the average value (AVG), maximum value (MAX), and minimum value (MIN) of the CPU usage of each TiDB/PD/TiKV process. The maximum CPU usage of the process is `100% * the number of CPU logical cores`. 
-![Instance CPU Usage report](/media/dashboard/dashboard-diagnostics-process-cpu-usage.png) +![Instance CPU Usage report](./media/dashboard/dashboard-diagnostics-process-cpu-usage.png) #### Instance Memory Usage The `Instance Memory Usage` table shows the average value (AVG), maximum value (MAX), and minimum value (MIN) of memory bytes occupied by each TiDB/PD/TiKV process. -![Instance memory usage report](/media/dashboard/dashboard-diagnostics-process-memory-usage.png) +![Instance memory usage report](./media/dashboard/dashboard-diagnostics-process-memory-usage.png) #### TiKV Thread CPU Usage The `TiKV Thread CPU Usage` table shows the average value (AVG), maximum value (MAX) and minimum value (MIN) of CPU usage of each module thread in TiKV. The maximum CPU usage of the process is `100% * the thread count of the corresponding configuration`. -![TiKV Thread CPU Usage report](/media/dashboard/dashboard-diagnostics-thread-cpu-usage.png) +![TiKV Thread CPU Usage report](./media/dashboard/dashboard-diagnostics-thread-cpu-usage.png) In the table above, @@ -131,7 +131,7 @@ In the table above, The `TiDB/PD Goroutines Count` table shows the average value (AVG), maximum value (MAX), and minimum value (MIN) of the number of TiDB or PD goroutines. If the number of goroutines exceeds 2,000, the concurrency of the process is too high, which affects the overall request latency. -![TiDB/PD goroutines count report](/media/dashboard/dashboard-diagnostics-goroutines-count.png) +![TiDB/PD goroutines count report](./media/dashboard/dashboard-diagnostics-goroutines-count.png) ### Overview information @@ -139,7 +139,7 @@ The `TiDB/PD Goroutines Count` table shows the average value (AVG), maximum valu The `Time Consumed by Each Component` table shows the monitored consumed time and the time ratio of TiDB, PD, TiKV modules in the cluster. The default time unit is seconds. You can use this table to quickly locate which modules consume more time. -![Time Consume report](/media/dashboard/dashboard-diagnostics-total-time-consume.png) +![Time Consume report](./media/dashboard/dashboard-diagnostics-total-time-consume.png) The fields in columns of the table above are described as follows: @@ -155,7 +155,7 @@ The fields in columns of the table above are described as follows: The following image shows the relationship of time consumption of the related modules in the monitoring metrics above. -![Time-consumption relationship of each module](/media/dashboard/dashboard-diagnostics-time-relation.png) +![Time-consumption relationship of each module](./media/dashboard/dashboard-diagnostics-time-relation.png) In the image above, yellow boxes are the TiDB-related monitoring metrics.Blue boxes are TiKV-related monitoring metrics, and gray boxes temporarily do not correspond to specific monitoring metrics. @@ -211,7 +211,7 @@ You can use `TOTAL_TIME`, the P999 time, and the P99 time to determine which mod The `Errors Occurred in Each Component` table shows the total number of errors in TiDB and TiKV, such as the failure to write binlog, `tikv server is busy`, `TiKV channel full`, `tikv write stall`. You can see the row comments for the specific meaning of each error. -![Errors Occurred in Each Component report](/media/dashboard/dashboard-diagnostics-error.png) +![Errors Occurred in Each Component report](./media/dashboard/dashboard-diagnostics-error.png) #### Specific TiDB/PD/TiKV monitoring information @@ -231,7 +231,7 @@ This table shows the number of client connections for each TiDB instance. 
This table shows transaction-related monitoring metrics. -![Transaction report](/media/dashboard/dashboard-diagnostics-tidb-txn.png) +![Transaction report](./media/dashboard/dashboard-diagnostics-tidb-txn.png) * `TOTAL_VALUE`: The sum of all values ​​(SUM) during the report time range. * `TOTAL_COUNT`: The total number of occurrences of this monitoring metric. @@ -246,7 +246,7 @@ In the table above, within the report time range, `tidb_txn_kv_write_size`: a to ##### DDL Owner -![TiDB DDL Owner Report](/media/dashboard/dashboard-diagnostics-tidb-ddl.png) +![TiDB DDL Owner Report](./media/dashboard/dashboard-diagnostics-tidb-ddl.png) The table above shows that from `2020-05-21 14:40:00`, the cluster's `DDL OWNER` is at the `10.0.1.13:10080` node. If the owner changes, multiple rows of data exist in the table above, where the `Min_Time` column indicates the minimum time of the corresponding known owner. @@ -305,7 +305,7 @@ Within the report time range, if some configurations have been modified, the fol Example: -![Scheduler Config Change History report](/media/dashboard/dashboard-diagnostics-config-change.png) +![Scheduler Config Change History report](./media/dashboard/dashboard-diagnostics-config-change.png) The table above shows that the `leader-schedule-limit` configuration parameter has been modified within the report time range: @@ -324,7 +324,7 @@ You can generate a comparison report for two time ranges. The report content is First, the `Compare Report Time Range` report in the basic information shows the two time ranges for comparison: -![Compare Report Time Range report](/media/dashboard/dashboard-diagnostics-compare-time.png) +![Compare Report Time Range report](./media/dashboard/dashboard-diagnostics-compare-time.png) In the table above, `t1` is the normal time range, or the reference time range. `t2` is the abnormal time range. @@ -338,7 +338,7 @@ Tables related to slow queries are shown as follows: This section introduces `DIFF_RATIO` using the `Instance CPU Usage` table as an example. -![Compare Instance CPU Usage report](/media/dashboard/dashboard-diagnostics-compare-instance-cpu-usage.png) +![Compare Instance CPU Usage report](./media/dashboard/dashboard-diagnostics-compare-instance-cpu-usage.png) * `t1.AVG`, `t1.MAX`, `t1.Min` are the average value, maximum value, and minimum value of CPU usage in the `t1`. * `t2.AVG`, `t2.MAX`, and `t2.Min` are the average value, maximum value, and minimum value ​​of CPU usage during `t2`. @@ -359,7 +359,7 @@ For example, in the table above, the average CPU usage of the `tidb` node in `t2 The `Maximum Different Item` table compares the monitoring metrics of two time ranges, and sorts them according to the difference of the monitoring metrics. Using this table, you can quickly find out which monitoring metric has the biggest difference in the two time ranges. See the following example: -![Maximum Different Item table](/media/dashboard/dashboard-diagnostics-maximum-different-item.png) +![Maximum Different Item table](./media/dashboard/dashboard-diagnostics-maximum-different-item.png) * `Table`: Indicates this monitoring metric comes from which table in the comparison report. For example, `TiKV, coprocessor_info` indicates the `coprocessor_info` table in the TiKV component. * `METRIC_NAME`: The monitoring metric name. Click `expand` to view the comparison of different labels of metrics. 
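Some figures in the preceding report sections, such as the DDL owner and the modified `leader-schedule-limit` scheduling parameter, can be cross-checked directly from a SQL client. The following is a minimal sketch; `ADMIN SHOW DDL` and `SHOW CONFIG` are standard TiDB statements, but the exact parameter name filtered below is an assumption based on the `leader-schedule-limit` example above:

```sql
-- Show the current DDL owner, which should match the DDL Owner table in the report.
ADMIN SHOW DDL;

-- Show the current value of the modified PD scheduling parameter.
SHOW CONFIG WHERE type = 'pd' AND name = 'schedule.leader-schedule-limit';
```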
diff --git a/dashboard/dashboard-diagnostics-usage.md b/dashboard/dashboard-diagnostics-usage.md index 50d5c5215f5e7..df8e17032a0c0 100644 --- a/dashboard/dashboard-diagnostics-usage.md +++ b/dashboard/dashboard-diagnostics-usage.md @@ -14,7 +14,7 @@ This section demonstrates how to use the comparison diagnostic feature to diagno ### Example 1 -![QPS example](/media/dashboard/dashboard-diagnostics-usage1.png) +![QPS example](./media/dashboard/dashboard-diagnostics-usage1.png) The result of a `go-ycsb` pressure test is shown in the image above. You can see that at `2020-03-10 13:24:30`, QPS suddenly began to decrease. After 3 minutes, QPS began to return to normal. You can use diagnostic report of TiDB Dashboard to find out the cause. @@ -28,7 +28,7 @@ Because the impact range of jitter is 3 minutes, the two time ranges above are b After the report is generated, you can view this report on the **Compare Diagnose** page. -![Comparison diagnostics](/media/dashboard/dashboard-diagnostics-usage2.png) +![Comparison diagnostics](./media/dashboard/dashboard-diagnostics-usage2.png) The diagnostic results above show that big queries might exist during the diagnostic time. Each **DETAIL** in the report above is described as follows: @@ -64,7 +64,7 @@ From the result above, you can see that from `13:24:30`, there is a large write If a large query has not been executed, the query is not recorded in the slow log. In this situation, this large query can still be diagnosed. See the following example: -![QPS results](/media/dashboard/dashboard-diagnostics-usage3.png) +![QPS results](./media/dashboard/dashboard-diagnostics-usage3.png) The result of another `go-ycsb` pressure test is shown in the image above. You can see that at `2020-03-08 01:46:30`, QPS suddenly began to drop and did not recover. @@ -76,7 +76,7 @@ T2: `2020-03-08 01:46:30` to `2020-03-08 01:51:30`. In this range, QPS began to After the report is generated, you can view this report on the **Compare Diagnose** page. -![Comparison diagnostics](/media/dashboard/dashboard-diagnostics-usage4.png) +![Comparison diagnostics](./media/dashboard/dashboard-diagnostics-usage4.png) The diagnostic result is similar to that of example 1. The last row of the image above indicates that there might be slow queries and indicate that you can use SQL statements to query the expensive queries in the TiDB log. The execution result of the SQL statements are as follows. @@ -95,7 +95,7 @@ The query result above shows that on this `172.16.5.40:4009` TiDB instance, at ` Because the diagnostics might be wrong, using a comparison report might help DBAs locate problems more quickly. See the following example. -![QPS results](/media/dashboard/dashboard-diagnostics-usage5.png) +![QPS results](./media/dashboard/dashboard-diagnostics-usage5.png) The result of a `go-ycsb` pressure test is shown in the image above. You can see that at `2020-05-22 22:14:00`, QPS suddenly began to decrease. After 3 minutes, QPS began to return to normal. You can use the comparison diagnostic report of TiDB Dashboard to find out the cause. @@ -107,7 +107,7 @@ T2: `2020-05-22 22:14:00` `2020-05-22 22:17:00`. In this range, QPS began to dec After generating the comparison report, check the **Max diff item** report. This report compares the monitoring items of the two time ranges above and sorts them according to the difference of the monitoring items. 
The result of this table is as follows: -![Comparison results](/media/dashboard/dashboard-diagnostics-usage6.png) +![Comparison results](./media/dashboard/dashboard-diagnostics-usage6.png) From the result above, you can see that the Coprocessor requests in T2 are much more than those in T1. It might be that some large queries appear in T2 that bring more load. diff --git a/dashboard/dashboard-faq.md b/dashboard/dashboard-faq.md index ab0f5d7b25b93..6877370fb8f27 100644 --- a/dashboard/dashboard-faq.md +++ b/dashboard/dashboard-faq.md @@ -159,8 +159,8 @@ If the `unknown field` error appears on the **Slow Queries** page after the clus In the following example, Chrome is used. - ![Opening DevTools from Chrome's main menu](/media/dashboard/dashboard-faq-devtools.png) + ![Opening DevTools from Chrome's main menu](./media/dashboard/dashboard-faq-devtools.png) 3. Select the **Application** panel, expand the **Local Storage** menu and select the **TiDB Dashboard page domain**. Click the **Clear All** button. - ![Clear the Local Storage](/media/dashboard/dashboard-faq-devtools-application.png) + ![Clear the Local Storage](./media/dashboard/dashboard-faq-devtools-application.png) diff --git a/dashboard/dashboard-intro.md b/dashboard/dashboard-intro.md index 4eb006cf733e8..11efa1894993a 100644 --- a/dashboard/dashboard-intro.md +++ b/dashboard/dashboard-intro.md @@ -12,7 +12,7 @@ TiDB Dashboard is a Web UI for monitoring, diagnosing, and managing the TiDB clu > > TiDB v6.5.0 (and later) and TiDB Operator v1.4.0 (and later) support deploying TiDB Dashboard as an independent Pod on Kubernetes. For details, see [Deploy TiDB Dashboard independently in TiDB Operator](https://docs.pingcap.com/tidb-in-kubernetes/dev/get-started#deploy-tidb-dashboard-independently). -![TiDB Dashboard interface](/media/dashboard/dashboard-intro.gif) +![TiDB Dashboard interface](./media/dashboard/dashboard-intro.gif) TiDB Dashboard is open-sourced on [GitHub](https://github.com/pingcap-incubator/tidb-dashboard). diff --git a/dashboard/dashboard-key-visualizer.md b/dashboard/dashboard-key-visualizer.md index d2697f8244b3a..008bea35bb022 100644 --- a/dashboard/dashboard-key-visualizer.md +++ b/dashboard/dashboard-key-visualizer.md @@ -14,7 +14,7 @@ You can use one of the following two methods to access the Key Visualizer page: * After logging in to TiDB Dashboard, click **Key Visualizer** in the left navigation menu. - ![Access Key Visualizer](/media/dashboard/dashboard-keyviz-access-v650.png) + ![Access Key Visualizer](./media/dashboard/dashboard-keyviz-access-v650.png) * Visit in your browser. Replace `127.0.0.1:2379` with the actual PD instance address and port. @@ -22,7 +22,7 @@ You can use one of the following two methods to access the Key Visualizer page: The following image is a demonstration of the Key Visualizer page: -![Key Visualizer page](/media/dashboard/dashboard-keyviz-overview.png) +![Key Visualizer page](./media/dashboard/dashboard-keyviz-overview.png) From the preceding interface, you can see the following objects: @@ -67,19 +67,19 @@ This section introduces how to use Key Visualizer. To use the Key Visualizer page for the first time, you need to manually enable this feature on the **Settings** page. 
Follow the page guide and click **Open Settings** to open the settings page: -![Feature disabled](/media/dashboard/dashboard-keyviz-not-enabled.png) +![Feature disabled](./media/dashboard/dashboard-keyviz-not-enabled.png) After this feature is enabled, you can open the settings page by clicking the **Settings** icon in the upper right corner: -![Settings icon](/media/dashboard/dashboard-keyviz-settings-button.png) +![Settings icon](./media/dashboard/dashboard-keyviz-settings-button.png) The settings page is shown as follows: -![Settings page](/media/dashboard/dashboard-keyviz-settings.png) +![Settings page](./media/dashboard/dashboard-keyviz-settings.png) Set whether to start data collection through the switch, and click **Save** to take effect. After enabling the feature, you can see that the toolbar is available: -![Toolbar](/media/dashboard/dashboard-keyviz-toolbar.png) +![Toolbar](./media/dashboard/dashboard-keyviz-toolbar.png) After this feature is enabled, data collection is going on at the backend. You can see the heatmap shortly. @@ -91,12 +91,12 @@ When you open Key Visualizer, the heatmap of the entire database over the recent 2. Click and drag one of the following buttons to select the range. + Click the **Select & Zoom** button. Then click and drag this button to select the area to zoom in. - ![Selection box](/media/dashboard/dashboard-keyviz-select-zoom.gif) + ![Selection box](./media/dashboard/dashboard-keyviz-select-zoom.gif) + Click the **Reset** button to reset the Region range to the entire database. + Click the **time selection box** (at the position of `6 hour` on the preceding interface) and select the observation time period again. - ![Select time](/media/dashboard/dashboard-keyviz-select-time.png) + ![Select time](./media/dashboard/dashboard-keyviz-select-time.png) > **Note:** > @@ -112,7 +112,7 @@ The heatmap uses colors of different brightnesses to indicate the Bucket traffic ### Select metrics -![Select metrics](/media/dashboard/dashboard-keyviz-select-type.png) +![Select metrics](./media/dashboard/dashboard-keyviz-select-type.png) You can view a metric you are interested in by selecting this metric in the **metrics selection box** (at the `Write (bytes)` position in the interface above): @@ -134,11 +134,11 @@ To regain a heatmap based on the current time, click the **Refresh** button. If You can hover your mouse over the Bucket you are interested in to view the detailed information of this Region range. The image below is an example of this information: -![Bucket details](/media/dashboard/dashboard-keyviz-tooltip.png) +![Bucket details](./media/dashboard/dashboard-keyviz-tooltip.png) If you want to copy this information, click a Bucket. Then, the page with relevant details is temporarily pinned. Click on the information, and you have copied it to the clipboard. -![Copy Bucket details](/media/dashboard/dashboard-keyviz-tooltip-copy.png) +![Copy Bucket details](./media/dashboard/dashboard-keyviz-tooltip-copy.png) ## Common heatmap types @@ -146,19 +146,19 @@ This section shows and interprets four common types of heatmap in Key Visualizer ### Evenly distributed workload -![Balanced](/media/dashboard/dashboard-keyviz-well-dist.png) +![Balanced](./media/dashboard/dashboard-keyviz-well-dist.png) In the heatmap above, bright and dark colors are a fine-grained mix. This indicates that reads or writes are evenly distributed over time and among key ranges. The workload is evenly distributed to all nodes, which is ideal for a distributed database. 
### Periodically reads and writes -![Periodically](/media/dashboard/dashboard-keyviz-period.png) +![Periodically](./media/dashboard/dashboard-keyviz-period.png) In the heatmap above, there is an alternating brightness and darkness along the X-axis (time) but the brightness is relatively even along the Y-axis (Region). This indicates that the reads and writes change periodically, which might occur in scenarios of periodically scheduled tasks. For example, the big data platform periodically extracts data from TiDB every day. In this kind of scenarios, pay attention to whether the resources are sufficient during peak usage. ### Concentrated reads or writes -![Concentrated](/media/dashboard/dashboard-keyviz-continue.png) +![Concentrated](./media/dashboard/dashboard-keyviz-continue.png) In the heatmap above, you can see several bright lines. Along the Y-axis, the fringes around the bright lines are dark, which indicates that the Regions corresponding to bright lines have high read and write traffic. You can observe whether the traffic distribution is expected by your application. For example, when all services are associated with the user table, the overall traffic of the user table can be high, so it is reasonable to show bright lines in the heatmap. @@ -166,7 +166,7 @@ In addition, the height of the bright lines (the thickness along the Y-axis) is ### Sequential reads or writes -![Sequential](/media/dashboard/dashboard-keyviz-sequential.png) +![Sequential](./media/dashboard/dashboard-keyviz-sequential.png) In the heatmap above, you can see a bright line. This means that the data reads or writes are sequential. Typical scenarios of sequential data reads or writes are importing data or scanning tables and indexes. For example, you continuously write data to tables with auto-increment IDs. diff --git a/dashboard/dashboard-log-search.md b/dashboard/dashboard-log-search.md index 85b2b27baf35a..ea1be662d618d 100644 --- a/dashboard/dashboard-log-search.md +++ b/dashboard/dashboard-log-search.md @@ -12,7 +12,7 @@ On the log search page of TiDB Dashboard, you can search logs of all nodes, prev After logging in to TiDB Dashboard, you can click **Search Logs** to enter this log search homepage. -![Log Search Page](/media/dashboard/dashboard-log-search-home.png) +![Log Search Page](./media/dashboard/dashboard-log-search-home.png) This page provides the following search parameters: @@ -27,7 +27,7 @@ After clicking the **Search** button, you enter the detail page of the search re The following image shows the page of the search results. -![Search result](/media/dashboard/dashboard-log-search-result.png) +![Search result](./media/dashboard/dashboard-log-search-result.png) This page consists of the following three areas: @@ -61,12 +61,12 @@ The search progress area has the following three control buttons: Click the **View search history** link on the log search homepage to enter page of search history list: -![Search history entry](/media/dashboard/dashboard-log-search-history-entry.png) +![Search history entry](./media/dashboard/dashboard-log-search-history-entry.png) -![Search history list](/media/dashboard/dashboard-log-search-history.png) +![Search history list](./media/dashboard/dashboard-log-search-history.png) The history list shows the time range, log level, components, keywords, and search status of each search log. Click the **Detail** link in the **Action** column to see the search result details: You can delete the search history that you no longer need. 
Click **Delete All** in the upper right corner, or select the rows to be deleted and then click **Delete selected** to delete the history: -![Delete search history](/media/dashboard/dashboard-log-search-delete-history.png) +![Delete search history](./media/dashboard/dashboard-log-search-delete-history.png) diff --git a/dashboard/dashboard-metrics-relation.md b/dashboard/dashboard-metrics-relation.md index 25425bee92927..5cd856033d187 100644 --- a/dashboard/dashboard-metrics-relation.md +++ b/dashboard/dashboard-metrics-relation.md @@ -11,7 +11,7 @@ TiDB Dashboard metrics relation graph is a feature introduced in v4.0.7. This fe After logging in to TiDB Dashboard, click **Cluster Diagnostics** in the left navigation menu, and you can see the page of generating the metrics relation graph. -![Metrics relation graph homepage](/media/dashboard/dashboard-metrics-relation-home-v650.png) +![Metrics relation graph homepage](./media/dashboard/dashboard-metrics-relation-home-v650.png) After setting **Range Start Time** and **Range Duration**, click **Generate Metrics Relation** and you will enter the page of metrics relation graph. @@ -19,7 +19,7 @@ After setting **Range Start Time** and **Range Duration**, click **Generate Metr The following image is an example of the metrics relation graph. This graph illustrates the proportion of each monitoring metric's duration to the total query duration in a TiDB cluster within 5 minutes after 2020-07-29 16:36:00. The graph also illustrates the relations of each monitoring metric. -![Metrics relation graph example](/media/dashboard/dashboard-metrics-relation-example.png) +![Metrics relation graph example](./media/dashboard/dashboard-metrics-relation-example.png) For example, the node meaning of the `tidb_execute` monitoring metric is as follows: @@ -27,7 +27,7 @@ For example, the node meaning of the `tidb_execute` monitoring metric is as foll + The duration of the `tidb_execute` node itself is 9070.18 seconds, which accounts for 42% of the total query duration. + Hover your mouse over the box area, and you can see the detailed information of the metric, including the total duration, the average duration, and the average P99 (99th percentile) duration. -![tidb_execute node example](/media/dashboard/dashboard-metrics-relation-node-example.png) +![tidb_execute node example](./media/dashboard/dashboard-metrics-relation-node-example.png) ### Node information @@ -39,7 +39,7 @@ Each box area represents a monitoring metric and provides the following informat *The total duration of the metric node* = *the duration of the metric node itself* + *the duration of its child nodes*. Therefore, the metric graph of some nodes displays the proportion of the node itself's duration to the total duration, such as the graph of `tidb_execute`. -![tidb_execute node example1](/media/dashboard/dashboard-metrics-relation-node-example1.png) +![tidb_execute node example1](./media/dashboard/dashboard-metrics-relation-node-example1.png) * `tidb_execute` is the name of the monitoring metric, which represents the execution duration of a SQL query in the TiDB execution engine. * `19306.46s` represents that total duration of the `tidb_execute` metric is 19306.46 seconds. `89.40%` represents that 19306.46 seconds account for 89.40% of the total time consumed for all SQL queries (including user SQL queries and TiDB's internal SQL queries). The total query duration is the total duration of `tidb_query`. 
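As a rough consistency check of the example figures quoted in this section (a sketch based only on the numbers shown above), the 89.40% share implies a total `tidb_query` duration of roughly 21,596 seconds, which also agrees with the 42% share reported for the `tidb_execute` node itself:

$$
\frac{19306.46\ \text{s}}{0.8940} \approx 21596\ \text{s}, \qquad
\frac{9070.18\ \text{s}}{21596\ \text{s}} \approx 42\%
$$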
@@ -47,7 +47,7 @@ Each box area represents a monitoring metric and provides the following informat Hover your mouse over the box area and you can see more details of the `tidb_execute` metric node: -![tidb_execute node example2](/media/dashboard/dashboard-metrics-relation-node-example2.png) +![tidb_execute node example2](./media/dashboard/dashboard-metrics-relation-node-example2.png) The text information displayed in the image above is the description of the metric node, including the total duration, the total times, the average duration, and the average duration P99, P90, and P80. @@ -55,7 +55,7 @@ The text information displayed in the image above is the description of the metr Taking the `tidb_execute` metric node as an example, this section introduces a metric's child nodes. -![tidb_execute node relation example1](/media/dashboard/dashboard-metrics-relation-relation-example1.png) +![tidb_execute node relation example1](./media/dashboard/dashboard-metrics-relation-relation-example1.png) From the graph above, you can see the two child nodes of `tidb_execute`: @@ -72,7 +72,7 @@ In addition, `tidb_execute` also has a dotted arrow pointing to the `tidb_cop` b ### `tidb_kv_request` and its parent nodes -![tidb_execute node relation example2](/media/dashboard/dashboard-metrics-relation-relation-example2.png) +![tidb_execute node relation example2](./media/dashboard/dashboard-metrics-relation-relation-example2.png) `tidb_cop` and `tidb_txn_cmd.get`, the parent nodes of `tidb_kv_request`, both have dotted arrows pointing to `tidb_kv_request`, which indicates as follows: diff --git a/dashboard/dashboard-monitoring.md b/dashboard/dashboard-monitoring.md index bb642c8076a36..47b03df8f59a4 100644 --- a/dashboard/dashboard-monitoring.md +++ b/dashboard/dashboard-monitoring.md @@ -11,11 +11,11 @@ On the monitoring page, you can view the Performance Overview dashboard, a perfo Log in to TiDB dashboard and click **Monitoring** from the left navigation bar. The Performance Overview dashboard is displayed. -![Monitoring page](/media/dashboard/dashboard-monitoring.png) +![Monitoring page](./media/dashboard/dashboard-monitoring.png) If the TiDB cluster is deployed using TiUP, you can also view the Performance Overview dashboard on Grafana. In this deployment mode, the monitoring system (Prometheus & Grafana) is deployed at the same time. For more information, see [TiDB Monitoring Framework Overview](/tidb-monitoring-framework.md). -![performance overview](/media/performance/grafana_performance_overview.png) +![performance overview](./media/performance/grafana_performance_overview.png) ## Key Metrics on Performance Overview diff --git a/dashboard/dashboard-ops-deploy.md b/dashboard/dashboard-ops-deploy.md index a87458b271fdc..988bb2f1961d6 100644 --- a/dashboard/dashboard-ops-deploy.md +++ b/dashboard/dashboard-ops-deploy.md @@ -30,7 +30,7 @@ When PD instances are running for the first time, they automatically negotiate w When you access a PD instance that does not serve TiDB Dashboard, the browser will be redirected automatically to guide you to access the PD instance that serves the TiDB Dashboard, so that you can access the service normally. This process is illustrated in the image below. 
-![Process Schematic](/media/dashboard/dashboard-ops-multiple-pd.png) +![Process Schematic](./media/dashboard/dashboard-ops-multiple-pd.png) > **Note:** > diff --git a/dashboard/dashboard-overview.md b/dashboard/dashboard-overview.md index 7239cdbf03f89..124b9e4d77d0f 100644 --- a/dashboard/dashboard-overview.md +++ b/dashboard/dashboard-overview.md @@ -19,13 +19,13 @@ This page shows the overview of the entire TiDB cluster, including the following After logging in to TiDB Dashboard, the overview page is entered by default, or you can click **Overview** in the left navigation menu to enter this page: -![Enter overview page](/media/dashboard/dashboard-overview-access-v650.png) +![Enter overview page](./media/dashboard/dashboard-overview-access-v650.png) ## QPS This area shows the number of successful and failed queries per second for the entire cluster over the recent hour: -![QPS](/media/dashboard/dashboard-overview-qps.png) +![QPS](./media/dashboard/dashboard-overview-qps.png) > **Note:** > @@ -35,7 +35,7 @@ This area shows the number of successful and failed queries per second for the e This area shows the latency of 99.9%, 99%, and 90% of queries in the entire cluster over the recent one hour: -![Latency](/media/dashboard/dashboard-overview-latency.png) +![Latency](./media/dashboard/dashboard-overview-latency.png) > **Note:** > @@ -45,7 +45,7 @@ This area shows the latency of 99.9%, 99%, and 90% of queries in the entire clus This area shows the ten types of SQL statements that have accumulated the longest execution time in the entire cluster over the recent period. SQL statements with different query parameters but of the same structure are classified into the same SQL type and displayed in the same row: -![Top SQL](/media/dashboard/dashboard-overview-top-statements.png) +![Top SQL](./media/dashboard/dashboard-overview-top-statements.png) The information shown in this area is consistent with the more detailed [SQL Statements Page](/dashboard/dashboard-statement-list.md). You can click the **Top SQL Statements** heading to view the complete list. For details of the columns in this table, see [SQL Statements Page](/dashboard/dashboard-statement-list.md). @@ -57,7 +57,7 @@ The information shown in this area is consistent with the more detailed [SQL Sta By default, this area shows the latest 10 slow queries in the entire cluster over the recent 30 minutes: -![Recent slow queries](/media/dashboard/dashboard-overview-slow-query.png) +![Recent slow queries](./media/dashboard/dashboard-overview-slow-query.png) By default, the SQL query that is executed longer than 300 milliseconds is counted as a slow query and displayed on the table. You can change this threshold by modifying the [tidb_slow_log_threshold](/system-variables.md#tidb_slow_log_threshold) variable or the [instance.tidb_slow_log_threshold](/tidb-configuration-file.md#tidb_slow_log_threshold) TiDB parameter. 
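For example, the threshold can be adjusted from a SQL client. This is a minimal sketch; it assumes a TiDB version in which `tidb_slow_log_threshold` is exposed as a system variable with GLOBAL scope (the value is in milliseconds and only takes effect on the TiDB instance you are connected to):

```sql
-- Check the current slow query threshold (in milliseconds).
SHOW VARIABLES LIKE 'tidb_slow_log_threshold';

-- Only record statements that take longer than 500 ms as slow queries.
SET GLOBAL tidb_slow_log_threshold = 500;
```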
@@ -71,7 +71,7 @@ The content displayed in this area is consistent with the more detailed [Slow Qu This area summarizes the total number of instances and abnormal instances of TiDB, TiKV, PD, and TiFlash in the entire cluster: -![Instances](/media/dashboard/dashboard-overview-instances.png) +![Instances](./media/dashboard/dashboard-overview-instances.png) The statuses in the preceding image are described as follows: @@ -84,7 +84,7 @@ Click the **Instance** title to enter the [Cluster Info Page](/dashboard/dashboa This area provides links for you to view detailed monitor and alert: -![Monitor and alert](/media/dashboard/dashboard-overview-monitor.png) +![Monitor and alert](./media/dashboard/dashboard-overview-monitor.png) - **View Metrics**: Click this link to jump to the Grafana dashboard where you can view detailed monitoring information of the cluster. For details of each monitoring metric in the Grafana dashboard, see [monitoring metrics](/grafana-overview-dashboard.md). - **View Alerts**: Click this link to jump to the AlertManager page where you can view detailed alert information of the cluster. If alerts exist in the cluster, the number of alerts is directly shown in the link text. diff --git a/dashboard/dashboard-profiling.md b/dashboard/dashboard-profiling.md index 7e00f7719ca2e..3ff0dbd0d8bc8 100644 --- a/dashboard/dashboard-profiling.md +++ b/dashboard/dashboard-profiling.md @@ -38,7 +38,7 @@ You can access the instance profiling page using either of the following methods * After logging in to TiDB Dashboard, click **Advanced Debugging** > **Profiling Instances** > **Manual Profiling** in the left navigation menu. - ![Access instance profiling page](/media/dashboard/dashboard-profiling-access.png) + ![Access instance profiling page](./media/dashboard/dashboard-profiling-access.png) * Visit in your browser. Replace `127.0.0.1:2379` with the actual PD instance address and port. @@ -46,7 +46,7 @@ You can access the instance profiling page using either of the following methods In the instance profiling page, choose at least one target instance and click **Start Profiling** to start the instance profiling. -![Start instance profiling](/media/dashboard/dashboard-profiling-start.png) +![Start instance profiling](./media/dashboard/dashboard-profiling-start.png) You can modify the profiling duration before starting the profiling. This duration is determined by the time needed for the profiling, which is 30 seconds by default. The 30-second duration takes 30 seconds to complete. @@ -56,7 +56,7 @@ Manual Profiling cannot be initiated on clusters that have [Continuous Profiling After a profiling is started, you can view the profiling status and progress in real time. -![Profiling detail](/media/dashboard/dashboard-profiling-view-progress.png) +![Profiling detail](./media/dashboard/dashboard-profiling-view-progress.png) The profiling runs in the background. Refreshing or exiting the current page does not stop the profiling task that is running. @@ -64,16 +64,16 @@ The profiling runs in the background. Refreshing or exiting the current page doe After the profiling of all instances is completed, you can click **Download Profiling Result** in the upper right corner to download all performance data. -![Download profiling result](/media/dashboard/dashboard-profiling-download.png) +![Download profiling result](./media/dashboard/dashboard-profiling-download.png) You can also click an individual instance in the table to view its profiling result. Alternatively, you can hover on ... 
to download raw data. -![Single instance result](/media/dashboard/dashboard-profiling-view-single.png) +![Single instance result](./media/dashboard/dashboard-profiling-view-single.png) ## View profiling history The on-demand profiling history is listed on the page. Click a row to view details. -![View profiling history](/media/dashboard/dashboard-profiling-history.png) +![View profiling history](./media/dashboard/dashboard-profiling-history.png) For detailed operations on the profiling status page, see [View Profiling Status](#view-profiling-status). diff --git a/dashboard/dashboard-resource-manager.md b/dashboard/dashboard-resource-manager.md index c1af56c22f963..2bca7585be2f2 100644 --- a/dashboard/dashboard-resource-manager.md +++ b/dashboard/dashboard-resource-manager.md @@ -19,7 +19,7 @@ You can use one of the following two methods to access the Resource Manager page The following figure shows the Resource Manager details page: -![TiDB Dashboard: Resource Manager](/media/dashboard/dashboard-resource-manager-info.png) +![TiDB Dashboard: Resource Manager](./media/dashboard/dashboard-resource-manager-info.png) The Resource Manager page contains the following three sections: @@ -45,13 +45,13 @@ Before resource planning, you need to know the overall capacity of the cluster. - `oltp_read_write`: applies to workloads with even data read and write. It is estimated based on a workload model similar to `sysbench oltp_read_write`. - `oltp_read_only`: applies to workloads with heavy data read. It is estimated based on a workload model similar to `sysbench oltp_read_only`. - ![Calibrate by Hardware](/media/dashboard/dashboard-resource-manager-calibrate-by-hardware.png) + ![Calibrate by Hardware](./media/dashboard/dashboard-resource-manager-calibrate-by-hardware.png) The **Total RU of user resource groups** represents the total amount of RU for all user resource groups, excluding the `default` resource group. If this value is less than the estimated capacity, the system triggers an alert. By default, the system allocates unlimited usage to the predefined `default` resource group. When all users belong to the `default` resource group, resources are allocated in the same way as when resource control is disabled. - [Estimate capacity based on actual workload](/sql-statements/sql-statement-calibrate-resource.md#estimate-capacity-based-on-actual-workload) - ![Calibrate by Workload](/media/dashboard/dashboard-resource-manager-calibrate-by-workload.png) + ![Calibrate by Workload](./media/dashboard/dashboard-resource-manager-calibrate-by-workload.png) You can select a time range for estimation within the range of 10 minutes to 24 hours. The time zone used is the same as that of the front-end user. diff --git a/dashboard/dashboard-session-share.md b/dashboard/dashboard-session-share.md index b0b7e6fc1887b..1ba4aaf58e308 100644 --- a/dashboard/dashboard-session-share.md +++ b/dashboard/dashboard-session-share.md @@ -15,7 +15,7 @@ You can share the current session of the TiDB Dashboard to other users so that t 3. Click **Share Current Session**. - ![Sample Step](/media/dashboard/dashboard-session-share-settings-1-v650.png) + ![Sample Step](./media/dashboard/dashboard-session-share-settings-1-v650.png) > **Note:** > @@ -29,11 +29,11 @@ You can share the current session of the TiDB Dashboard to other users so that t 5. Click **Generate Authorization Code**. 
- ![Sample Step](/media/dashboard/dashboard-session-share-settings-2-v650.png) + ![Sample Step](./media/dashboard/dashboard-session-share-settings-2-v650.png) 6. Provide the generated **Authorization Code** to the user to whom you want to share the session. - ![Sample Step](/media/dashboard/dashboard-session-share-settings-3-v650.png) + ![Sample Step](./media/dashboard/dashboard-session-share-settings-3-v650.png) > **Warning:** > @@ -43,14 +43,14 @@ You can share the current session of the TiDB Dashboard to other users so that t 1. On the sign-in page of TiDB Dashboard, click **Use Alternative Authentication**. - ![Sample Step](/media/dashboard/dashboard-session-share-signin-1-v650.png) + ![Sample Step](./media/dashboard/dashboard-session-share-signin-1-v650.png) 2. Click **Authorization Code** to use it to sign in. - ![Sample Step](/media/dashboard/dashboard-session-share-signin-2-v650.png) + ![Sample Step](./media/dashboard/dashboard-session-share-signin-2-v650.png) 3. Enter the authorization code you have received from the inviter. 4. Click **Sign In**. - ![Sample Step](/media/dashboard/dashboard-session-share-signin-3-v650.png) + ![Sample Step](./media/dashboard/dashboard-session-share-signin-3-v650.png) diff --git a/dashboard/dashboard-session-sso.md b/dashboard/dashboard-session-sso.md index 900c1944824d7..225aa8e09f18b 100644 --- a/dashboard/dashboard-session-sso.md +++ b/dashboard/dashboard-session-sso.md @@ -28,7 +28,7 @@ TiDB Dashboard supports [OIDC](https://openid.net/connect/)-based Single Sign-On TiDB Dashboard will store this SQL password and use it to impersonate a normal SQL sign-in after an SSO sign-in is finished. - ![Sample Step](/media/dashboard/dashboard-session-sso-enable-1.png) + ![Sample Step](./media/dashboard/dashboard-session-sso-enable-1.png) > **Note:** > @@ -36,11 +36,11 @@ TiDB Dashboard supports [OIDC](https://openid.net/connect/)-based Single Sign-On 6. Click **Authorize and Save**. - ![Sample Step](/media/dashboard/dashboard-session-sso-enable-2.png) + ![Sample Step](./media/dashboard/dashboard-session-sso-enable-2.png) 7. Click **Update** (Update) to save the configuration. - ![Sample Step](/media/dashboard/dashboard-session-sso-enable-3.png) + ![Sample Step](./media/dashboard/dashboard-session-sso-enable-3.png) Now SSO sign-in has been enabled for TiDB Dashboard. @@ -60,7 +60,7 @@ You can disable the SSO, which will completely erase the stored SQL password: 4. Click **Update** (Update) to save the configuration. - ![Sample Step](/media/dashboard/dashboard-session-sso-disable.png) + ![Sample Step](./media/dashboard/dashboard-session-sso-disable.png) ### Re-enter the password after a password change @@ -72,7 +72,7 @@ The SSO sign-in will fail once the password of the SQL user is changed. In this 3. In the **Single Sign-On** section, Click **Authorize Impersonation** and input the updated SQL password. - ![Sample Step](/media/dashboard/dashboard-session-sso-reauthorize.png) + ![Sample Step](./media/dashboard/dashboard-session-sso-reauthorize.png) 4. Click **Authorize and Save**. @@ -82,7 +82,7 @@ Once SSO is configured for TiDB Dashboard, you can sign in via SSO by taking fol 1. In the sign-in page of TiDB Dashboard, click **Sign in via Company Account**. - ![Sample Step](/media/dashboard/dashboard-session-sso-signin.png) + ![Sample Step](./media/dashboard/dashboard-session-sso-signin.png) 2. Sign into the system with SSO service configured. @@ -102,7 +102,7 @@ First, create an Okta Application Integration to integrate SSO. 3. 
Click **Create App Integration**. - ![Sample Step](/media/dashboard/dashboard-session-sso-okta-1.png) + ![Sample Step](./media/dashboard/dashboard-session-sso-okta-1.png) 4. In the popped up dialog, choose **OIDC - OpenID Connect** in **Sign-in method**. @@ -110,7 +110,7 @@ First, create an Okta Application Integration to integrate SSO. 6. Click the **Next** button. - ![Sample Step](/media/dashboard/dashboard-session-sso-okta-2.png) + ![Sample Step](./media/dashboard/dashboard-session-sso-okta-2.png) 7. Fill **Sign-in redirect URIs** as follows: @@ -128,25 +128,25 @@ First, create an Okta Application Integration to integrate SSO. Similarly, substitute `DASHBOARD_IP:PORT` with the actual domain (or IP address) and port. - ![Sample Step](/media/dashboard/dashboard-session-sso-okta-3.png) + ![Sample Step](./media/dashboard/dashboard-session-sso-okta-3.png) 9. Configure what type of users in your organization is allowed for SSO sign-in in the **Assignments** field, and then click **Save** to save the configuration. - ![Sample Step](/media/dashboard/dashboard-session-sso-okta-4.png) + ![Sample Step](./media/dashboard/dashboard-session-sso-okta-4.png) ### Step 2: Obtain OIDC information and fill in TiDB Dashboard 1. In the Application Integration just created in Okta, click **Sign On**. - ![Sample Step 1](/media/dashboard/dashboard-session-sso-okta-info-1.png) + ![Sample Step 1](./media/dashboard/dashboard-session-sso-okta-info-1.png) 2. Copy values of the **Issuer** and **Audience** fields from the **OpenID Connect ID Token** section. - ![Sample Step 2](/media/dashboard/dashboard-session-sso-okta-info-2.png) + ![Sample Step 2](./media/dashboard/dashboard-session-sso-okta-info-2.png) 3. Open the TiDB Dashboard configuration page, fill **OIDC Client ID** with **Issuer** obtained from the last step and fill **OIDC Discovery URL** with **Audience**. Then finish the authorization and save the configuration. For example: - ![Sample Step 3](/media/dashboard/dashboard-session-sso-okta-info-3.png) + ![Sample Step 3](./media/dashboard/dashboard-session-sso-okta-info-3.png) Now TiDB Dashboard has been configured to use Okta SSO for sign-in. @@ -162,13 +162,13 @@ Similar to Okta, [Auth0](https://auth0.com/) also provides OIDC SSO identity ser 3. Click **Create App Integration**. - ![Create Application](/media/dashboard/dashboard-session-sso-auth0-create-app.png) + ![Create Application](./media/dashboard/dashboard-session-sso-auth0-create-app.png) In the popped-up dialog, fill **Name**, for example, "TiDB Dashboard". Choose **Single Page Web Applications** in **Choose an application type**. Click **Create**. 4. Click **Settings**. - ![Settings](/media/dashboard/dashboard-session-sso-auth0-settings-1.png) + ![Settings](./media/dashboard/dashboard-session-sso-auth0-settings-1.png) 5. Fill **Allowed Callback URLs** as follows: @@ -186,7 +186,7 @@ Similar to Okta, [Auth0](https://auth0.com/) also provides OIDC SSO identity ser Similarly, replace `DASHBOARD_IP:PORT` with the actual domain (or IP address) and port. - ![Settings](/media/dashboard/dashboard-session-sso-auth0-settings-2.png) + ![Settings](./media/dashboard/dashboard-session-sso-auth0-settings-2.png) 7. Keep the default values for other settings and click **Save Changes**. @@ -196,7 +196,7 @@ Similar to Okta, [Auth0](https://auth0.com/) also provides OIDC SSO identity ser 2. Fill **OIDC Discovery URL** with the **Domain** field value prefixed with `https://` and suffixed with `/`, for example, `https://example.us.auth0.com/`. 
Complete authorization and save the configuration. - ![Settings](/media/dashboard/dashboard-session-sso-auth0-settings-3.png) + ![Settings](./media/dashboard/dashboard-session-sso-auth0-settings-3.png) Now TiDB Dashboard has been configured to use Auth0 SSO for sign-in. @@ -211,7 +211,7 @@ Now TiDB Dashboard has been configured to use Auth0 SSO for sign-in. 2. Navigate from the top sidebar **Applications**. 3. Click **Applications - Add**. - ![Settings](/media/dashboard/dashboard-session-sso-casdoor-settings-1.png) + ![Settings](./media/dashboard/dashboard-session-sso-casdoor-settings-1.png) 4. Fill **Name** and **Display name**, for example, **TiDB Dashboard**. @@ -223,7 +223,7 @@ Now TiDB Dashboard has been configured to use Auth0 SSO for sign-in. Replace `DASHBOARD_IP:PORT` with the actual domain (or IP address) and port that you use to access the TiDB Dashboard in your browser. - ![Settings](/media/dashboard/dashboard-session-sso-casdoor-settings-2.png) + ![Settings](./media/dashboard/dashboard-session-sso-casdoor-settings-2.png) 6. Keep the default values for other settings and click **Save & Exit**. @@ -235,6 +235,6 @@ Now TiDB Dashboard has been configured to use Auth0 SSO for sign-in. 2. Fill **OIDC Discovery URL** with the **Domain** field value prefixed with `https://` and suffixed with `/`, for example, `https://casdoor.example.com/`. Complete authorization and save the configuration. - ![Settings](/media/dashboard/dashboard-session-sso-casdoor-settings-3.png) + ![Settings](./media/dashboard/dashboard-session-sso-casdoor-settings-3.png) Now TiDB Dashboard has been configured to use Casdoor SSO for sign-in. \ No newline at end of file diff --git a/dashboard/dashboard-slow-query.md b/dashboard/dashboard-slow-query.md index 24f419d7c8fd3..aa5bbcffee2b8 100644 --- a/dashboard/dashboard-slow-query.md +++ b/dashboard/dashboard-slow-query.md @@ -28,25 +28,25 @@ All data displayed on the slow query page comes from TiDB slow query system tabl You can filter slow queries based on the time range, the related database, SQL keywords, SQL types, the number of slow queries to be displayed. In the image below, 100 slow queries over the recent 30 minutes are displayed by default. -![Modify list filters](/media/dashboard/dashboard-slow-queries-list1-v620.png) +![Modify list filters](./media/dashboard/dashboard-slow-queries-list1-v620.png) ### Display more columns Click **Columns** on the page and you can choose to see more columns. You can move your mouse to the **(i)** icon at the right side of a column name to view the description of this column: -![Show more columns](/media/dashboard/dashboard-slow-queries-list2-v620.png) +![Show more columns](./media/dashboard/dashboard-slow-queries-list2-v620.png) ### Export slow queries locally Click ☰ (**More**) in the upper-right corner of the page to display the **Export** option. After you click **Export**, TiDB Dashboard exports slow queries in the current list as a CSV file. -![Export slow queries locally](/media/dashboard/dashboard-slow-queries-export-v651.png) +![Export slow queries locally](./media/dashboard/dashboard-slow-queries-export-v651.png) ### Sort by column By default, the list is sorted by **Finish Time** in the descending order. 
Click column headings to sort by the column or switch the sorting order: -![Modify sorting basis](/media/dashboard/dashboard-slow-queries-list3-v620.png) +![Modify sorting basis](./media/dashboard/dashboard-slow-queries-list3-v620.png) ## View execution details @@ -56,7 +56,7 @@ Click any item in the list to display detailed execution information of the slow - Plan: The execution plan of the slow query (area 2 in the following figure) - Other sorted SQL execution information (area 3 in the following figure) -![View execution details](/media/dashboard/dashboard-slow-queries-detail1-v620.png) +![View execution details](./media/dashboard/dashboard-slow-queries-detail1-v620.png) ### SQL @@ -74,7 +74,7 @@ On TiDB Dashboard, you can view execution plans in three ways: table, text, and The table format provides detailed information about the execution plan, which helps you quickly identify abnormal operator metrics and compare the status of different operators. The following figure shows an execution plan in table format: -![Execution plan in table format](/media/dashboard/dashboard-table-plan.png) +![Execution plan in table format](./media/dashboard/dashboard-table-plan.png) The table format displays similar information to the text format but provides more user-friendly interactions: @@ -83,13 +83,13 @@ The table format displays similar information to the text format but provides mo - If the execution plan is large, you can download it as a text file for local analysis. - You can hide and manage columns using the column picker. -![Execution plan in table format - column picker](/media/dashboard/dashboard-table-plan-columnpicker.png) +![Execution plan in table format - column picker](./media/dashboard/dashboard-table-plan-columnpicker.png) #### Execution plan in graph format The graph format is more suitable for viewing the execution plan tree of a complex SQL statement and understanding each operator and its corresponding content in detail. The following figure shows an execution plan in graph format: -![Execution plan in graph format](/media/dashboard/dashboard-visual-plan-2.png) +![Execution plan in graph format](./media/dashboard/dashboard-visual-plan-2.png) - The graph shows the execution from left to right, and from top to bottom. - Upper nodes are parent operators and lower nodes are child operators. @@ -98,19 +98,19 @@ The graph format is more suitable for viewing the execution plan tree of a compl Click the node area, and the detailed operator information is displayed on the right sidebar. -![Execution plan in graph format - sidebar](/media/dashboard/dashboard-visual-plan-popup.png) +![Execution plan in graph format - sidebar](./media/dashboard/dashboard-visual-plan-popup.png) ### SQL execution details For basic information, execution time, Coprocessor read, transaction, and slow query of the SQL statement, you can click the corresponding tab titles to switch among different information. -![Show different execution information](/media/dashboard/dashboard-slow-queries-detail2-v620.png) +![Show different execution information](./media/dashboard/dashboard-slow-queries-detail2-v620.png) #### Basic tab The basic information of a SQL execution includes the table names, index name, execution count, and total latency. The **Description** column provides detailed description of each field. 
-![Basic information](/media/dashboard/dashboard-slow-queries-detail-plans-basic.png) +![Basic information](./media/dashboard/dashboard-slow-queries-detail-plans-basic.png) #### Time tab @@ -120,16 +120,16 @@ Click the **Time** tab, and you can see how long each stage of the execution pla > > Because some operations might be performed in parallel within a single SQL statement, the cumulative duration of each stage might exceed the actual execution time of the SQL statement. -![Execution time](/media/dashboard/dashboard-slow-queries-detail-plans-time.png) +![Execution time](./media/dashboard/dashboard-slow-queries-detail-plans-time.png) #### Coprocessor tab Click the **Coprocessor** tab, and you can see information related to Coprocessor read. -![Coprocessor read](/media/dashboard/dashboard-slow-queries-detail-plans-cop-read.png) +![Coprocessor read](./media/dashboard/dashboard-slow-queries-detail-plans-cop-read.png) #### Transaction tab Click the **Transaction** tab, and you can see information related to execution plans and transactions, such as the average number of written keys or the maximum number of written keys. -![Transaction](/media/dashboard/dashboard-slow-queries-detail-plans-transaction.png) +![Transaction](./media/dashboard/dashboard-slow-queries-detail-plans-transaction.png) diff --git a/dashboard/dashboard-statement-details.md b/dashboard/dashboard-statement-details.md index 7c7ecd058da2a..e2c7d27bb5937 100644 --- a/dashboard/dashboard-statement-details.md +++ b/dashboard/dashboard-statement-details.md @@ -12,7 +12,7 @@ Click any item in the list to enter the detail page of the SQL statement to view - The execution plan list: If a SQL statement has multiple execution plans, this list is displayed. Besides text information of execution plans, TiDB v6.2.0 introduces visual execution plans, through which you can learn each operator of a statement and detailed information more intuitively. You can select different execution plans, and the details of the selected plans are displayed below the list (area 2 in the following figure). - Execution detail of plans, which displays the detailed information of the selected execution plans. See [Execution plan in details](#execution-details-of-plans) (area 3 in the following figure). -![Details](/media/dashboard/dashboard-statement-detail-v660.png) +![Details](./media/dashboard/dashboard-statement-detail-v660.png) ## Fast plan binding @@ -24,29 +24,29 @@ Starting from v6.6.0, TiDB introduces the fast plan binding feature. You can qui 1. Click **Plan Binding**. The **Plan Binding** dialog box is displayed. - ![Fast plan binding - not bound - entry](/media/dashboard/dashboard-quick-binding-entry-notbound.png) + ![Fast plan binding - not bound - entry](./media/dashboard/dashboard-quick-binding-entry-notbound.png) 2. Select a plan that you want to bind and click **Bind**. - ![Fast plan binding - popup](/media/dashboard/dashboard-quick-binding-popup-notbound.png) + ![Fast plan binding - popup](./media/dashboard/dashboard-quick-binding-popup-notbound.png) 3. After the binding is completed, you can see the **Bound** label. - ![Fast plan binding - popup - binding completed](/media/dashboard/dashboard-quick-binding-popup-bound.png) + ![Fast plan binding - popup - binding completed](./media/dashboard/dashboard-quick-binding-popup-bound.png) #### Drop an existing binding 1. On the page of a SQL statement that has an existing binding, click **Plan Binding**. The **Plan Binding** dialog box is displayed. 
- ![Fast plan binding - bound - entry](/media/dashboard/dashboard-quick-binding-entry-bound.png) + ![Fast plan binding - bound - entry](./media/dashboard/dashboard-quick-binding-entry-bound.png) 2. Click **Drop**. - ![Fast plan binding - popup - bound](/media/dashboard/dashboard-quick-binding-popup-bound.png) + ![Fast plan binding - popup - bound](./media/dashboard/dashboard-quick-binding-popup-bound.png) 3. After the binding is dropped, you can see the **Not bound** label. - ![Fast plan binding - popup](/media/dashboard/dashboard-quick-binding-popup-notbound.png) + ![Fast plan binding - popup](./media/dashboard/dashboard-quick-binding-popup-notbound.png) ### Limitation @@ -67,7 +67,7 @@ The execution detail of plans includes the following information: - Execution plan: Complete information about execution plans, displayed in table, graph, and text. For details of the execution plan, see [Understand the Query Execution Plan](/explain-overview.md). If multiple execution plans are selected, only (any) one of them is displayed. - For basic information, execution time, Coprocessor read, transaction, and slow query of the SQL statement, you can click the corresponding tab titles to switch among different information. -![Execution details of plans](/media/dashboard/dashboard-statement-plans-detail.png) +![Execution details of plans](./media/dashboard/dashboard-statement-plans-detail.png) ### SQL sample @@ -81,7 +81,7 @@ On TiDB Dashboard, you can view execution plans in three ways: table, text, and The table format provides detailed information about the execution plan, which helps you quickly identify abnormal operator metrics and compare the status of different operators. The following figure shows an execution plan in table format: -![Execution plan in table format](/media/dashboard/dashboard-table-plan.png) +![Execution plan in table format](./media/dashboard/dashboard-table-plan.png) The table format displays similar information to the text format but provides more user-friendly interactions: @@ -90,13 +90,13 @@ The table format displays similar information to the text format but provides mo - If the execution plan is large, you can download it as a text file for local analysis. - You can hide and manage columns using the column picker. -![Execution plan in table format - column picker](/media/dashboard/dashboard-table-plan-columnpicker.png) +![Execution plan in table format - column picker](./media/dashboard/dashboard-table-plan-columnpicker.png) #### Execution plan in graph format The graph format is more suitable for viewing the execution plan tree of a complex SQL statement and understanding each operator and its corresponding content in detail. The following figure shows an execution plan in graph format: -![Execution plan in graph format](/media/dashboard/dashboard-visual-plan-2.png) +![Execution plan in graph format](./media/dashboard/dashboard-visual-plan-2.png) - The graph shows the execution from left to right, and from top to bottom. - Upper nodes are parent operators and lower nodes are child operators. @@ -105,19 +105,19 @@ The graph format is more suitable for viewing the execution plan tree of a compl Click the node area, and the detailed operator information is displayed on the right sidebar. 
-![Execution plan in graph format - sidebar](/media/dashboard/dashboard-visual-plan-popup.png) +![Execution plan in graph format - sidebar](./media/dashboard/dashboard-visual-plan-popup.png) ### SQL execution details For basic information, execution time, Coprocessor read, transaction, and slow query of the SQL statement, you can click the corresponding tab titles to switch among different information. -![Show different execution information](/media/dashboard/dashboard-slow-queries-detail2-v620.png) +![Show different execution information](./media/dashboard/dashboard-slow-queries-detail2-v620.png) #### Basic tab The basic information of a SQL execution includes the table names, index name, execution count, and total latency. The **Description** column provides detailed description of each field. -![Basic information](/media/dashboard/dashboard-statement-plans-basic.png) +![Basic information](./media/dashboard/dashboard-statement-plans-basic.png) #### Time tab @@ -127,24 +127,24 @@ Click the **Time** tab, and you can see how long each stage of the execution pla > > Because some operations might be performed in parallel within a single SQL statement, the cumulative duration of each stage might exceed the actual execution time of the SQL statement. -![Execution time](/media/dashboard/dashboard-statement-plans-time.png) +![Execution time](./media/dashboard/dashboard-statement-plans-time.png) #### Coprocessor Read tab Click the **Coprocessor Read** tab, and you can see information related to Coprocessor read. -![Coprocessor read](/media/dashboard/dashboard-statement-plans-cop-read.png) +![Coprocessor read](./media/dashboard/dashboard-statement-plans-cop-read.png) #### Transaction tab Click the **Transaction** tab, and you can see information related to execution plans and transactions, such as the average number of written keys or the maximum number of written keys. -![Transaction](/media/dashboard/dashboard-statement-plans-transaction.png) +![Transaction](./media/dashboard/dashboard-statement-plans-transaction.png) #### Slow Query tab If an execution plan is executed too slowly, you can see its associated slow query records under the **Slow Query** tab. -![Slow Query](/media/dashboard/dashboard-statement-plans-slow-queries.png) +![Slow Query](./media/dashboard/dashboard-statement-plans-slow-queries.png) The information displayed in this area has the same structure with the slow query page. See [TiDB Dashboard Slow Query Page](/dashboard/dashboard-slow-query.md) for details. diff --git a/dashboard/dashboard-statement-list.md b/dashboard/dashboard-statement-list.md index c8de12141d099..ba5a9e041f69b 100644 --- a/dashboard/dashboard-statement-list.md +++ b/dashboard/dashboard-statement-list.md @@ -28,29 +28,29 @@ All the data shown on the SQL statement summary page are from the TiDB statement On the top of the SQL statement summary page, you can modify the time range of SQL executions to be displayed. You can also filter the list by database in which SQL statements are executed, or by SQL types. The following image shows all SQL executions over the recent data collection cycle (recent 30 minutes by default). -![Modify filters](/media/dashboard/dashboard-statement-filter-options.png) +![Modify filters](./media/dashboard/dashboard-statement-filter-options.png) ### Display More Columns Click **Columns** on the page and you can choose to see more columns. 
You can move your mouse to the **(i)** icon at the right side of a column name to view the description of this column: -![Choose columns](/media/dashboard/dashboard-statement-columns-selector.png) +![Choose columns](./media/dashboard/dashboard-statement-columns-selector.png) ### Sort by Column By default, the list is sorted by **Total Latency** from high to low. Click on different column headings to modify the sorting basis or switch the sorting order: -![Modify list sorting](/media/dashboard/dashboard-statement-change-order.png) +![Modify list sorting](./media/dashboard/dashboard-statement-change-order.png) ### Change Settings On the list page, click the **Settings** button on the top right to change the settings of the SQL statements feature: -![Settings entry](/media/dashboard/dashboard-statement-setting-entry.png) +![Settings entry](./media/dashboard/dashboard-statement-setting-entry.png) After clicking the **Settings** button, you can see the following setting dialog box: -![Settings](/media/dashboard/dashboard-statement-settings.png) +![Settings](./media/dashboard/dashboard-statement-settings.png) On the setting page, you can disable or enable the SQL statements feature. When the SQL statements feature is enabled, you can modify the following settings: @@ -69,7 +69,7 @@ See [Configurations of Statement Summary Tables](/statement-summary-tables.md#pa [`tidb_stmt_summary_max_stmt_count`](/system-variables.md#tidb_stmt_summary_max_stmt_count-new-in-v40) limits the number of SQL statements that can be stored in statement summary tables. If the limit is exceeded, TiDB clears the SQL statements that recently remain unused. These cleared SQL statements are represented as rows with `DIGEST` set to `NULL`. On the SQL statement page of TiDB Dashboard, the information of these rows is displayed as `Others`. -![Others](/media/dashboard/dashboard-statement-other-row.png) +![Others](./media/dashboard/dashboard-statement-other-row.png) ## Next step diff --git a/dashboard/dashboard-user.md b/dashboard/dashboard-user.md index 0d06fc0c034d7..570f2c2619a92 100644 --- a/dashboard/dashboard-user.md +++ b/dashboard/dashboard-user.md @@ -44,7 +44,7 @@ For details about how to control and manage TiDB SQL users, see [TiDB User Accou If an SQL user does not meet the preceding privilege requirements, the user fails to sign in to TiDB Dashboard, as shown below. -![insufficient-privileges](/media/dashboard/dashboard-user-insufficient-privileges.png) +![insufficient-privileges](./media/dashboard/dashboard-user-insufficient-privileges.png) ## Example: Create a least-privileged SQL user to access TiDB Dashboard diff --git a/dashboard/top-sql.md b/dashboard/top-sql.md index 73c3b5e0b9185..30417ef60f455 100644 --- a/dashboard/top-sql.md +++ b/dashboard/top-sql.md @@ -34,7 +34,7 @@ You can access the Top SQL page using either of the following methods: * After logging in to TiDB Dashboard, click **Top SQL** in the left navigation menu. - ![Top SQL](/media/dashboard/top-sql-access.png) + ![Top SQL](./media/dashboard/top-sql-access.png) * Visit in your browser. Replace `127.0.0.1:2379` with the actual PD instance address and port. @@ -68,39 +68,39 @@ The following are the common steps to use Top SQL. 2. Select a particular TiDB or TiKV instance that you want to observe the load. - ![Select Instance](/media/dashboard/top-sql-usage-select-instance.png) + ![Select Instance](./media/dashboard/top-sql-usage-select-instance.png) If you are unsure of which TiDB or TiKV instance to observe, you can select an arbitrary instance. 
Also, when the cluster CPU load is extremely unbalanced, you can first use Grafana charts to determine the specific instance you want to observe. 3. Observe the charts and tables presented by Top SQL. - ![Chart and Table](/media/dashboard/top-sql-usage-chart.png) + ![Chart and Table](./media/dashboard/top-sql-usage-chart.png) The size of the bars in the bar chart represents the size of CPU resources consumed by the SQL statement at that moment. Different colors distinguish different types of SQL statements. In most cases, you only need to focus on the SQL statements that have a higher CPU resource overhead in the corresponding time range in the chart. 4. Click a SQL statement in the table to show more information. You can see detailed execution metrics of different plans of that statement, such as Call/sec (average queries per second) and Scan Indexes/sec (average number of index rows scanned per second). - ![Details](/media/dashboard/top-sql-details.png) + ![Details](./media/dashboard/top-sql-details.png) 5. Based on these initial clues, you can further explore the [SQL Statement](/dashboard/dashboard-statement-list.md) or [Slow Queries](/dashboard/dashboard-slow-query.md) page to find the root cause of high CPU consumption or large data scans of the SQL statement. You can adjust the time range in the time picker or select a time range in the chart to get a more precise and detailed look at the problem. A smaller time range can provide more detailed data, with precision of up to 1 second. - ![Change time range](/media/dashboard/top-sql-usage-change-timerange.png) + ![Change time range](./media/dashboard/top-sql-usage-change-timerange.png) If the chart is out of date, you can click the **Refresh** button or select Auto Refresh options from the **Refresh** drop-down list. - ![Refresh](/media/dashboard/top-sql-usage-refresh.png) + ![Refresh](./media/dashboard/top-sql-usage-refresh.png) 6. View the CPU resource usage by table or database level to quickly identify resource usage at a higher level. Currently, only TiKV instances are supported. Select a TiKV instance, and then select **By TABLE** or **By DB**: - ![Select aggregation dimension](/media/dashboard/top-sql-usage-select-agg-by.png) + ![Select aggregation dimension](./media/dashboard/top-sql-usage-select-agg-by.png) View the aggregated results at a higher level: - ![Aggregated results at DB level](/media/dashboard/top-sql-usage-agg-by-db-detail.png) + ![Aggregated results at DB level](./media/dashboard/top-sql-usage-agg-by-db-detail.png) ## Disable Top SQL diff --git a/ddl-introduction.md b/ddl-introduction.md index 07710780c7bf1..3acd8a89ad781 100644 --- a/ddl-introduction.md +++ b/ddl-introduction.md @@ -41,7 +41,7 @@ TiDB uses the election mechanism of etcd to elect a node to host the Owner from A simple illustration of the DDL Owner is as follows: -![DDL Owner](/media/ddl-owner.png) +![DDL Owner](./media/ddl-owner.png) You can use the `ADMIN SHOW DDL` statement to view the current DDL owner: diff --git a/deploy-monitoring-services.md b/deploy-monitoring-services.md index b124889bb8cd7..b5bd762aa5923 100644 --- a/deploy-monitoring-services.md +++ b/deploy-monitoring-services.md @@ -258,7 +258,7 @@ To import a Grafana dashboard for the PD server, the TiKV server, and the TiDB s Click **New dashboard** in the top menu and choose the dashboard you want to view. 
-![view dashboard](/media/view-dashboard.png) +![view dashboard](./media/view-dashboard.png) You can get the following metrics for cluster components: diff --git a/develop/dev-guide-aws-appflow-integration.md b/develop/dev-guide-aws-appflow-integration.md index 5e30e06ef874f..1c4d22c21839f 100644 --- a/develop/dev-guide-aws-appflow-integration.md +++ b/develop/dev-guide-aws-appflow-integration.md @@ -77,17 +77,17 @@ git clone https://github.com/pingcap-inc/tidb-appflow-integration 4. Go to the [AWS Lambda console](https://console.aws.amazon.com/lambda/home), and you can see the Lambda that you just uploaded. Note that you need to select the correct region in the upper-right corner of the window. - ![lambda dashboard](/media/develop/aws-appflow-step-lambda-dashboard.png) + ![lambda dashboard](./media/develop/aws-appflow-step-lambda-dashboard.png) ### Use Lambda to register a connector 1. In the [AWS Management Console](https://console.aws.amazon.com), navigate to [Amazon AppFlow > Connectors](https://console.aws.amazon.com/appflow/home#/gallery) and click **Register new connector**. - ![register connector](/media/develop/aws-appflow-step-register-connector.png) + ![register connector](./media/develop/aws-appflow-step-register-connector.png) 2. In the **Register a new connector** dialog, choose the Lambda function you uploaded and specify the connector label using the connector name. - ![register connector dialog](/media/develop/aws-appflow-step-register-connector-dialog.png) + ![register connector dialog](./media/develop/aws-appflow-step-register-connector-dialog.png) 3. Click **Register**. Then, a TiDB connector is registered successfully. @@ -95,13 +95,13 @@ git clone https://github.com/pingcap-inc/tidb-appflow-integration Navigate to [Amazon AppFlow > Flows](https://console.aws.amazon.com/appflow/home#/list) and click **Create flow**. -![create flow](/media/develop/aws-appflow-step-create-flow.png) +![create flow](./media/develop/aws-appflow-step-create-flow.png) ### Set the flow name Enter the flow name, and then click **Next**. -![name flow](/media/develop/aws-appflow-step-name-flow.png) +![name flow](./media/develop/aws-appflow-step-name-flow.png) ### Set the source and destination tables @@ -109,21 +109,21 @@ Choose the **Source details** and **Destination details**. TiDB connector can be 1. Choose the source name. This document uses **Salesforce** as an example source. - ![salesforce source](/media/develop/aws-appflow-step-salesforce-source.png) + ![salesforce source](./media/develop/aws-appflow-step-salesforce-source.png) After you register to Salesforce, Salesforce will add some example data to your platform. The following steps will use the **Account** object as an example source object. - ![salesforce data](/media/develop/aws-appflow-step-salesforce-data.png) + ![salesforce data](./media/develop/aws-appflow-step-salesforce-data.png) 2. Click **Connect**. 1. In the **Connect to Salesforce** dialog, specify the name of this connection, and then click **Continue**. - ![connect to salesforce](/media/develop/aws-appflow-step-connect-to-salesforce.png) + ![connect to salesforce](./media/develop/aws-appflow-step-connect-to-salesforce.png) 2. Click **Allow** to confirm that AWS can read your Salesforce data. - ![allow salesforce](/media/develop/aws-appflow-step-allow-salesforce.png) + ![allow salesforce](./media/develop/aws-appflow-step-allow-salesforce.png) > **Note:** > @@ -131,7 +131,7 @@ Choose the **Source details** and **Destination details**. TiDB connector can be 3. 
In the **Destination details** area, choose **TiDB-Connector** as the destination. The **Connect** button is displayed. - ![tidb dest](/media/develop/aws-appflow-step-tidb-dest.png) + ![tidb dest](./media/develop/aws-appflow-step-tidb-dest.png) 4. Before clicking **Connect**, you need to create a `sf_account` table in TiDB for the Salesforce **Account** object. Note that this table schema is different from the sample data in [Tutorial of Amazon AppFlow](https://docs.aws.amazon.com/appflow/latest/userguide/flow-tutorial-set-up-source.html). @@ -150,19 +150,19 @@ Choose the **Source details** and **Destination details**. TiDB connector can be 5. After the `sf_account` table is created, click **Connect**. A connection dialog is displayed. 6. In the **Connect to TiDB-Connector** dialog, enter the connection properties of the TiDB cluster. If you use a TiDB Cloud Serverless cluster, you need to set the **TLS** option to `Yes`, which lets the TiDB connector use the TLS connection. Then, click **Connect**. - ![tidb connection message](/media/develop/aws-appflow-step-tidb-connection-message.png) + ![tidb connection message](./media/develop/aws-appflow-step-tidb-connection-message.png) 7. Now you can get all tables in the database that you specified for connection. Choose the **sf_account** table from the drop-down list. - ![database](/media/develop/aws-appflow-step-database.png) + ![database](./media/develop/aws-appflow-step-database.png) The following screenshot shows the configurations to transfer data from the Salesforce **Account** object to the `sf_account` table in TiDB: - ![complete flow](/media/develop/aws-appflow-step-complete-flow.png) + ![complete flow](./media/develop/aws-appflow-step-complete-flow.png) 8. In the **Error handling** area, choose **Stop the current flow run**. In the **Flow trigger** area, choose the **Run on demand** trigger type, which means you need to run the flow manually. Then, click **Next**. - ![complete step1](/media/develop/aws-appflow-step-complete-step1.png) + ![complete step1](./media/develop/aws-appflow-step-complete-step1.png) ### Set mapping rules @@ -180,7 +180,7 @@ Map the fields of the **Account** object in Salesforce to the `sf_account` table - To set a mapping rule, you can select a source field name on the left, and select a destination field name on the right. Then, click **Map fields**, and a rule is set. - ![add mapping rule](/media/develop/aws-appflow-step-add-mapping-rule.png) + ![add mapping rule](./media/develop/aws-appflow-step-add-mapping-rule.png) - The following mapping rules (Source field name -> Destination field name) are needed in this document: @@ -191,31 +191,31 @@ Map the fields of the **Account** object in Salesforce to the `sf_account` table - Account Rating -> rating - Industry -> industry - ![mapping a rule](/media/develop/aws-appflow-step-mapping-a-rule.png) + ![mapping a rule](./media/develop/aws-appflow-step-mapping-a-rule.png) - ![show all mapping rules](/media/develop/aws-appflow-step-show-all-mapping-rules.png) + ![show all mapping rules](./media/develop/aws-appflow-step-show-all-mapping-rules.png) ### (Optional) Set filters If you want to add some filters to your data fields, you can set them here. Otherwise, skip this step and click **Next**. -![filters](/media/develop/aws-appflow-step-filters.png) +![filters](./media/develop/aws-appflow-step-filters.png) ### Confirm and create the flow Confirm the information of the flow to be created. If everything looks fine, click **Create flow**. 
-![review](/media/develop/aws-appflow-step-review.png) +![review](./media/develop/aws-appflow-step-review.png) ## Step 3. Run the flow On the page of the newly created flow, click **Run flow** in the upper-right corner. -![run flow](/media/develop/aws-appflow-step-run-flow.png) +![run flow](./media/develop/aws-appflow-step-run-flow.png) The following screenshot shows an example that the flow runs successfully: -![run success](/media/develop/aws-appflow-step-run-success.png) +![run success](./media/develop/aws-appflow-step-run-success.png) Query the `sf_account` table, and you can see that the records from the Salesforce **Account** object have been written to it: diff --git a/develop/dev-guide-gui-datagrip.md b/develop/dev-guide-gui-datagrip.md index f3089d4e05921..5a98e342877a5 100644 --- a/develop/dev-guide-gui-datagrip.md +++ b/develop/dev-guide-gui-datagrip.md @@ -68,21 +68,21 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 5. Launch DataGrip and create a project to manage your connections. - ![Create a project in DataGrip](/media/develop/datagrip-create-project.jpg) + ![Create a project in DataGrip](./media/develop/datagrip-create-project.jpg) 6. In the newly created project, click **+** in the upper-left corner of the **Database Explorer** panel, and select **Data Source** > **Other** > **TiDB**. - ![Select a data source in DataGrip](/media/develop/datagrip-data-source-select.jpg) + ![Select a data source in DataGrip](./media/develop/datagrip-data-source-select.jpg) 7. Copy the connection string from the TiDB Cloud connection dialog. Then, paste it into the **URL** field, and the remaining parameters will be auto-populated. An example result is as follows: - ![Configure the URL field for TiDB Cloud Serverless](/media/develop/datagrip-url-paste.jpg) + ![Configure the URL field for TiDB Cloud Serverless](./media/develop/datagrip-url-paste.jpg) If a **Download missing driver files** warning displays, click **Download** to acquire the driver files. 8. Click **Test Connection** to validate the connection to the TiDB Cloud Serverless cluster. - ![Test the connection to a TiDB Cloud Serverless cluster](/media/develop/datagrip-test-connection.jpg) + ![Test the connection to a TiDB Cloud Serverless cluster](./media/develop/datagrip-test-connection.jpg) 9. Click **OK** to save the connection configuration. @@ -101,11 +101,11 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 4. Launch DataGrip and create a project to manage your connections. - ![Create a project in DataGrip](/media/develop/datagrip-create-project.jpg) + ![Create a project in DataGrip](./media/develop/datagrip-create-project.jpg) 5. In the newly created project, click **+** in the upper-left corner of the **Database Explorer** panel, and select **Data Source** > **Other** > **TiDB**. - ![Select a data source in DataGrip](/media/develop/datagrip-data-source-select.jpg) + ![Select a data source in DataGrip](./media/develop/datagrip-data-source-select.jpg) 6. Copy and paste the appropriate connection string into the **Data Source and Drivers** window in DataGrip. 
The mappings between DataGrip fields and TiDB Cloud Dedicated connection string are as follows: @@ -118,21 +118,21 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele An example is as follows: - ![Configure the connection parameters for TiDB Cloud Dedicated](/media/develop/datagrip-dedicated-connect.jpg) + ![Configure the connection parameters for TiDB Cloud Dedicated](./media/develop/datagrip-dedicated-connect.jpg) 7. Click the **SSH/SSL** tab, select the **Use SSL** checkbox, and input the CA certificate path into the **CA file** field. - ![Configure the CA for TiDB Cloud Dedicated](/media/develop/datagrip-dedicated-ssl.jpg) + ![Configure the CA for TiDB Cloud Dedicated](./media/develop/datagrip-dedicated-ssl.jpg) If a **Download missing driver files** warning displays, click **Download** to acquire the driver files. 8. Click the **Advanced** tab, scroll to find the **enabledTLSProtocols** parameter, and set its value to `TLSv1.2,TLSv1.3`. - ![Configure the TLS for TiDB Cloud Dedicated](/media/develop/datagrip-dedicated-advanced.jpg) + ![Configure the TLS for TiDB Cloud Dedicated](./media/develop/datagrip-dedicated-advanced.jpg) 9. Click **Test Connection** to validate the connection to the TiDB Cloud Dedicated cluster. - ![Test the connection to a TiDB Cloud Dedicated cluster](/media/develop/datagrip-dedicated-test-connection.jpg) + ![Test the connection to a TiDB Cloud Dedicated cluster](./media/develop/datagrip-dedicated-test-connection.jpg) 10. Click **OK** to save the connection configuration. @@ -141,11 +141,11 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 1. Launch DataGrip and create a project to manage your connections. - ![Create a project in DataGrip](/media/develop/datagrip-create-project.jpg) + ![Create a project in DataGrip](./media/develop/datagrip-create-project.jpg) 2. In the newly created project, click **+** in the upper-left corner of the **Database Explorer** panel, and select **Data Source** > **Other** > **TiDB**. - ![Select a data source in DataGrip](/media/develop/datagrip-data-source-select.jpg) + ![Select a data source in DataGrip](./media/develop/datagrip-data-source-select.jpg) 3. Configure the following connection parameters: @@ -156,13 +156,13 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele An example is as follows: - ![Configure the connection parameters for TiDB Self-Managed](/media/develop/datagrip-self-hosted-connect.jpg) + ![Configure the connection parameters for TiDB Self-Managed](./media/develop/datagrip-self-hosted-connect.jpg) If a **Download missing driver files** warning displays, click **Download** to acquire the driver files. 4. Click **Test Connection** to validate the connection to the TiDB Self-Managed cluster. - ![Test the connection to a TiDB Self-Managed cluster](/media/develop/datagrip-self-hosted-test-connection.jpg) + ![Test the connection to a TiDB Self-Managed cluster](./media/develop/datagrip-self-hosted-test-connection.jpg) 5. Click **OK** to save the connection configuration. diff --git a/develop/dev-guide-gui-dbeaver.md b/develop/dev-guide-gui-dbeaver.md index aa83a5ba66c57..648ddeace46c4 100644 --- a/develop/dev-guide-gui-dbeaver.md +++ b/develop/dev-guide-gui-dbeaver.md @@ -63,23 +63,23 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 5. Launch DBeaver and click **New Database Connection** in the upper-left corner. 
In the **Connect to a database** dialog, select **TiDB** from the list, and then click **Next**. - ![Select TiDB as the database in DBeaver](/media/develop/dbeaver-select-database.jpg) + ![Select TiDB as the database in DBeaver](./media/develop/dbeaver-select-database.jpg) 6. Copy the connection string from the TiDB Cloud connection dialog. In DBeaver, select **URL** for **Connect by** and paste the connection string into the **URL** field. 7. In the **Authentication (Database Native)** section, enter your **Username** and **Password**. An example is as follows: - ![Configure connection settings for TiDB Cloud Serverless](/media/develop/dbeaver-connection-settings-serverless.jpg) + ![Configure connection settings for TiDB Cloud Serverless](./media/develop/dbeaver-connection-settings-serverless.jpg) 8. Click **Test Connection** to validate the connection to the TiDB Cloud Serverless cluster. If the **Download driver files** dialog is displayed, click **Download** to get the driver files. - ![Download driver files](/media/develop/dbeaver-download-driver.jpg) + ![Download driver files](./media/develop/dbeaver-download-driver.jpg) If the connection test is successful, the **Connection test** dialog is displayed as follows. Click **OK** to close it. - ![Connection test result](/media/develop/dbeaver-connection-test.jpg) + ![Connection test result](./media/develop/dbeaver-connection-test.jpg) 9. Click **Finish** to save the connection configuration. @@ -98,7 +98,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 4. Launch DBeaver and click **New Database Connection** in the upper-left corner. In the **Connect to a database** dialog, select **TiDB** from the list, and then click **Next**. - ![Select TiDB as the database in DBeaver](/media/develop/dbeaver-select-database.jpg) + ![Select TiDB as the database in DBeaver](./media/develop/dbeaver-select-database.jpg) 5. Copy and paste the appropriate connection string into the DBeaver connection panel. The mappings between DBeaver fields and TiDB Cloud Dedicated connection string are as follows: @@ -111,17 +111,17 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele An example is as follows: - ![Configure connection settings for TiDB Cloud Dedicated](/media/develop/dbeaver-connection-settings-dedicated.jpg) + ![Configure connection settings for TiDB Cloud Dedicated](./media/develop/dbeaver-connection-settings-dedicated.jpg) 6. Click **Test Connection** to validate the connection to the TiDB Cloud Dedicated cluster. If the **Download driver files** dialog is displayed, click **Download** to get the driver files. - ![Download driver files](/media/develop/dbeaver-download-driver.jpg) + ![Download driver files](./media/develop/dbeaver-download-driver.jpg) If the connection test is successful, the **Connection test** dialog is displayed as follows. Click **OK** to close it. - ![Connection test result](/media/develop/dbeaver-connection-test.jpg) + ![Connection test result](./media/develop/dbeaver-connection-test.jpg) 7. Click **Finish** to save the connection configuration. @@ -130,7 +130,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 1. Launch DBeaver and click **New Database Connection** in the upper-left corner. In the **Connect to a database** dialog, select **TiDB** from the list, and then click **Next**. 
- ![Select TiDB as the database in DBeaver](/media/develop/dbeaver-select-database.jpg) + ![Select TiDB as the database in DBeaver](./media/develop/dbeaver-select-database.jpg) 2. Configure the following connection parameters: @@ -141,17 +141,17 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele An example is as follows: - ![Configure connection settings for TiDB Self-Managed](/media/develop/dbeaver-connection-settings-self-hosted.jpg) + ![Configure connection settings for TiDB Self-Managed](./media/develop/dbeaver-connection-settings-self-hosted.jpg) 3. Click **Test Connection** to validate the connection to the TiDB Self-Managed cluster. If the **Download driver files** dialog is displayed, click **Download** to get the driver files. - ![Download driver files](/media/develop/dbeaver-download-driver.jpg) + ![Download driver files](./media/develop/dbeaver-download-driver.jpg) If the connection test is successful, the **Connection test** dialog is displayed as follows. Click **OK** to close it. - ![Connection test result](/media/develop/dbeaver-connection-test.jpg) + ![Connection test result](./media/develop/dbeaver-connection-test.jpg) 4. Click **Finish** to save the connection configuration. diff --git a/develop/dev-guide-gui-mysql-workbench.md b/develop/dev-guide-gui-mysql-workbench.md index 0aa8a63ccc3f2..3a35bfae70967 100644 --- a/develop/dev-guide-gui-mysql-workbench.md +++ b/develop/dev-guide-gui-mysql-workbench.md @@ -68,7 +68,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you have se 5. Launch MySQL Workbench and click **+** near the **MySQL Connections** title. - ![MySQL Workbench: add new connection](/media/develop/mysql-workbench-add-new-connection.png) + ![MySQL Workbench: add new connection](./media/develop/mysql-workbench-add-new-connection.png) 6. In the **Setup New Connection** dialog, configure the following connection parameters: @@ -78,11 +78,11 @@ Connect to your TiDB cluster depending on the TiDB deployment option you have se - **Username**: enter the `USERNAME` parameter from the TiDB Cloud connection dialog. - **Password**: click **Store in Keychain ...** or **Store in Vault**, enter the password of the TiDB Cloud Serverless cluster, and then click **OK** to store the password. - ![MySQL Workbench: store the password of TiDB Cloud Serverless in keychain](/media/develop/mysql-workbench-store-password-in-keychain.png) + ![MySQL Workbench: store the password of TiDB Cloud Serverless in keychain](./media/develop/mysql-workbench-store-password-in-keychain.png) The following figure shows an example of the connection parameters: - ![MySQL Workbench: configure connection settings for TiDB Cloud Serverless](/media/develop/mysql-workbench-connection-config-serverless-parameters.png) + ![MySQL Workbench: configure connection settings for TiDB Cloud Serverless](./media/develop/mysql-workbench-connection-config-serverless-parameters.png) 7. Click **Test Connection** to validate the connection to the TiDB Cloud Serverless cluster. @@ -103,7 +103,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you have se 4. Launch MySQL Workbench and click **+** near the **MySQL Connections** title. - ![MySQL Workbench: add new connection](/media/develop/mysql-workbench-add-new-connection.png) + ![MySQL Workbench: add new connection](./media/develop/mysql-workbench-add-new-connection.png) 5. 
In the **Setup New Connection** dialog, configure the following connection parameters: @@ -113,11 +113,11 @@ Connect to your TiDB cluster depending on the TiDB deployment option you have se - **Username**: enter the `USERNAME` parameter from the TiDB Cloud connection dialog. - **Password**: click **Store in Keychain ...**, enter the password of the TiDB Cloud Dedicated cluster, and then click **OK** to store the password. - ![MySQL Workbench: store the password of TiDB Cloud Dedicated in keychain](/media/develop/mysql-workbench-store-dedicated-password-in-keychain.png) + ![MySQL Workbench: store the password of TiDB Cloud Dedicated in keychain](./media/develop/mysql-workbench-store-dedicated-password-in-keychain.png) The following figure shows an example of the connection parameters: - ![MySQL Workbench: configure connection settings for TiDB Cloud Dedicated](/media/develop/mysql-workbench-connection-config-dedicated-parameters.png) + ![MySQL Workbench: configure connection settings for TiDB Cloud Dedicated](./media/develop/mysql-workbench-connection-config-dedicated-parameters.png) 6. Click **Test Connection** to validate the connection to the TiDB Cloud Dedicated cluster. @@ -128,7 +128,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you have se 1. Launch MySQL Workbench and click **+** near the **MySQL Connections** title. - ![MySQL Workbench: add new connection](/media/develop/mysql-workbench-add-new-connection.png) + ![MySQL Workbench: add new connection](./media/develop/mysql-workbench-add-new-connection.png) 2. In the **Setup New Connection** dialog, configure the following connection parameters: @@ -138,11 +138,11 @@ Connect to your TiDB cluster depending on the TiDB deployment option you have se - **Username**: enter the username to use to connect to your TiDB. - **Password**: click **Store in Keychain ...**, enter the password to use to connect to your TiDB cluster, and then click **OK** to store the password. - ![MySQL Workbench: store the password of TiDB Self-Managed in keychain](/media/develop/mysql-workbench-store-self-hosted-password-in-keychain.png) + ![MySQL Workbench: store the password of TiDB Self-Managed in keychain](./media/develop/mysql-workbench-store-self-hosted-password-in-keychain.png) The following figure shows an example of the connection parameters: - ![MySQL Workbench: configure connection settings for TiDB Self-Managed](/media/develop/mysql-workbench-connection-config-self-hosted-parameters.png) + ![MySQL Workbench: configure connection settings for TiDB Self-Managed](./media/develop/mysql-workbench-connection-config-self-hosted-parameters.png) 3. Click **Test Connection** to validate the connection to the TiDB Self-Managed cluster. @@ -160,7 +160,7 @@ This error indicates that the query execution time exceeds the timeout limit. To 1. Launch MySQL Workbench and navigate to the **Workbench Preferences** page. 2. In the **SQL Editor** > **MySQL Session** section, configure the **DBMS connection read timeout interval (in seconds)** option. This sets the maximum amount of time (in seconds) that a query can take before MySQL Workbench disconnects from the server. 
- ![MySQL Workbench: adjust timeout option in SQL Editor settings](/media/develop/mysql-workbench-adjust-sqleditor-read-timeout.jpg) + ![MySQL Workbench: adjust timeout option in SQL Editor settings](./media/develop/mysql-workbench-adjust-sqleditor-read-timeout.jpg) For more information, see [MySQL Workbench frequently asked questions](https://dev.mysql.com/doc/workbench/en/workbench-faq.html). diff --git a/develop/dev-guide-gui-navicat.md b/develop/dev-guide-gui-navicat.md index a6f0e2295ef8b..ed3741fd74a09 100644 --- a/develop/dev-guide-gui-navicat.md +++ b/develop/dev-guide-gui-navicat.md @@ -64,7 +64,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you have se 5. Launch Navicat Premium, click **Connection** in the upper-left corner, select **PingCAP** from the **Vendor Filter** list, and double-click **TiDB** in the right panel. - ![Navicat: add new connection](/media/develop/navicat-premium-add-new-connection.png) + ![Navicat: add new connection](./media/develop/navicat-premium-add-new-connection.png) 6. In the **New Connection (TiDB)** dialog, configure the following connection parameters: @@ -74,11 +74,11 @@ Connect to your TiDB cluster depending on the TiDB deployment option you have se - **User Name**: enter the `USERNAME` parameter from the TiDB Cloud connection dialog. - **Password**: enter the password of the TiDB Cloud Serverless cluster. - ![Navicat: configure connection general panel for TiDB Cloud Serverless](/media/develop/navicat-premium-connection-config-serverless-general.png) + ![Navicat: configure connection general panel for TiDB Cloud Serverless](./media/develop/navicat-premium-connection-config-serverless-general.png) 7. Click the **SSL** tab and select **Use SSL**, **Use authentication**, and **Verify server certificate against CA** checkboxes. Then, select the `CA` file from the TiDB Cloud connection dialog into the **CA Certificate** field. - ![Navicat: configure connection SSL panel for TiDB Cloud Serverless](/media/develop/navicat-premium-connection-config-serverless-ssl.png) + ![Navicat: configure connection SSL panel for TiDB Cloud Serverless](./media/develop/navicat-premium-connection-config-serverless-ssl.png) 8. Click **Test Connection** to validate the connection to the TiDB Cloud Serverless cluster. @@ -101,7 +101,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you have se 5. Launch Navicat Premium, click **Connection** in the upper-left corner, select **PingCAP** from the **Vendor Filter** list, and double-click **TiDB** in the right panel. - ![Navicat: add new connection](/media/develop/navicat-premium-add-new-connection.png) + ![Navicat: add new connection](./media/develop/navicat-premium-add-new-connection.png) 6. In the **New Connection (TiDB)** dialog, configure the following connection parameters: @@ -111,11 +111,11 @@ Connect to your TiDB cluster depending on the TiDB deployment option you have se - **User Name**: enter the `USERNAME` parameter from the TiDB Cloud connection dialog. - **Password**: enter the password of the TiDB Cloud Dedicated cluster. - ![Navicat: configure connection general panel for TiDB Cloud Dedicated](/media/develop/navicat-premium-connection-config-dedicated-general.png) + ![Navicat: configure connection general panel for TiDB Cloud Dedicated](./media/develop/navicat-premium-connection-config-dedicated-general.png) 7. Click the **SSL** tab and select **Use SSL**, **Use authentication**, and **Verify server certificate against CA** checkboxes. 
Then, select the CA file downloaded in step 4 into the **CA Certificate** field. - ![Navicat: configure connection SSL panel for TiDB Cloud Dedicated](/media/develop/navicat-premium-connection-config-dedicated-ssl.png) + ![Navicat: configure connection SSL panel for TiDB Cloud Dedicated](./media/develop/navicat-premium-connection-config-dedicated-ssl.png) 8. **Test Connection** to validate the connection to the TiDB Cloud Dedicated cluster. @@ -126,7 +126,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you have se 1. Launch Navicat Premium, click **Connection** in the upper-left corner, select **PingCAP** from the **Vendor Filter** list, and double-click **TiDB** in the right panel. - ![Navicat: add new connection](/media/develop/navicat-premium-add-new-connection.png) + ![Navicat: add new connection](./media/develop/navicat-premium-add-new-connection.png) 2. In the **New Connection (TiDB)** dialog, configure the following connection parameters: @@ -136,7 +136,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you have se - **User Name**: enter the username to use to connect to your TiDB. - **Password**: enter the password to use to connect to your TiDB. - ![Navicat: configure connection general panel for self-hosted TiDB](/media/develop/navicat-premium-connection-config-self-hosted-general.png) + ![Navicat: configure connection general panel for self-hosted TiDB](./media/develop/navicat-premium-connection-config-self-hosted-general.png) 3. Click **Test Connection** to validate the connection to the TiDB Self-Managed cluster. diff --git a/develop/dev-guide-gui-vscode-sqltools.md b/develop/dev-guide-gui-vscode-sqltools.md index f8230e47d4774..bfa2e75041058 100644 --- a/develop/dev-guide-gui-vscode-sqltools.md +++ b/develop/dev-guide-gui-vscode-sqltools.md @@ -72,7 +72,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you have se 5. Launch VS Code and select the **SQLTools** extension on the navigation pane. Under the **CONNECTIONS** section, click **Add New Connection** and select **TiDB** as the database driver. - ![VS Code SQLTools: add new connection](/media/develop/vsc-sqltools-add-new-connection.jpg) + ![VS Code SQLTools: add new connection](./media/develop/vsc-sqltools-add-new-connection.jpg) 6. In the setting pane, configure the following connection parameters: @@ -93,14 +93,14 @@ Connect to your TiDB cluster depending on the TiDB deployment option you have se > > If you are running on Windows or GitHub Codespaces, you can leave **SSL** blank. By default SQLTools trusts well-known CAs curated by Let's Encrypt. For more information, see [TiDB Cloud Serverless root certificate management](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-clusters#root-certificate-management). - ![VS Code SQLTools: configure connection settings for TiDB Cloud Serverless](/media/develop/vsc-sqltools-connection-config-serverless.jpg) + ![VS Code SQLTools: configure connection settings for TiDB Cloud Serverless](./media/develop/vsc-sqltools-connection-config-serverless.jpg) 7. Click **TEST CONNECTION** to validate the connection to the TiDB Cloud Serverless cluster. 1. In the pop-up window, click **Allow**. 2. In the **SQLTools Driver Credentials** dialog, enter the password you created in step 4. 
- ![VS Code SQLTools: enter password to connect to TiDB Cloud Serverless](/media/develop/vsc-sqltools-password.jpg) + ![VS Code SQLTools: enter password to connect to TiDB Cloud Serverless](./media/develop/vsc-sqltools-password.jpg) 8. If the connection test is successful, you can see the **Successfully connected!** message. Click **SAVE CONNECTION** to save the connection configuration. @@ -119,7 +119,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you have se 4. Launch VS Code and select the **SQLTools** extension on the navigation pane. Under the **CONNECTIONS** section, click **Add New Connection** and select **TiDB** as the database driver. - ![VS Code SQLTools: add new connection](/media/develop/vsc-sqltools-add-new-connection.jpg) + ![VS Code SQLTools: add new connection](./media/develop/vsc-sqltools-add-new-connection.jpg) 5. In the setting pane, configure the following connection parameters: @@ -136,14 +136,14 @@ Connect to your TiDB cluster depending on the TiDB deployment option you have se - **Authentication Protocol**: select **default**. - **SSL**: select **Disabled**. - ![VS Code SQLTools: configure connection settings for TiDB Cloud Dedicated](/media/develop/vsc-sqltools-connection-config-dedicated.jpg) + ![VS Code SQLTools: configure connection settings for TiDB Cloud Dedicated](./media/develop/vsc-sqltools-connection-config-dedicated.jpg) 6. Click **TEST CONNECTION** to validate the connection to the TiDB Cloud Dedicated cluster. 1. In the pop-up window, click **Allow**. 2. In the **SQLTools Driver Credentials** dialog, enter the password of the TiDB Cloud Dedicated cluster. - ![VS Code SQLTools: enter password to connect to TiDB Cloud Dedicated](/media/develop/vsc-sqltools-password.jpg) + ![VS Code SQLTools: enter password to connect to TiDB Cloud Dedicated](./media/develop/vsc-sqltools-password.jpg) 7. If the connection test is successful, you can see the **Successfully connected!** message. Click **SAVE CONNECTION** to save the connection configuration. @@ -152,7 +152,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you have se 1. Launch VS Code and select the **SQLTools** extension on the navigation pane. Under the **CONNECTIONS** section, click **Add New Connection** and select **TiDB** as the database driver. - ![VS Code SQLTools: add new connection](/media/develop/vsc-sqltools-add-new-connection.jpg) + ![VS Code SQLTools: add new connection](./media/develop/vsc-sqltools-add-new-connection.jpg) 2. In the setting pane, configure the following connection parameters: @@ -173,13 +173,13 @@ Connect to your TiDB cluster depending on the TiDB deployment option you have se - **Authentication Protocol**: select **default**. - **SSL**: select **Disabled**. - ![VS Code SQLTools: configure connection settings for TiDB Self-Managed](/media/develop/vsc-sqltools-connection-config-self-hosted.jpg) + ![VS Code SQLTools: configure connection settings for TiDB Self-Managed](./media/develop/vsc-sqltools-connection-config-self-hosted.jpg) 3. Click **TEST CONNECTION** to validate the connection to the TiDB Self-Managed cluster. If the password is not empty, click **Allow** in the pop-up window, and then enter the password of the TiDB Self-Managed cluster. - ![VS Code SQLTools: enter password to connect to TiDB Self-Managed](/media/develop/vsc-sqltools-password.jpg) + ![VS Code SQLTools: enter password to connect to TiDB Self-Managed](./media/develop/vsc-sqltools-password.jpg) 4. 
If the connection test is successful, you can see the **Successfully connected!** message. Click **SAVE CONNECTION** to save the connection configuration. diff --git a/develop/dev-guide-join-tables.md b/develop/dev-guide-join-tables.md index 237dc9a8b4841..b015045072872 100644 --- a/develop/dev-guide-join-tables.md +++ b/develop/dev-guide-join-tables.md @@ -15,7 +15,7 @@ This section describes the Join types in detail. The join result of an inner join returns only rows that match the join condition. -![Inner Join](/media/develop/inner-join.png) +![Inner Join](./media/develop/inner-join.png) For example, if you want to know the most prolific author, you need to join the author table named `authors` with the book author table named `book_authors`. @@ -88,7 +88,7 @@ public List getTop10AuthorsOrderByBooks() throws SQLException { The left outer join returns all the rows in the left table and the values ​​in the right table that match the join condition. If no rows are matched in the right table, it will be filled with `NULL`. -![Left Outer Join](/media/develop/left-outer-join.png) +![Left Outer Join](./media/develop/left-outer-join.png) In some cases, you want to use multiple tables to complete the data query, but do not want the data set to become too small because the join condition are not met. @@ -191,7 +191,7 @@ public List getLatestBooksWithAverageScore() throws SQLException { A right outer join returns all the records in the right table and the values ​​in the left table that match the join condition. If there is no matching value, it is filled with `NULL`. -![Right Outer Join](/media/develop/right-outer-join.png) +![Right Outer Join](./media/develop/right-outer-join.png) ### CROSS JOIN diff --git a/develop/dev-guide-playground-gitpod.md b/develop/dev-guide-playground-gitpod.md index 1a1cec665bc87..80d2e7b32445d 100644 --- a/develop/dev-guide-playground-gitpod.md +++ b/develop/dev-guide-playground-gitpod.md @@ -31,7 +31,7 @@ Take the [Spring Boot Web](/develop/dev-guide-sample-application-java-spring-boo After that, you will see a page similar to the following: -![playground gitpod workspace init](/media/develop/playground-gitpod-workspace-init.png) +![playground gitpod workspace init](./media/develop/playground-gitpod-workspace-init.png) This scenario in the page uses [TiUP](https://docs.pingcap.com/tidb/stable/tiup-overview) to build a TiDB Playground. You can check the progress on the left side of the terminal area. @@ -39,7 +39,7 @@ Once the TiDB Playground is ready, another `Spring JPA Hibernate` task will run. After all these tasks are finished, you will see a page similar to the following. On this page, check the `REMOTE EXPLORER` area in the left navigation pane (Gitpod supports URL-based port forwarding) and find the URL of your port `8080`. -![playground gitpod workspace ready](/media/develop/playground-gitpod-workspace-ready.png) +![playground gitpod workspace ready](./media/develop/playground-gitpod-workspace-ready.png) ## Using custom Gitpod configuration and Docker image @@ -165,7 +165,7 @@ Visit `https://gitpod.io/workspaces` for all established workspaces. Gitpod provides a complete, automated, and pre-configured cloud-native development environment. You can develop, run, and test code directly in the browser without any local configurations. -![playground gitpod summary](/media/develop/playground-gitpod-summary.png) +![playground gitpod summary](./media/develop/playground-gitpod-summary.png) ## Need help? 
diff --git a/develop/dev-guide-proxysql-integration.md b/develop/dev-guide-proxysql-integration.md index dc57c8cae232d..d9b878d634a2d 100644 --- a/develop/dev-guide-proxysql-integration.md +++ b/develop/dev-guide-proxysql-integration.md @@ -29,7 +29,7 @@ ProxySQL is designed from the ground up to be fast, efficient, and easy to use. The most obvious way to deploy ProxySQL with TiDB is to add ProxySQL as a standalone intermediary between the application layer and TiDB. However, the scalability and failure tolerance are not guaranteed, and it also adds additional latency due to network hop. To avoid these problems, an alternate deployment architecture is to deploy ProxySQL as a sidecar as below: -![proxysql-client-side-tidb-cloud](/media/develop/proxysql-client-side-tidb-cloud.png) +![proxysql-client-side-tidb-cloud](./media/develop/proxysql-client-side-tidb-cloud.png) > **Note:** > @@ -85,7 +85,7 @@ systemctl start docker 1. Download the **64-bit Git for Windows Setup** package from the [Git Windows Download](https://git-scm.com/download/win) page. 2. Install the Git package by following the setup wizard. You can click **Next** for a few times to use the default installation settings. - ![proxysql-windows-git-install](/media/develop/proxysql-windows-git-install.png) + ![proxysql-windows-git-install](./media/develop/proxysql-windows-git-install.png) - Download and install MySQL Shell. @@ -109,7 +109,7 @@ systemctl start docker 1. Download Docker Desktop installer from the [Docker Download](https://www.docker.com/products/docker-desktop/) page. 2. Double-click the installer to run it. After the installation is completed, you will be prompted for a restart. - ![proxysql-windows-docker-install](/media/develop/proxysql-windows-docker-install.png) + ![proxysql-windows-docker-install](./media/develop/proxysql-windows-docker-install.png) - Download the latest Python 3 installer from the [Python Download](https://www.python.org/downloads/) page and run it. @@ -798,7 +798,7 @@ This section takes query routing as an example to show some of the benefits that Databases can be overloaded by high traffic, faulty code, or malicious spam. With query rules of ProxySQL, you can respond to these issues quickly and effectively by rerouting, rewriting, or rejecting queries. -![proxysql-client-side-rules](/media/develop/proxysql-client-side-rules.png) +![proxysql-client-side-rules](./media/develop/proxysql-client-side-rules.png) > **Note:** > diff --git a/develop/dev-guide-transaction-restraints.md b/develop/dev-guide-transaction-restraints.md index 512e21ba2d1d3..17398554b737e 100644 --- a/develop/dev-guide-transaction-restraints.md +++ b/develop/dev-guide-transaction-restraints.md @@ -11,7 +11,7 @@ This document briefly introduces the transaction restraints in TiDB. The isolation levels supported by TiDB are **RC (Read Committed)** and **SI (Snapshot Isolation)**, where **SI** is basically equivalent to the **RR (Repeatable Read)** isolation level. -![isolation level](/media/develop/transaction_isolation_level.png) +![isolation level](./media/develop/transaction_isolation_level.png) ## Snapshot Isolation can avoid phantom reads @@ -356,7 +356,7 @@ mysql> SELECT * FROM doctors; In both transactions, the application first checks if two or more doctors are on call; if so, it assumes that one doctor can safely take leave. Since the database uses the snapshot isolation, both checks return `2`, so both transactions move on to the next stage. `Alice` updates her record to be off duty, and so does `Bob`. 
Both transactions are successfully committed. Now there are no doctors on duty which violates the requirement that at least one doctor should be on call. The following diagram (quoted from **_Designing Data-Intensive Applications_**) illustrates what actually happens. -![Write Skew](/media/develop/write-skew.png) +![Write Skew](./media/develop/write-skew.png) Now let's change the sample program to use `SELECT FOR UPDATE` to avoid the write skew problem: @@ -725,7 +725,7 @@ Note that for both the size restrictions and row restrictions, you should also c Currently locks are not added to auto-committed `SELECT FOR UPDATE` statements. The effect is shown in the following figure: -![The situation in TiDB](/media/develop/autocommit_selectforupdate_nowaitlock.png) +![The situation in TiDB](./media/develop/autocommit_selectforupdate_nowaitlock.png) This is a known incompatibility issue with MySQL. You can solve this issue by using the explicit `BEGIN;COMMIT;` statements. diff --git a/dm/dm-arch.md b/dm/dm-arch.md index 77804af759160..d80cb8cbe9056 100644 --- a/dm/dm-arch.md +++ b/dm/dm-arch.md @@ -9,7 +9,7 @@ This document introduces the architecture of Data Migration (DM). DM consists of three components: DM-master, DM-worker, and dmctl. -![Data Migration architecture](/media/dm/dm-architecture-2.0.png) +![Data Migration architecture](./media/dm/dm-architecture-2.0.png) ## Architecture components diff --git a/dm/dm-continuous-data-validation.md b/dm/dm-continuous-data-validation.md index d42748d50ecd4..e0a583e68bfe2 100644 --- a/dm/dm-continuous-data-validation.md +++ b/dm/dm-continuous-data-validation.md @@ -273,11 +273,11 @@ Flags: The architecture of continuous data validation (validator) in DM is as follows: -![validator summary](/media/dm/dm-validator-summary.jpeg) +![validator summary](./media/dm/dm-validator-summary.jpeg) The lifecycle of continuous data validation is as follows: -![validator lifecycle](/media/dm/dm-validator-lifecycle.jpeg) +![validator lifecycle](./media/dm/dm-validator-lifecycle.jpeg) The detailed implementation of continuous data validation is as follows: diff --git a/dm/dm-manage-schema.md b/dm/dm-manage-schema.md index ed32f4acaafd7..ebf769c0e9282 100644 --- a/dm/dm-manage-schema.md +++ b/dm/dm-manage-schema.md @@ -21,7 +21,7 @@ The internal table schema comes from the following sources: For incremental replication, schema maintenance is complicated. During the whole data replication, the following four table schemas are involved. These schemas might be the consistent or inconsistent with one another: -![schema](/media/dm/operate-schema.png) +![schema](./media/dm/operate-schema.png) * The upstream table schema at the current time, identified as `schema-U`. * The table schema of the binlog event currently being consumed by DM, identified as `schema-B`. This schema corresponds to the upstream table schema at a historical time. diff --git a/dm/dm-replication-logic.md b/dm/dm-replication-logic.md index 72e0440264918..528fdbaf89977 100644 --- a/dm/dm-replication-logic.md +++ b/dm/dm-replication-logic.md @@ -27,7 +27,7 @@ The Sync unit processes DML statements as follows: 4. Execute the DML to the downstream. 5. Periodically save the binlog position or GTID to the checkpoint. 
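For the last step, the checkpoint is persisted in the downstream TiDB cluster, so you can inspect how far replication has progressed with an ordinary query. The following sketch assumes the default `meta-schema` (`dm_meta`) and a migration task named `test`; the actual table name is derived from your task name:

```sql
-- Illustrative query: the syncer checkpoint table records the binlog position
-- or GTID that has been safely replicated for the task named `test`.
SELECT * FROM `dm_meta`.`test_syncer_checkpoint`;
```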
-![DML processing logic](/media/dm/dm-dml-replication-logic.png) +![DML processing logic](./media/dm/dm-dml-replication-logic.png) ## DML optimization logic diff --git a/dm/dm-webui-guide.md b/dm/dm-webui-guide.md index fcf02fd95b3a4..b5180e78c73ff 100644 --- a/dm/dm-webui-guide.md +++ b/dm/dm-webui-guide.md @@ -25,7 +25,7 @@ DM WebUI has the following pages: The interface is as follows: -![webui](/media/dm/dm-webui-preview-en.png) +![webui](./media/dm/dm-webui-preview-en.png) ## Access method diff --git a/dm/feature-shard-merge-optimistic.md b/dm/feature-shard-merge-optimistic.md index 57ebadcb41981..7d2535551b1eb 100644 --- a/dm/feature-shard-merge-optimistic.md +++ b/dm/feature-shard-merge-optimistic.md @@ -83,7 +83,7 @@ When you use the optimistic mode for a migration task, a DDL statement is migrat Merge and migrate the following three sharded tables to TiDB: -![optimistic-ddl-fail-example-1](/media/dm/optimistic-ddl-fail-example-1.png) +![optimistic-ddl-fail-example-1](./media/dm/optimistic-ddl-fail-example-1.png) Add a new column `Age` in `tbl01` and set the default value of the column to `0`: @@ -91,7 +91,7 @@ Add a new column `Age` in `tbl01` and set the default value of the column to `0` ALTER TABLE `tbl01` ADD COLUMN `Age` INT DEFAULT 0; ``` -![optimistic-ddl-fail-example-2](/media/dm/optimistic-ddl-fail-example-2.png) +![optimistic-ddl-fail-example-2](./media/dm/optimistic-ddl-fail-example-2.png) Add a new column `Age` in `tbl00` and set the default value of the column to `-1`: @@ -99,7 +99,7 @@ Add a new column `Age` in `tbl00` and set the default value of the column to `-1 ALTER TABLE `tbl00` ADD COLUMN `Age` INT DEFAULT -1; ``` -![optimistic-ddl-fail-example-3](/media/dm/optimistic-ddl-fail-example-3.png) +![optimistic-ddl-fail-example-3](./media/dm/optimistic-ddl-fail-example-3.png) By then, the `Age` column of `tbl00` is inconsistent because `DEFAULT 0` and `DEFAULT -1` are incompatible with each other. In this situation, DM will report the error, but you have to manually fix the data inconsistency. @@ -107,13 +107,13 @@ By then, the `Age` column of `tbl00` is inconsistent because `DEFAULT 0` and `DE In the optimistic mode, after DM-worker receives the DDL statement from the upstream, it forwards the updated table schema to DM-master. DM-worker tracks the current schema of each sharded table, and DM-master merges these schemas into a composite schema that is compatible with DML statements of every sharded table. Then DM-master migrates the corresponding DDL statement to the downstream. DML statements are directly migrated to the downstream. -![optimistic-ddl-flow](/media/dm/optimistic-ddl-flow.png) +![optimistic-ddl-flow](./media/dm/optimistic-ddl-flow.png) ### Examples Assume the upstream MySQL has three sharded tables (`tbl00`, `tbl01`, and `tbl02`). Merge and migrate these sharded tables to the `tbl` table in the downstream TiDB. See the following image: -![optimistic-ddl-example-1](/media/dm/optimistic-ddl-example-1.png) +![optimistic-ddl-example-1](./media/dm/optimistic-ddl-example-1.png) Add a `Level` column in the upstream: @@ -121,11 +121,11 @@ Add a `Level` column in the upstream: ALTER TABLE `tbl00` ADD COLUMN `Level` INT; ``` -![optimistic-ddl-example-2](/media/dm/optimistic-ddl-example-2.png) +![optimistic-ddl-example-2](./media/dm/optimistic-ddl-example-2.png) Then TiDB will receive the DML statement from `tbl00` (with the `Level` column) and the DML statement from the `tbl01` and `tbl02` tables (without the `Level` column). 
-![optimistic-ddl-example-3](/media/dm/optimistic-ddl-example-3.png) +![optimistic-ddl-example-3](./media/dm/optimistic-ddl-example-3.png) The following DML statements can be migrated to the downstream without any modification: @@ -134,7 +134,7 @@ UPDATE `tbl00` SET `Level` = 9 WHERE `ID` = 1; INSERT INTO `tbl02` (`ID`, `Name`) VALUES (27, 'Tony'); ``` -![optimistic-ddl-example-4](/media/dm/optimistic-ddl-example-4.png) +![optimistic-ddl-example-4](./media/dm/optimistic-ddl-example-4.png) Also add a `Level` column in `tbl01`: @@ -142,7 +142,7 @@ Also add a `Level` column in `tbl01`: ALTER TABLE `tbl01` ADD COLUMN `Level` INT; ``` -![optimistic-ddl-example-5](/media/dm/optimistic-ddl-example-5.png) +![optimistic-ddl-example-5](./media/dm/optimistic-ddl-example-5.png) At this time, the downstream already have had the same `Level` column, so DM-master performs no operation after comparing the table schemas. @@ -152,7 +152,7 @@ Drop a `Name` column in `tbl01`: ALTER TABLE `tbl01` DROP COLUMN `Name`; ``` -![optimistic-ddl-example-6](/media/dm/optimistic-ddl-example-6.png) +![optimistic-ddl-example-6](./media/dm/optimistic-ddl-example-6.png) Then the downstream will receive the DML statements from `tbl00` and `tbl02` with the `Name` column, so this column is not immediately dropped. @@ -163,7 +163,7 @@ INSERT INTO `tbl01` (`ID`, `Level`) VALUES (15, 7); UPDATE `tbl00` SET `Level` = 5 WHERE `ID` = 5; ``` -![optimistic-ddl-example-7](/media/dm/optimistic-ddl-example-7.png) +![optimistic-ddl-example-7](./media/dm/optimistic-ddl-example-7.png) Add a `Level` column in `tbl02`: @@ -171,7 +171,7 @@ Add a `Level` column in `tbl02`: ALTER TABLE `tbl02` ADD COLUMN `Level` INT; ``` -![optimistic-ddl-example-8](/media/dm/optimistic-ddl-example-8.png) +![optimistic-ddl-example-8](./media/dm/optimistic-ddl-example-8.png) By then, all sharded tables have the `Level` column. @@ -182,7 +182,7 @@ ALTER TABLE `tbl00` DROP COLUMN `Name`; ALTER TABLE `tbl02` DROP COLUMN `Name`; ``` -![optimistic-ddl-example-9](/media/dm/optimistic-ddl-example-9.png) +![optimistic-ddl-example-9](./media/dm/optimistic-ddl-example-9.png) By then, the `Name` columns are dropped from all sharded tables and can be safely dropped in the downstream: @@ -190,4 +190,4 @@ By then, the `Name` columns are dropped from all sharded tables and can be safel ALTER TABLE `tbl` DROP COLUMN `Name`; ``` -![optimistic-ddl-example-10](/media/dm/optimistic-ddl-example-10.png) +![optimistic-ddl-example-10](./media/dm/optimistic-ddl-example-10.png) diff --git a/dm/feature-shard-merge-pessimistic.md b/dm/feature-shard-merge-pessimistic.md index 349cbee9cdd8e..e376ebe067551 100644 --- a/dm/feature-shard-merge-pessimistic.md +++ b/dm/feature-shard-merge-pessimistic.md @@ -39,7 +39,7 @@ However, in the process of merging and migrating sharded tables, if DDL statemen Here is a simple example: -![shard-ddl-example-1](/media/dm/shard-ddl-example-1.png) +![shard-ddl-example-1](./media/dm/shard-ddl-example-1.png) In the above example, the merging process is simplified, where only two MySQL instances exist in the upstream and each instance has only one table. When the migration begins, the table schema version of two sharded tables is marked as `schema V1`, and the table schema version after executing DDL statements is marked as `schema V2`. 
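To make the example concrete, the change from `schema V1` to `schema V2` can be thought of as the same DDL statement being executed on each upstream sharded table at different times. The table and column names below are only illustrative:

```sql
-- Illustrative only: executing this statement on a sharded table upgrades it
-- from `schema V1` to `schema V2`. In the pessimistic mode, DM does not apply
-- the DDL to the downstream until every sharded table in the group has
-- executed the same statement.
ALTER TABLE `shard_db_01`.`shard_table` ADD COLUMN `Level` INT;
```

Until all sharded tables have executed the statement, the shards that have already run it have their data migration paused, which is why DDL statements that stay pending on some shards for a long time should be avoided in the pessimistic mode.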
@@ -57,7 +57,7 @@ Assume that the DDL statements of sharded tables are not processed during the mi This section shows how DM migrates DDL statements in the process of merging sharded tables based on the above example in the pessimistic mode. -![shard-ddl-flow](/media/dm/shard-ddl-flow.png) +![shard-ddl-flow](./media/dm/shard-ddl-flow.png) In this example, `DM-worker-1` migrates the data from MySQL instance 1 and `DM-worker-2` migrates the data from MySQL instance 2. `DM-master` coordinates the DDL migration among multiple DM-workers. Starting from `DM-worker-1` receiving the DDL statements, the DDL migration process is simplified as follows: @@ -81,7 +81,7 @@ In the above example, only one sharded table needs to be merged in the upstream Assume that there are two sharded tables, namely `table_1` and `table_2`, to be merged in one MySQL instance: -![shard-ddl-example-2](/media/dm/shard-ddl-example-2.png) +![shard-ddl-example-2](./media/dm/shard-ddl-example-2.png) Because data comes from the same MySQL instance, all the data is obtained from the same binlog stream. In this case, the time sequence is as follows: diff --git a/dr-backup-restore.md b/dr-backup-restore.md index 9e79357a993d4..622f02eccb669 100644 --- a/dr-backup-restore.md +++ b/dr-backup-restore.md @@ -16,7 +16,7 @@ Generally speaking, BR is the last resort for data safety. It improves the safet ## Perform backup and restore -![BR log backup and PITR architecture](/media/dr/dr-backup-and-restore.png) +![BR log backup and PITR architecture](./media/dr/dr-backup-and-restore.png) As shown in the preceding architecture, you can back up data to a DR storage device located in other regions, and recover data from the backup data as needed. This means that the cluster can tolerate the failure of a single region with a Recovery Point Objective (RPO) of up to 5 minutes and a Recovery Time Objective (RTO) between tens of minutes and a few hours. However, if the database size is large, the RTO might be longer. diff --git a/dr-secondary-cluster.md b/dr-secondary-cluster.md index 37126f1e3e58d..590ed7fb4c3de 100644 --- a/dr-secondary-cluster.md +++ b/dr-secondary-cluster.md @@ -26,7 +26,7 @@ Meanwhile, this document also describes how to query business data on the second ### Architecture -![TiCDC secondary cluster architecture](/media/dr/dr-ticdc-secondary-cluster.png) +![TiCDC secondary cluster architecture](./media/dr/dr-ticdc-secondary-cluster.png) The preceding architecture includes two TiDB clusters: a primary cluster and a secondary cluster. @@ -412,7 +412,7 @@ storage = "s3://redo?access-key=minio&secret-access-key=miniostorage&endpoint=ht In this DR scenario, the TiDB clusters in two regions can act as each other's disaster recovery clusters: the business traffic is written to the corresponding TiDB cluster based on the region configuration, and the two TiDB clusters back up each other's data. -![TiCDC bidirectional replication](/media/dr/bdr-ticdc.png) +![TiCDC bidirectional replication](./media/dr/bdr-ticdc.png) With the bidirectional replication feature, the TiDB clusters in two regions can replicate each other's data. This DR solution guarantees data security and reliability, and also ensures the write performance of the database. In a planned DR switchover, you do not need to stop the running changefeeds before starting a new changefeed, which simplifies the operation and maintenance. 
diff --git a/dr-solution-introduction.md b/dr-solution-introduction.md index dd70a150d0c78..5a2854301e5ac 100644 --- a/dr-solution-introduction.md +++ b/dr-solution-introduction.md @@ -19,7 +19,7 @@ This document introduces the disaster recovery (DR) solutions provided by TiDB. The following figure illustrates these two concepts: -![RTO and RPO](/media/dr/rto-rpo.png) +![RTO and RPO](./media/dr/rto-rpo.png) - Error tolerance objective: Because a disaster can affect different regions. In this document, the term error tolerance objective is used to describe the maximum range of a disaster that the system can tolerate. - Region: This document focuses on regional DR and "region" mentioned here refers to a geographical area or city. @@ -30,7 +30,7 @@ Before introducing specific DR solutions, this section introduces the architectu ### TiDB -![TiDB architecture](/media/dr/tidb-architecture.png) +![TiDB architecture](./media/dr/tidb-architecture.png) TiDB is designed with an architecture of separated computing and storage: @@ -42,7 +42,7 @@ TiDB stores three complete data replicas. Therefore, it is naturally capable of ### TiCDC -![TiCDC architecture](/media/ticdc/cdc-architecture.png) +![TiCDC architecture](./media/ticdc/cdc-architecture.png) As an incremental data replication tool for TiDB, TiCDC is highly available through PD's etcd. TiCDC pulls data changes from TiKV nodes through multiple Capture processes, and then sorts and merges data changes internally. After that, TiCDC replicates data to multiple downstream systems by using multiple replication tasks. In the preceding architecture diagram: @@ -53,7 +53,7 @@ It can be seen from the preceding architecture diagram that, the architecture of ### BR -![BR architecture](/media/br/br-snapshot-arch.png) +![BR architecture](./media/br/br-snapshot-arch.png) As a backup and restore tool for TiDB, BR can perform full snapshot backup based on a specific time point and continuous log backup of a TiDB cluster. When the TiDB cluster is completely unavailable, you can restore the backup files in a new cluster. BR is usually considered the last resort for data security. @@ -61,7 +61,7 @@ As a backup and restore tool for TiDB, BR can perform full snapshot backup based ### DR solution based on primary and secondary clusters -![Primary-secondary cluster DR](/media/dr/ticdc-dr.png) +![Primary-secondary cluster DR](./media/dr/ticdc-dr.png) The preceding architecture contains two TiDB clusters, cluster 1 runs in region 1 and handles read and write requests. Cluster 2 runs in region 2 and works as the secondary cluster. When cluster 1 encounters a disaster, cluster 2 takes over services. Data changes are replicated between the two clusters using TiCDC. This architecture is also called the "1:1" DR solution. @@ -69,7 +69,7 @@ This architecture is simple and highly available with region-level error toleran ### DR solution based on multiple replicas in a single cluster -![Multi-replica cluster DR](/media/dr/multi-replica-dr.png) +![Multi-replica cluster DR](./media/dr/multi-replica-dr.png) In the preceding architecture, each region has two complete data replicas located in different available zones (AZs). The entire cluster is across three regions. Region 1 is the primary region that handles read and write requests. When region 1 is completely unavailable due to a disaster, region 2 can be used as a DR region. Region 3 is a replica used to meet the majority protocol. This architecture is also called the "2-2-1" solution. 
@@ -79,7 +79,7 @@ This solution provides region-level error tolerance, scalable write capability, The preceding two solutions provide regional DR. However, they fail to work if multiple regions are unavailable at the same time. If your system is very important and requires error tolerance objective to cover multiple regions, you need to combine these two solutions. -![TiCDC-based multi-replica cluster DR](/media/dr/ticdc-multi-replica-dr.png) +![TiCDC-based multi-replica cluster DR](./media/dr/ticdc-multi-replica-dr.png) In the preceding architecture, there are two TiDB clusters. Cluster 1 has five replicas that span three regions. Region 1 contains two replicas that work as the primary region and handle write requests. Region 2 has two replicas that work as the DR region for region 1. This region provides read services that are not sensitive to latency. Located in Region 3, the last replica is used for voting. @@ -89,7 +89,7 @@ Of course, if the error tolerance objective is multiple regions and RPO must be ### DR solution based on BR -![BR-based cluster DR](/media/dr/br-dr.png) +![BR-based cluster DR](./media/dr/br-dr.png) In this architecture, TiDB cluster 1 is deployed in region 1. BR regularly backs up the data of cluster 1 to region 2, and continuously backs up the data change logs of this cluster to region 2 as well. When region 1 encounters a disaster and cluster 1 cannot be recovered, you can use the backup data and data change logs to restore a new cluster (cluster 2) in region 2 to provide services. diff --git a/exporting-grafana-snapshots.md b/exporting-grafana-snapshots.md index ce2c8511098d4..fd5aadc599871 100644 --- a/exporting-grafana-snapshots.md +++ b/exporting-grafana-snapshots.md @@ -22,11 +22,11 @@ MetricsTool can be accessed from . It consists * **Export**: A user script running on the browser's Developer Tool, allowing you to download a snapshot of all visible panels in the current dashboard on any Grafana v6.x.x server. - ![Screenshot of MetricsTool Exporter after running the user script](/media/metricstool-export.png) + ![Screenshot of MetricsTool Exporter after running the user script](./media/metricstool-export.png) * **Visualize**: A web page visualizing the exported snapshot files. The visualized snapshots can be operated in the same way as live Grafana dashboards. - ![Screenshot of MetricsTool Visualizer](/media/metricstool-visualize.png) + ![Screenshot of MetricsTool Visualizer](./media/metricstool-visualize.png) * **Import**: Instructions to import the exported snapshot back into an actual Grafana instance. diff --git a/grafana-overview-dashboard.md b/grafana-overview-dashboard.md index 1a0a935dab9a3..5dce043d93de6 100644 --- a/grafana-overview-dashboard.md +++ b/grafana-overview-dashboard.md @@ -71,4 +71,4 @@ To understand the key metrics displayed on the Overview dashboard, check the fol ## Interface of the Overview dashboard -![overview](/media/grafana-monitor-overview.png) +![overview](./media/grafana-monitor-overview.png) diff --git a/grafana-pd-dashboard.md b/grafana-pd-dashboard.md index 048dc0c7b3d69..ffedfe5cedecc 100644 --- a/grafana-pd-dashboard.md +++ b/grafana-pd-dashboard.md @@ -23,7 +23,7 @@ The following is the description of PD Dashboard metrics items: - Abnormal stores: The count of unhealthy stores. The normal value is `0`. If the number is bigger than `0`, it means at least one instance is abnormal. 
- Region health: The health status of Regions indicated via the count of unusual Regions including pending peers, down peers, extra peers, offline peers, missing peers, learner peers and incorrect namespaces. Generally, the number of pending peers should be less than `100`. The missing peers should not be persistently greater than `0`. If many empty Regions exist, enable Region Merge in time. - Current peer count: The current count of all cluster peers -![PD Dashboard - Header](/media/pd-dashboard-header-v4.png) +![PD Dashboard - Header](./media/pd-dashboard-header-v4.png) ## Key metrics description @@ -37,7 +37,7 @@ The following is the description of PD Dashboard metrics items: - Label distribution: The distribution status of the labels in the cluster - Store Limit: The flow control limitation of scheduling on the Store -![PD Dashboard - Cluster metrics](/media/pd-dashboard-cluster-v4.png) +![PD Dashboard - Cluster metrics](./media/pd-dashboard-cluster-v4.png) ## Operator @@ -50,7 +50,7 @@ The following is the description of PD Dashboard metrics items: - Operator finish duration: The maximum duration of finished operators - Operator step duration: The maximum duration of finished operator steps -![PD Dashboard - Operator metrics](/media/pd-dashboard-operator-v4.png) +![PD Dashboard - Operator metrics](./media/pd-dashboard-operator-v4.png) ## Statistics - Balance @@ -66,7 +66,7 @@ The following is the description of PD Dashboard metrics items: - Store leader count: The leader count per TiKV instance - Store Region count: The Region count per TiKV instance -![PD Dashboard - Balance metrics](/media/pd-dashboard-balance-v4.png) +![PD Dashboard - Balance metrics](./media/pd-dashboard-balance-v4.png) ## Statistics - hot write @@ -81,7 +81,7 @@ The following is the description of PD Dashboard metrics items: - Direction of hotspot move leader: The direction of leader movement in the hotspot scheduling. The positive number means scheduling into the instance. The negative number means scheduling out of the instance - Direction of hotspot move peer: The direction of peer movement in the hotspot scheduling. The positive number means scheduling into the instance. 
The negative number means scheduling out of the instance -![PD Dashboard - Hot write metrics](/media/pd-dashboard-hotwrite-v4.png) +![PD Dashboard - Hot write metrics](./media/pd-dashboard-hotwrite-v4.png) ## Statistics - hot read @@ -91,7 +91,7 @@ The following is the description of PD Dashboard metrics items: - Store read rate keys: The total read keys of each TiKV instance - Hot cache read entry number: The number of peers that are in the read hotspot statistics module on each TiKV instance -![PD Dashboard - Hot read metrics](/media/pd-dashboard-hotread-v4.png) +![PD Dashboard - Hot read metrics](./media/pd-dashboard-hotread-v4.png) ## Scheduler @@ -109,14 +109,14 @@ The following is the description of PD Dashboard metrics items: - Filter source: The number of attempts that the store is selected as the scheduling source but failed to pass the filter - Balance Direction: The number of times that the Store is selected as the target or source of scheduling -![PD Dashboard - Scheduler metrics](/media/pd-dashboard-scheduler-v4.png) +![PD Dashboard - Scheduler metrics](./media/pd-dashboard-scheduler-v4.png) ## gRPC - Completed commands rate: The rate per command type at which gRPC commands are completed - 99% Completed commands duration: The rate per command type at which gRPC commands are completed (P99) -![PD Dashboard - gRPC metrics](/media/pd-dashboard-grpc-v2.png) +![PD Dashboard - gRPC metrics](./media/pd-dashboard-grpc-v2.png) ## etcd @@ -129,7 +129,7 @@ The following is the description of PD Dashboard metrics items: - Raft committed index: The last committed index of Raft - Raft applied index: The last applied index of Raft -![PD Dashboard - etcd metrics](/media/pd-dashboard-etcd-v2.png) +![PD Dashboard - etcd metrics](./media/pd-dashboard-etcd-v2.png) ## TiDB @@ -137,7 +137,7 @@ The following is the description of PD Dashboard metrics items: - Handle requests count: The count of TiDB requests - Handle requests duration: The time consumed for handling TiDB requests. 
It should be less than `100ms` (P99) -![PD Dashboard - TiDB metrics](/media/pd-dashboard-tidb-v4.png) +![PD Dashboard - TiDB metrics](./media/pd-dashboard-tidb-v4.png) ## Heartbeat @@ -148,11 +148,11 @@ The following is the description of PD Dashboard metrics items: - Region schedule push: The count of corresponding schedule commands sent from PD per TiKV instance - 99% Region heartbeat latency: The heartbeat latency per TiKV instance (P99) -![PD Dashboard - Heartbeat metrics](/media/pd-dashboard-heartbeat-v4.png) +![PD Dashboard - Heartbeat metrics](./media/pd-dashboard-heartbeat-v4.png) ## Region storage - Syncer Index: The maximum index in the Region change history recorded by the leader - history last index: The last index where the Region change history is synchronized successfully with the follower -![PD Dashboard - Region storage](/media/pd-dashboard-region-storage.png) +![PD Dashboard - Region storage](./media/pd-dashboard-region-storage.png) diff --git a/grafana-performance-overview-dashboard.md b/grafana-performance-overview-dashboard.md index 3aa9ae07d9348..aee5a64e0f0df 100644 --- a/grafana-performance-overview-dashboard.md +++ b/grafana-performance-overview-dashboard.md @@ -163,7 +163,7 @@ All these three metrics include the average duration and P99 duration in all TiK ### Interface of the Performance Overview panels -![performance overview](/media/performance/grafana_performance_overview.png) +![performance overview](./media/performance/grafana_performance_overview.png) ## TiFlash diff --git a/grafana-tikv-dashboard.md b/grafana-tikv-dashboard.md index 5b7d3062d5714..9e7f27ebc13e4 100644 --- a/grafana-tikv-dashboard.md +++ b/grafana-tikv-dashboard.md @@ -31,7 +31,7 @@ This section provides a detailed description of these key metrics on the **TiKV- - Region: The number of Regions per TiKV instance - Uptime: The runtime of TiKV since last restart -![TiKV Dashboard - Cluster metrics](/media/tikv-dashboard-cluster.png) +![TiKV Dashboard - Cluster metrics](./media/tikv-dashboard-cluster.png) ### Errors @@ -46,7 +46,7 @@ This section provides a detailed description of these key metrics on the **TiKV- - Leader missing: The count of missing leaders per TiKV instance - Log Replication Reject: The number of logappend messages rejected due to insufficient memory on each TiKV instance -![TiKV Dashboard - Errors metrics](/media/tikv-dashboard-errors-v610.png) +![TiKV Dashboard - Errors metrics](./media/tikv-dashboard-errors-v610.png) ### Server @@ -59,7 +59,7 @@ This section provides a detailed description of these key metrics on the **TiKV- - Region average written keys: The average number of written keys to Regions per TiKV instance - Region average written bytes: The average written bytes to Regions per TiKV instance -![TiKV Dashboard - Server metrics](/media/tikv-dashboard-server.png) +![TiKV Dashboard - Server metrics](./media/tikv-dashboard-server.png) ### gRPC @@ -109,7 +109,7 @@ This section provides a detailed description of these key metrics on the **TiKV- - Commit log duration: The time consumed by Raft to commit logs - Commit log duration per server: The time consumed by Raft to commit logs per TiKV instance -![TiKV Dashboard - Raft IO metrics](/media/tikv-dashboard-raftio.png) +![TiKV Dashboard - Raft IO metrics](./media/tikv-dashboard-raftio.png) ### Raft process @@ -128,7 +128,7 @@ This section provides a detailed description of these key metrics on the **TiKV- - Replica read lock checking duration: The time consumed for checking locks when processing Replica Read. 
- Peer msg length distribution: The number of messages processed by each Region in each TiKV instance at a time. The more messages, the busier the peer is. -![TiKV Dashboard - Raft process metrics](/media/tikv-dashboard-raft-process.png) +![TiKV Dashboard - Raft process metrics](./media/tikv-dashboard-raft-process.png) ### Raft message @@ -139,7 +139,7 @@ This section provides a detailed description of these key metrics on the **TiKV- - Vote: The number of Vote messages sent in Raft per second - Raft dropped messages: The number of dropped Raft messages per type per second -![TiKV Dashboard - Raft message metrics](/media/tikv-dashboard-raft-message.png) +![TiKV Dashboard - Raft message metrics](./media/tikv-dashboard-raft-message.png) ### Raft propose @@ -153,7 +153,7 @@ This section provides a detailed description of these key metrics on the **TiKV- - Apply wait duration per server: The histogram of apply time of each proposal per TiKV instance - Raft log speed: The average rate at which peers propose logs -![TiKV Dashboard - Raft propose metrics](/media/tikv-dashboard-raft-propose.png) +![TiKV Dashboard - Raft propose metrics](./media/tikv-dashboard-raft-propose.png) ### Raft admin @@ -162,13 +162,13 @@ This section provides a detailed description of these key metrics on the **TiKV- - Check split: The number of Raftstore split check commands per second - 99.99% Check split duration: The time consumed when running split check commands (P99.99) -![TiKV Dashboard - Raft admin metrics](/media/tikv-dashboard-raft-admin.png) +![TiKV Dashboard - Raft admin metrics](./media/tikv-dashboard-raft-admin.png) ### Local reader - Local reader requests: The number of total requests and the number of rejections from the local read thread -![TiKV Dashboard - Local reader metrics](/media/tikv-dashboard-local-reader.png) +![TiKV Dashboard - Local reader metrics](./media/tikv-dashboard-local-reader.png) ### Unified Read Pool @@ -183,7 +183,7 @@ This section provides a detailed description of these key metrics on the **TiKV- - Storage async snapshot duration: The time consumed by processing asynchronous snapshot requests. It should be less than `1s` in `.99`. - Storage async write duration: The time consumed by processing asynchronous write requests. It should be less than `1s` in `.99`. -![TiKV Dashboard - Storage metrics](/media/tikv-dashboard-storage.png) +![TiKV Dashboard - Storage metrics](./media/tikv-dashboard-storage.png) ### Flow Control @@ -198,7 +198,7 @@ This section provides a detailed description of these key metrics on the **TiKV- - Txn command throttled duration: The blocked duration for commands related to transactions due to throttling. Under normal circumstances, this metric is 0. - Non-txn command throttled duration: The blocked duration for other commands due to throttling. Under normal circumstances, this metric is 0. 
-![TiKV Dashboard - Flow Control metrics](/media/tikv-dashboard-flow-control.png) +![TiKV Dashboard - Flow Control metrics](./media/tikv-dashboard-flow-control.png) ### Scheduler @@ -207,7 +207,7 @@ This section provides a detailed description of these key metrics on the **TiKV- - Scheduler priority commands: The count of different priority commands per second - Scheduler pending commands: The count of pending commands per TiKV instance per second -![TiKV Dashboard - Scheduler metrics](/media/tikv-dashboard-scheduler.png) +![TiKV Dashboard - Scheduler metrics](./media/tikv-dashboard-scheduler.png) ### Scheduler - commit @@ -221,7 +221,7 @@ This section provides a detailed description of these key metrics on the **TiKV- - Scheduler scan details [write]: The keys scan details of write CF when executing the commit command - Scheduler scan details [default]: The keys scan details of default CF when executing the commit command -![TiKV Dashboard - Scheduler commit metrics](/media/tikv-dashboard-scheduler-commit.png) +![TiKV Dashboard - Scheduler commit metrics](./media/tikv-dashboard-scheduler-commit.png) ### Scheduler - pessimistic_rollback diff --git a/join-reorder.md b/join-reorder.md index 747c18365e058..7cfe3d4ef3afe 100644 --- a/join-reorder.md +++ b/join-reorder.md @@ -34,23 +34,23 @@ Take the preceding three tables (t1, t2, and t3) as an example. First, TiDB obtains all the nodes that participates in the join operation, and sorts the nodes in the ascending order of row numbers. -![join-reorder-1](/media/join-reorder-1.png) +![join-reorder-1](./media/join-reorder-1.png) After that, the table with the least rows is selected and joined with other two tables respectively. By comparing the sizes of the output result sets, TiDB selects the pair with a smaller result set. -![join-reorder-2](/media/join-reorder-2.png) +![join-reorder-2](./media/join-reorder-2.png) Then TiDB enters the next round of selection. If you try to join four tables, TiDB continues to compare the sizes of the output result sets and selects the pair with a smaller result set. In this case only three tables are joined, so TiDB gets the final join result. -![join-reorder-3](/media/join-reorder-3.png) +![join-reorder-3](./media/join-reorder-3.png) ## Example: the dynamic programming algorithm of Join Reorder Taking the preceding three tables (t1, t2, and t3) as an example again, the dynamic programming algorithm can enumerate all possibilities. Therefore, comparing with the greedy algorithm, which must start with the `t1` table (the table with the least rows), the dynamic programming algorithm can enumerate a join order as follows: -![join-reorder-4](/media/join-reorder-4.png) +![join-reorder-4](./media/join-reorder-4.png) When this choice is better than the greedy algorithm, the dynamic programming algorithm can choose a better join order. diff --git a/migrate-from-vitess.md b/migrate-from-vitess.md index 74f4bfd190a7b..0b9bdc01b066a 100644 --- a/migrate-from-vitess.md +++ b/migrate-from-vitess.md @@ -28,14 +28,14 @@ The following two examples show how Dumpling and TiDB Lightning work together to - In this example, TiDB Lightning uses the [logical import mode](/tidb-lightning/tidb-lightning-logical-import-mode.md), which first encodes data into SQL statements and then runs the SQL statements to import data. 
- ![Vitess to TiDB Migration with TiDB backend](/media/vitess_to_tidb.png) + ![Vitess to TiDB Migration with TiDB backend](./media/vitess_to_tidb.png) - In this example, TiDB Lightning uses the [physical import mode](/tidb-lightning/tidb-lightning-physical-import-mode.md) to directly ingest data into TiKV. - ![Vitess to TiDB Migration with local backend](/media/vitess_to_tidb_dumpling_local.png) + ![Vitess to TiDB Migration with local backend](./media/vitess_to_tidb_dumpling_local.png) ### DM The following example shows how [DM](/dm/dm-overview.md) migrates data from Vitess to TiDB. -![Vitess to TiDB with DM](/media/vitess_to_tidb_dm.png) +![Vitess to TiDB with DM](./media/vitess_to_tidb_dm.png) diff --git a/migrate-with-pt-ghost.md b/migrate-with-pt-ghost.md index 4505e7a38c3f6..d312d21d3d3df 100644 --- a/migrate-with-pt-ghost.md +++ b/migrate-with-pt-ghost.md @@ -54,7 +54,7 @@ The workflow of DM: - Apply DDLs recorded downstream. -![dm-online-ddl](/media/dm/dm-online-ddl.png) +![dm-online-ddl](./media/dm/dm-online-ddl.png) The change in the workflow brings the following advantages: diff --git a/multi-data-centers-in-one-city-deployment.md b/multi-data-centers-in-one-city-deployment.md index 5f67a00b6040f..ed89eb5fbd1fc 100644 --- a/multi-data-centers-in-one-city-deployment.md +++ b/multi-data-centers-in-one-city-deployment.md @@ -42,7 +42,7 @@ TiDB clusters can be deployed in three AZs in the same region. In this solution, TiDB, TiKV, and PD are distributed among three AZs, which is the most common deployment with the highest availability. -![3-AZ Deployment Architecture](/media/deploy-3dc.png) +![3-AZ Deployment Architecture](./media/deploy-3dc.png) **Advantages:** @@ -50,7 +50,7 @@ TiDB, TiKV, and PD are distributed among three AZs, which is the most common dep - No data will be lost if one AZ is down (RPO = 0). - Even if one AZ is down, the other two AZs will automatically start leader election and automatically resume services within a certain period (within 20 seconds in most cases). See the following diagram for more information: -![Disaster Recovery for 3-AZ Deployment](/media/deploy-3dc-dr.png) +![Disaster Recovery for 3-AZ Deployment](./media/deploy-3dc-dr.png) **Disadvantages:** @@ -64,7 +64,7 @@ The performance can be affected by the network latency. If not all of the three AZs need to provide services to the applications, you can dispatch all the requests to one AZ and configure the scheduling policy to migrate the TiKV Region leader and PD leader to the same AZ. In this way, neither obtaining TSO nor reading TiKV Regions will be impacted by the network latency across AZs. If this AZ is down, the PD leader and TiKV Region leader will be automatically elected in other surviving AZs, and you just need to switch the requests to the AZs that are still alive. -![Read Performance Optimized 3-AZ Deployment](/media/deploy-3dc-optimize.png) +![Read Performance Optimized 3-AZ Deployment](./media/deploy-3dc-optimize.png) **Advantages:** @@ -100,7 +100,7 @@ This section provides a topology example, and introduces TiKV labels and TiKV la The following example assumes that three AZs (AZ1, AZ2, and AZ3) are located in one region; each AZ has two sets of racks and each rack has three servers. The example ignores the hybrid deployment or the scenario where multiple instances are deployed on one machine. 
The deployment of a TiDB cluster (three replicas) on three AZs in one region is as follows: -![3-AZ in One Region](/media/multi-data-centers-in-one-city-deployment-sample.png) +![3-AZ in One Region](./media/multi-data-centers-in-one-city-deployment-sample.png) #### TiKV labels diff --git a/optimistic-transaction.md b/optimistic-transaction.md index f7e4b1fe558c1..8ce5e181d81cf 100644 --- a/optimistic-transaction.md +++ b/optimistic-transaction.md @@ -18,7 +18,7 @@ Before enabling optimistic transactions, make sure that your application correct To support distributed transactions, TiDB adopts two-phase commit (2PC) in optimistic transactions. The procedure is as follows: -![2PC in TiDB](/media/2pc-in-tidb.png) +![2PC in TiDB](./media/2pc-in-tidb.png) 1. The client begins a transaction. @@ -144,6 +144,6 @@ scheduler-concurrency = 2048000 In addition, TiKV supports monitoring the time spent on waiting latches in the scheduler. -![Scheduler latch wait duration](/media/optimistic-transaction-metric.png) +![Scheduler latch wait duration](./media/optimistic-transaction-metric.png) When `Scheduler latch wait duration` is high and there are no slow writes, it can be safely concluded that there are many write conflicts at this time. diff --git a/performance-tuning-methods.md b/performance-tuning-methods.md index 45c7d746f8d07..578c20519026d 100644 --- a/performance-tuning-methods.md +++ b/performance-tuning-methods.md @@ -28,7 +28,7 @@ TiDB is constantly measuring and collecting SQL processing paths and database ti The following figure shows a typical SQL process. You can see that most SQL processing paths are covered in TiDB performance metrics. The database time is broken down into different dimensions, which are colored accordingly. You can quickly understand the workload characteristics and catch the bottlenecks inside the database if any. -![database time decomposition chart](/media/performance/dashboard-diagnostics-time-relation.png) +![database time decomposition chart](./media/performance/dashboard-diagnostics-time-relation.png) Database time is the sum of all SQL processing time. A breakdown of the database time into the following three dimensions helps you quickly identify bottlenecks in TiDB: @@ -82,7 +82,7 @@ The diagrams of database time breakdown and execution time overview present both **Example 1: TPC-C workload** -![TPC-C](/media/performance/tpcc_db_time.png) +![TPC-C](./media/performance/tpcc_db_time.png) - Database Time by SQL Type: Most time-consuming statements are `commit`, `update`, `select`, and `insert` statements. - Database Time by SQL Phase: The most time-consuming phase is SQL execution in green. @@ -99,7 +99,7 @@ The diagrams of database time breakdown and execution time overview present both **Example 2: OLTP read-heavy workload** -![OLTP](/media/performance/oltp_normal_db_time.png) +![OLTP](./media/performance/oltp_normal_db_time.png) - Database Time by SQL Type: Major time-consuming statements are `SELECT`, `COMMIT`, `UPDATE`, and `INSERT`, among which `SELECT` consumes most database time. - Database Time by SQL Phase: Most time is consumed in the `execute` phase in green. @@ -107,7 +107,7 @@ The diagrams of database time breakdown and execution time overview present both **Example 3: Read-only OLTP workload** -![OLTP](/media/performance/oltp_long_compile_db_time.png) +![OLTP](./media/performance/oltp_long_compile_db_time.png) - Database Time by SQL Type: Mainly are `SELECT` statements. 
- Database Time by SQL Phase: Major time-consuming phases are `compile` in orange and `execute` in green. Latency in the `compile` phase is the highest, indicating that TiDB is taking too long to generate execution plans and the root cause needs to be further determined based on the subsequent performance data. @@ -119,7 +119,7 @@ The diagrams of database time breakdown and execution time overview present both **Example 4: Lock contention workload** -![OLTP](/media/performance/oltp_lock_contention_db_time.png) +![OLTP](./media/performance/oltp_lock_contention_db_time.png) - Database Time by SQL Type: Mainly are `UPDATE` statements. - Database Time by SQL Phase: Most time is consumed in the execute phase in green. @@ -127,7 +127,7 @@ The diagrams of database time breakdown and execution time overview present both **Example 5: HTAP CH-Benchmark workload** -![HTAP](/media/performance/htap_tiflash_mpp.png) +![HTAP](./media/performance/htap_tiflash_mpp.png) - Database Time by SQL Type: Mainly are `SELECT` statements. - Database Time by SQL Phase: Most time is consumed in the execute phase in green. @@ -156,13 +156,13 @@ By checking the following three panels in Performance Overview, you can learn th The TPC-C workload are mainly `UPDATE`, `SELECT`, and `INSERT` statements. The total QPS is equal to the number of `StmtExecute` commands per second and the latter is almost equal to `avg-hit` on the Queries Using Plan Cache OPS panel. Ideally, the client caches the object of the prepared statement. In this way, the cached statement is called directly when a SQL statement is executed. All SQL executions hit the prepared plan cache, and there is no need to recompile to generate execution plans. -![TPC-C](/media/performance/tpcc_qps.png) +![TPC-C](./media/performance/tpcc_qps.png) **Example 2: Prepared plan cache unavailable for query commands in read-only OLTP workload** In this workload, `Commit QPS` = `Rollback QPS` = `Select QPS`. The application has enabled auto-commit concurrency, and rollback is performed every time a connection is fetched from the connection pool. As a result, these three statements are executed the same number of times. -![OLTP-Query](/media/performance/oltp_long_compile_qps.png) +![OLTP-Query](./media/performance/oltp_long_compile_qps.png) - The red bold line in the QPS panel stands for failed queries, and the Y-axis on the right shows the number of failed queries. A value other than 0 means the presence of failed queries. - The total QPS is equal to the number of queries in the CPS By Type panel, the query command has been used by the application. @@ -179,13 +179,13 @@ In this workload, `Commit QPS` = `Rollback QPS` = `Select QPS`. The application > > Starting from TiDB v6.0.0, you can prevent the `StmtClose` command from clearing cached execution plans via the global variable (`set global tidb_ignore_prepared_cache_close_stmt=on;`). In this way, subsequent executions can hit the prepared plan cache. -![OLTP-Prepared](/media/performance/oltp_prepared_statement_no_plan_cache.png) +![OLTP-Prepared](./media/performance/oltp_prepared_statement_no_plan_cache.png) **Example 4: Prepared statements have a resource leak** The number of `StmtPrepare` commands per second is much greater than that of `StmtClose` per second, which indicates that the application has an object leak for prepared statements. 
-![OLTP-Query](/media/performance/prepared_statement_leaking.png) +![OLTP-Query](./media/performance/prepared_statement_leaking.png) - In the QPS panel, the red bold line indicates the number of failed queries, and the Y axis on the right indicates the coordinate value of the number. In this example, the number of failed queries per second is 74.6. - In the CPS By Type panel, the number of `StmtPrepare` commands per second is much greater than that of `StmtClose` per second, which indicates that an object leak occurs in the application for prepared statements. @@ -200,7 +200,7 @@ The number of `StmtPrepare` commands per second is much greater than that of `St **Example 1: Busy workload** -![TPC-C](/media/performance/tpcc_source_sql.png) +![TPC-C](./media/performance/tpcc_source_sql.png) In this TPC-C workload: @@ -209,7 +209,7 @@ In this TPC-C workload: **Example 2: Analyze workload** -![OLTP](/media/performance/internal_stats.png) +![OLTP](./media/performance/internal_stats.png) In this workload, only `ANALYZE` statements are running in the cluster: @@ -227,7 +227,7 @@ In the CPU/Memory panels of TiDB, TiKV, and PD, you can monitor their respective In the following TPC-C workload, each TiDB and TiKV is configured with 16 CPUs. PD is configured with 4 CPUs. -![TPC-C](/media/performance/tpcc_cpu_memory.png) +![TPC-C](./media/performance/tpcc_cpu_memory.png) - The average, maximum, and delta CPU usage of TiDB are 761%, 934%, and 322%, respectively. The maximum memory usage is 6.86 GiB. - The average, maximum, and delta CPU usage of TiKV are 1343%, 1505%, and 283%, respectively. The maximum memory usage is 27.1 GiB. @@ -277,7 +277,7 @@ The following is an example of read and write traffic in the TPC-C workload. - `TiKV -> Rocksdb`: 109 MB/s - `RocksDB Compaction`: 567 MB/s -![TPC-C](/media/performance/tpcc_read_write_traffic.png) +![TPC-C](./media/performance/tpcc_read_write_traffic.png) **Example 2: Write traffic before and after Titan is enabled** @@ -291,7 +291,7 @@ The following is an example of read and write traffic in the TPC-C workload. - `TiKV -> Rocksdb`: 753 MB/s - `RocksDB Compaction`: 10.6 GB/s - ![Titan Disable](/media/performance/titan_disable.png) + ![Titan Disable](./media/performance/titan_disable.png) - Write traffic after Titan is enabled @@ -301,7 +301,7 @@ The following is an example of read and write traffic in the TPC-C workload. 
- `TiKV -> Rocksdb`: 1.21 GB/s - `RocksDB Compaction`: 4.68 MB/s - ![Titan Enable](/media/performance/titan_enable.png) + ![Titan Enable](./media/performance/titan_enable.png) ### Query latency breakdown and key latency metrics @@ -331,7 +331,7 @@ In the Connection Count panel, you can check the total number of connections and **Example 1: The number of disconnection/s is too high** -![high disconnection/s](/media/performance/high_disconnections.png) +![high disconnection/s](./media/performance/high_disconnections.png) In this workload: @@ -341,7 +341,7 @@ In this workload: **Example 2: TiDB is the bottleneck of user response time** -![TiDB is the Bottleneck](/media/performance/tpcc_duration_idle.png) +![TiDB is the Bottleneck](./media/performance/tpcc_duration_idle.png) In this TPC-C workload: @@ -352,7 +352,7 @@ The average query latency is significantly greater than `avg-in-txn`, which mean **Example 3: TiDB is not the bottleneck of user response time** -![TiDB is not Bottleneck](/media/performance/cloud_query_long_idle.png) +![TiDB is not Bottleneck](./media/performance/cloud_query_long_idle.png) In this workload, the average query latency is 1.69 ms and `avg-in-txn` is 18 ms, indicating that TiDB spends 1.69 ms on average to process a SQL statement in transactions, and then needs to wait for 18 ms to receive the next statement. @@ -382,13 +382,13 @@ Usually, the `execute` phase accounts for the most of the `query` latency. Howev **Example 1: Database bottleneck in the `compile` phase** -![Compile](/media/performance/long_compile.png) +![Compile](./media/performance/long_compile.png) In the preceding figure, the average time of the `parse`, `compile`, and `execute` phases are 17.1 us, 729 us, and 681 us, respectively. The `compile` latency is high because the application uses the `query` command interface and cannot use prepared plan cache. **Example 2: Database bottleneck in the `execute` phase** -![Execute](/media/performance/long_execute.png) +![Execute](./media/performance/long_execute.png) In this TPC-C workload, the average time of `parse`, `compile` and `execute` phases are 7.39 us, 38.1 us, and 12.8 ms, respectively. The `execute` phase is the bottleneck of the `query` latency. @@ -404,7 +404,7 @@ The TSO wait time is recorded as `TSO WAIT` and the network time of the TSO requ - Common KV read requests: `Get`, `BatchGet`, and `Cop` - Common KV write requests: `PessimisticLock`, `Prewrite` and `Commit` for two-phase commits -![Execute](/media/performance/execute_phase.png) +![Execute](./media/performance/execute_phase.png) The indicators in this section correspond to the following three panels. @@ -425,19 +425,19 @@ The difference between `Avg TiDB KV Request Duration` and `Avg TiKV GRPC Duratio **Example 1: Low workload of clusters deployed on the same data center** -![Same Data Center](/media/performance/oltp_kv_tso.png) +![Same Data Center](./media/performance/oltp_kv_tso.png) In this workload, the average `Prewrite` latency on TiDB is 925 us, and the average `kv_prewrite` processing latency inside TiKV is 720 us. The difference is about 200 us, which is normal in the same data center. The average TSO wait latency is 206 us, and the RPC time is 144 us. **Example 2: Normal workload on public cloud clusters** -![Cloud Env ](/media/performance/cloud_kv_tso.png) +![Cloud Env ](./media/performance/cloud_kv_tso.png) In this example, TiDB clusters are deployed in different data centers in the same region. 
The average `commit` latency on TiDB is 12.7 ms, and the average `kv_commit` processing latency inside TiKV is 10.2 ms, a difference of about 2.5 ms. The average TSO wait latency is 3.12 ms, and the RPC time is 693 us. **Example 3: Resource overloaded on public cloud clusters** -![Cloud Env, TiDB Overloaded](/media/performance/cloud_kv_tso_overloaded.png) +![Cloud Env, TiDB Overloaded](./media/performance/cloud_kv_tso_overloaded.png) In this example, the TiDB clusters are deployed in different data centers in the same region, and TiDB network and CPU resources are severely overloaded. The average `BatchGet` latency on TiDB is 38.6 ms, and the average `kv_batch_get` processing latency inside TiKV is 6.15 ms. The difference is more than 32 ms, which is much higher than the normal value. The average TSO wait latency is 9.45 ms and the RPC time is 14.3 ms. @@ -453,7 +453,7 @@ TiKV processes a write request in the following procedure: - The `Store` thread processes Raft messages and new `proposals`. When a new `proposals` is received, the `Store` thread of the leader node writes to the local Raft DB and copies the message to multiple follower nodes. When this `proposals` is successfully persisted in most instances, the `proposals` is successfully committed. - The `Apply` thread writes the committed `proposals` to the KV DB. When the data is successfully written to the KV DB, the `Apply` thread notifies externally that the write request has completed. -![TiKV Write](/media/performance/store_apply.png) +![TiKV Write](./media/performance/store_apply.png) The `Storage Async Write Duration` metric records the latency after a write request enters raftstore. The data is collected on a basis of per request. @@ -478,17 +478,17 @@ In v5.4.0, the gPRC module has been optimized to accelerate Raft log replication v5.3.0: -![v5.3.0](/media/performance/v5.3.0_store_apply.png) +![v5.3.0](./media/performance/v5.3.0_store_apply.png) v5.4.0: -![v5.4.0](/media/performance/v5.4.0_store_apply.png) +![v5.4.0](./media/performance/v5.4.0_store_apply.png) **Example 2: Store Duration is a bottleneck** Apply the preceding formula: 10.1 ms ~= 9.81 ms + 0.304 ms. The result indicates that the latency bottleneck for write requests is in `Store Duration`. -![Store](/media/performance/cloud_store_apply.png) +![Store](./media/performance/cloud_store_apply.png) #### Commit Log Duration, Append Log Duration, and Apply Log Duration @@ -525,15 +525,15 @@ In v5.4.0, the gPRC module has been optimized to accelerate Raft log replication v5.3.0: -![v5.3.0](/media/performance/v5.3.0_commit_append_apply.png) +![v5.3.0](./media/performance/v5.3.0_commit_append_apply.png) v5.4.0: -![v5.4.0](/media/performance/v5.4.0_commit_append_apply.png) +![v5.4.0](./media/performance/v5.4.0_commit_append_apply.png) **Example 2: Commit Log Duration is a bottleneck** -![Store](/media/performance/cloud_append_commit_apply.png) +![Store](./media/performance/cloud_append_commit_apply.png) - Average `Append Log Duration` = 4.38 ms - Average `Commit Log Duration` = 7.92 ms @@ -549,4 +549,4 @@ For the `Store` thread, `Commit Log Duration` is obviously higher than `Apply Lo Starting from v6.1.0, Grafana has a built-in Performance Overview dashboard by default. This dashboard is compatible with TiDB v4.x and v5.x versions. 
If your TiDB is earlier than v6.1.0, you need to manually import [`performance_overview.json`](https://github.com/pingcap/tidb/blob/master/pkg/metrics/grafana/performance_overview.json), as shown in the following figure: -![Store](/media/performance/import_dashboard.png) +![Store](./media/performance/import_dashboard.png) diff --git a/performance-tuning-overview.md b/performance-tuning-overview.md index 4332f9cc9b590..2ccb37b3ca66b 100644 --- a/performance-tuning-overview.md +++ b/performance-tuning-overview.md @@ -24,7 +24,7 @@ To get a total user response time within a specified time range (`ΔT`), you can Total user response time in `ΔT` = Average TPS (Transactions Per Second) x Average user response time x `ΔT`. -![user_response_time](/media/performance/user_response_time_en.png) +![user_response_time](./media/performance/user_response_time_en.png) ### Database time diff --git a/performance-tuning-practices.md b/performance-tuning-practices.md index 1704b228e6f9d..d56701c9ec9fa 100644 --- a/performance-tuning-practices.md +++ b/performance-tuning-practices.md @@ -42,11 +42,11 @@ useServerPrepStmts=false From the Top SQL page in the TiDB Dashboard below, you can see that the non-business SQL type `SELECT @@session.tx_isolation` consumes the most resources. Although TiDB processes these types of SQL statements quickly, these types of SQL statements have the highest number of executions that result in the highest overall CPU time consumption. -![dashboard-for-query-interface](/media/performance/case1.png) +![dashboard-for-query-interface](./media/performance/case1.png) From the following flame chart of TiDB, you can see that the CPU consumption of functions such as `Compile` and `Optimize` is significant during the SQL execution. Because the application uses the Query interface, TiDB cannot use the execution plan cache. TiDB needs to compile and generate an execution plan for each SQL statement. -![flame-graph-for-query-interface](/media/performance/7.1.png) +![flame-graph-for-query-interface](./media/performance/7.1.png) - ExecuteStmt cpu = 38% cpu time = 23.84s - Compile cpu = 27% cpu time = 17.17s @@ -56,7 +56,7 @@ From the following flame chart of TiDB, you can see that the CPU consumption of Check the database time overview and QPS in the following Performance Overview dashboard. -![performance-overview-1-for-query-interface](/media/performance/j-1.png) +![performance-overview-1-for-query-interface](./media/performance/j-1.png) - Database Time by SQL Type: the `Select` statement type takes most of the time. - Database Time by SQL Phase: the `execute` and `compile` phases take most of the time. @@ -68,7 +68,7 @@ Check the database time overview and QPS in the following Performance Overview d Check the resource consumption of the cluster: the average utilization of TiDB CPU is 925%, the average utilization of TiKV CPU is 201%, and the average throughput of TiKV IO is 18.7 MB/s. The resource consumption of TiDB is significantly higher. -![performance-overview-2-for-query-interface](/media/performance/5.png) +![performance-overview-2-for-query-interface](./media/performance/5.png) ### Analysis conclusion @@ -90,11 +90,11 @@ useServerPrepStmts=false&useConfigs=maxPerformance From the Top SQL page in the TiDB Dashboard below, you can see that `SELECT @@session.tx_isolation`, which consumed the most resources, has disappeared. 
-![dashboard-for-maxPerformance](/media/performance/case2.png) +![dashboard-for-maxPerformance](./media/performance/case2.png) From the following flame chart of TiDB, you can see that the CPU consumption of functions such as `Compile` and `Optimize` is still significant during the SQL execution. -![flame-graph-for-maxPerformance](/media/performance/20220507-145257.jpg) +![flame-graph-for-maxPerformance](./media/performance/20220507-145257.jpg) - ExecuteStmt cpu = 43% cpu time =35.84s - Compile cpu = 31% cpu time =25.61s @@ -104,7 +104,7 @@ From the following flame chart of TiDB, you can see that the CPU consumption of The data of the database time overview and QPS is as follows: -![performance-overview-1-for-maxPerformance](/media/performance/j-2.png) +![performance-overview-1-for-maxPerformance](./media/performance/j-2.png) - Database Time by SQL Type: the `Select` statement type takes most of the time. - Database Time by SQL Phase: the `execute` and `compile` phases take most of the time. @@ -116,11 +116,11 @@ The data of the database time overview and QPS is as follows: From Scenario 1 to Scenario 2, the average TiDB CPU utilization drops from 925% to 874%, and the average TiKV CPU utilization increases from 201% to about 250%. -![performance-overview-2-for-maxPerformance](/media/performance/9.1.1.png) +![performance-overview-2-for-maxPerformance](./media/performance/9.1.1.png) The changes in key latency metrics are as follows: -![performance-overview-3-for-maxPerformance](/media/performance/9.2.2.png) +![performance-overview-3-for-maxPerformance](./media/performance/9.2.2.png) - avg query duration = 1.12ms (from 479μs to 1.12ms) - avg parse duration = 84.7μs (from 37.2μs to 84.7μs) @@ -149,7 +149,7 @@ useServerPrepStmts=true&useConfigs=maxPerformance" From the following flame chart of TiDB, you can see that the CPU consumption of `CompileExecutePreparedStmt` and `Optimize` is still significant after the Prepared Statement interface is enabled. -![flame-graph-for-PrepStmts](/media/performance/3.1.1.png) +![flame-graph-for-PrepStmts](./media/performance/3.1.1.png) - ExecutePreparedStmt cpu = 31% cpu time = 23.10s - preparedStmtExec cpu = 30% cpu time = 22.92s @@ -160,7 +160,7 @@ From the following flame chart of TiDB, you can see that the CPU consumption of After the Prepared Statement interface is used, the data of database time overview and QPS is as follows: -![performance-overview-1-for-PrepStmts](/media/performance/j-3.png) +![performance-overview-1-for-PrepStmts](./media/performance/j-3.png) The QPS drops from 24.4k to 19.7k. From the Database Time Overview, you can see that the application uses three types of Prepared commands, and the `general` statement type (which includes the execution time of commands such as `StmtPrepare` and `StmtClose`) takes the second place in Database Time by SQL Type. This indicates that even when the Prepared Statement interface is used, the execution plan cache is not hit. The reason is that, when the `StmtClose` command is executed, TiDB clears the execution plan cache of SQL statements in the internal processing. @@ -173,11 +173,11 @@ The QPS drops from 24.4k to 19.7k. From the Database Time Overview, you can see The TiDB average CPU utilization increases from 874% to 936%. 
-![performance-overview-1-for-PrepStmts](/media/performance/3-2.png) +![performance-overview-1-for-PrepStmts](./media/performance/3-2.png) The key latency metrics are as follows: -![performance-overview-2-for-PrepStmts](/media/performance/3.4.png) +![performance-overview-2-for-PrepStmts](./media/performance/3.4.png) - avg query duration = 528μs (from 1.12ms to 528μs) - avg parse duration = 14.9μs (from 84.7μs to 14.9μs) @@ -210,13 +210,13 @@ From the flame chart of the TiDB CPU usage, you can see that `CompileExecutePrep PreparseStmt cpu = 25% cpu time = 12.75s -![flame-graph-for-3-commands](/media/performance/4.2.png) +![flame-graph-for-3-commands](./media/performance/4.2.png) #### Performance Overview dashboard In the Performance Overview dashboard, the most significant change is the average time of the `compile` phase, which is reduced from 8.95 seconds per second in Scenario 3 to 1.18 seconds per second. The number of queries using the execution plan cache is roughly equal to the value of `StmtExecute`. With the increase in QPS, the database time consumed by `Select` statements per second decreases, and the database time consumed by `general` statements per second type increases. -![performance-overview-1-for-3-commands](/media/performance/j-4.png) +![performance-overview-1-for-3-commands](./media/performance/j-4.png) - Database Time by SQL Type: the `Select` statement type takes the most time. - Database Time by SQL Phase: the `execute` phase takes most of the time. @@ -228,11 +228,11 @@ In the Performance Overview dashboard, the most significant change is the averag The average TiDB CPU utilization drops from 936% to 827%. -![performance-overview-2-for-3-commands](/media/performance/4.4.png) +![performance-overview-2-for-3-commands](./media/performance/4.4.png) The average `compile` time drops significantly, from 374 us to 53.3 us. Because the QPS increases, the average `execute` time increases too. -![performance-overview-3-for-3-commands](/media/performance/4.5.png) +![performance-overview-3-for-3-commands](./media/performance/4.5.png) - avg query duration = 426μs (from 528μs to 426μs) - avg parse duration = 12.3μs (from 14.8μs to 12.3μs) @@ -269,13 +269,13 @@ From the following flame chart of TiDB, you can see that the high CPU consumptio - ExecutePreparedStmt cpu = 22% cpu time = 8.4s -![flame-graph-for-1-command](/media/performance/5.1.1.png) +![flame-graph-for-1-command](./media/performance/5.1.1.png) #### Performance Overview dashboard In the Performance Overview dashboard, the most notable changes are that the three Stmt command types in the **CPS By Type** pane drop to one type, the `general` statement type in the **Database Time by SQL Type** pane is disappeared, and the QPS in the **QPS** pane increases to 30.9k. -![performance-overview-for-1-command](/media/performance/j-5.png) +![performance-overview-for-1-command](./media/performance/j-5.png) - Database Time by SQL Type: the `Select` statement type takes most of the time and the `general` statement type disappears. - Database Time by SQL Phase: the `execute` phase takes most of the time. @@ -286,11 +286,11 @@ In the Performance Overview dashboard, the most notable changes are that the thr The average TiDB CPU utilization drops from 827% to 577%. As the QPS increases, the average TiKV CPU utilization increases to 313%. 
-![performance-overview-for-2-command](/media/performance/j-5-cpu.png) +![performance-overview-for-2-command](./media/performance/j-5-cpu.png) The key latency metrics are as follows: -![performance-overview-for-3-command](/media/performance/j-5-duration.png) +![performance-overview-for-3-command](./media/performance/j-5-duration.png) - avg query duration = 690μs (from 426μs to 690μs) - avg parse duration = 13.5μs (from 12.3μs to 13.5μs ) @@ -322,13 +322,13 @@ The flame chart of the TiDB CPU does not have any significant changes. - ExecutePreparedStmt cpu = 22% cpu time = 8.4s -![flame-graph-for-rc-read](/media/performance/6.2.2.png) +![flame-graph-for-rc-read](./media/performance/6.2.2.png) #### Performance Overview dashboard After using RC read, QPS increases from 30.9k to 34.9k, and the `tso wait` time consumed per second decreases from 5.46 s to 456 ms. -![performance-overview-1-for-rc-read](/media/performance/j-6.png) +![performance-overview-1-for-rc-read](./media/performance/j-6.png) - Database Time by SQL Type: the `Select` statement type takes most of the time. - Database Time by SQL Phase: the `execute` phase takes most of the time. @@ -339,15 +339,15 @@ After using RC read, QPS increases from 30.9k to 34.9k, and the `tso wait` time The `tso cmd` per second drops from 28.3k to 2.7k. -![performance-overview-2-for-rc-read](/media/performance/j-6-cmd.png) +![performance-overview-2-for-rc-read](./media/performance/j-6-cmd.png) The average TiDB CPU increases to 603% (from 577% to 603%). -![performance-overview-3-for-rc-read](/media/performance/j-6-cpu.png) +![performance-overview-3-for-rc-read](./media/performance/j-6-cpu.png) The key latency metrics are as follows: -![performance-overview-4-for-rc-read](/media/performance/j-6-duration.png) +![performance-overview-4-for-rc-read](./media/performance/j-6-duration.png) - avg query duration = 533μs (from 690μs to 533μs) - avg parse duration = 13.4μs (from 13.5μs to 13.4μs ) @@ -373,13 +373,13 @@ Compared with Scenario 6, the application configuration remains the same. The on The flame chart of the TiDB CPU does not have any significant changes. -![flame-graph-for-table-cache](/media/performance/7.2.png) +![flame-graph-for-table-cache](./media/performance/7.2.png) #### Performance Overview dashboard The QPS increases from 34.9k to 40.9k, and the KV request types take the most time in the `execute` phase change to `Prewrite` and `Commit`. The database time consumed by `Get` per second decreases from 5.33 seconds to 1.75 seconds, and the database time consumed by `Cop` per second decreases from 3.87 seconds to 1.09 seconds. -![performance-overview-1-for-table-cache](/media/performance/j-7.png) +![performance-overview-1-for-table-cache](./media/performance/j-7.png) - Database Time by SQL Type: the `Select` statement type takes most of the time. - Database Time by SQL Phase: the `execute` and `compile` phases take most of the time. @@ -390,11 +390,11 @@ The QPS increases from 34.9k to 40.9k, and the KV request types take the most ti The average TiDB CPU utilization drops from 603% to 478% and the average TiKV CPU utilization drops from 346% to 256%. -![performance-overview-2-for-table-cache](/media/performance/j-7-cpu.png) +![performance-overview-2-for-table-cache](./media/performance/j-7-cpu.png) The average query latency drops from 533 us to 313 us. The average `execute` latency drops from 466 us to 250 us. 
-![performance-overview-3-for-table-cache](/media/performance/j-7-duration.png) +![performance-overview-3-for-table-cache](./media/performance/j-7-duration.png) - avg query duration = 313μs (from 533μs to 313μs) - avg parse duration = 11.9μs (from 13.4μs to 11.9μs) diff --git a/pessimistic-transaction.md b/pessimistic-transaction.md index ef875869e92a7..507d29f885446 100644 --- a/pessimistic-transaction.md +++ b/pessimistic-transaction.md @@ -147,7 +147,7 @@ TiDB supports the following two isolation levels in the pessimistic transaction In the transaction commit process, pessimistic transactions and optimistic transactions have the same logic. Both transactions adopt the two-phase commit (2PC) mode. The important adaptation of pessimistic transactions is DML execution. -![TiDB pessimistic transaction commit process](/media/pessimistic-transaction-commit.png) +![TiDB pessimistic transaction commit process](./media/pessimistic-transaction-commit.png) The pessimistic transaction adds an `Acquire Pessimistic Lock` phase before 2PC. This phase includes the following steps: @@ -155,7 +155,7 @@ The pessimistic transaction adds an `Acquire Pessimistic Lock` phase before 2PC. 2. When the TiDB server receives a writing request from the client, the TiDB server initiates a pessimistic lock request to the TiKV server, and the lock is persisted to the TiKV server. 3. (Same as the optimistic transaction mode) When the client sends the commit request, TiDB starts to perform the two-phase commit similar to the optimistic transaction mode. -![Pessimistic transactions in TiDB](/media/pessimistic-transaction-in-tidb.png) +![Pessimistic transactions in TiDB](./media/pessimistic-transaction-in-tidb.png) ## Pipelined locking process @@ -171,7 +171,7 @@ To reduce the overhead of locking, TiKV implements the pipelined locking process If the application logic relies on the locking or lock waiting mechanisms, or if you want to guarantee as much as possible the success rate of transaction commits even in the case of TiKV cluster anomalies, you should disable the pipelined locking feature. -![Pipelined pessimistic lock](/media/pessimistic-transaction-pipelining.png) +![Pipelined pessimistic lock](./media/pessimistic-transaction-pipelining.png) This feature is enabled by default. To disable it, modify the TiKV configuration: diff --git a/post-installation-check.md b/post-installation-check.md index 8e991786dc586..f002dcee38666 100644 --- a/post-installation-check.md +++ b/post-installation-check.md @@ -28,11 +28,11 @@ Expected output: If the `Status` information of each node is `Up`, the cluster r 1. Log in to TiDB Dashboard at `${pd-ip}:${pd-port}/dashboard`. The username and password is the same as that of the TiDB `root` user. If you have modified the `root` password, enter the modified password. The password is empty by default. - ![TiDB-Dashboard](/media/tiup/tidb-dashboard.png) + ![TiDB-Dashboard](./media/tiup/tidb-dashboard.png) 2. The home page displays the node information in the TiDB cluster. - ![TiDB-Dashboard-status](/media/tiup/tidb-dashboard-status.png) + ![TiDB-Dashboard-status](./media/tiup/tidb-dashboard-status.png) ### Use Grafana @@ -40,7 +40,7 @@ Expected output: If the `Status` information of each node is `Up`, the cluster r 2. To check the TiDB port status and load monitoring information, click **Overview**. 
- ![Grafana-overview](/media/tiup/grafana-overview.png) + ![Grafana-overview](./media/tiup/grafana-overview.png) ## Log in to the database and perform simple operations diff --git a/replicate-data-to-kafka.md b/replicate-data-to-kafka.md index 1cf94d1c41252..3f9e5a55f88f4 100644 --- a/replicate-data-to-kafka.md +++ b/replicate-data-to-kafka.md @@ -162,6 +162,6 @@ At this time, incremental data of the TiDB database is successfully replicated t After this command is executed, you can see that there is new data in the table, as shown in the following figure. - ![SQL query result](/media/integrate/sql-query-result.png) + ![SQL query result](./media/integrate/sql-query-result.png) Data integration with Kafka is done. diff --git a/sql-non-prepared-plan-cache.md b/sql-non-prepared-plan-cache.md index 1378361f75c97..c3bfde16f197e 100644 --- a/sql-non-prepared-plan-cache.md +++ b/sql-non-prepared-plan-cache.md @@ -101,7 +101,7 @@ However, this feature also introduces some additional memory and CPU overhead, i In this case, you need to observe the `non-prepared` metric in the **Queries Using Plan Cache OPS** panel and the `non-prepared-unsupported` metric in the **Plan Cache Miss OPS** panel on Grafana. If most queries are not supported and only a few can hit the plan cache, you can disable this feature. -![non-prepared-unsupported](/media/non-prepapred-plan-cache-unsupprot.png) +![non-prepared-unsupported](./media/non-prepapred-plan-cache-unsupprot.png) ## Diagnostics @@ -144,7 +144,7 @@ In the preceding example, the query cannot hit the cache because the non-prepare After enabling the non-prepared plan cache, you can monitor the memory usage, number of plans in the cache, and cache hit rate in the following panes: -![non-prepare-plan-cache](/media/tidb-non-prepared-plan-cache-metrics.png) +![non-prepare-plan-cache](./media/tidb-non-prepared-plan-cache-metrics.png) You can also monitor the cache hit rate in the `statements_summary` table and slow query log. The following shows how to view the cache hit rate in the `statements_summary` table: diff --git a/sql-optimization-concepts.md b/sql-optimization-concepts.md index e1c8935f02eae..d69f608cdfd2b 100644 --- a/sql-optimization-concepts.md +++ b/sql-optimization-concepts.md @@ -8,7 +8,7 @@ aliases: ['/docs/dev/sql-optimization-concepts/','/docs/dev/reference/performanc In TiDB, the process from inputting a query to getting the execution result according to the final execution plan is illustrated as follows: -![SQL Optimization Process](/media/sql-optimization.png) +![SQL Optimization Process](./media/sql-optimization.png) After parsing the original query text by `parser` and some simple validity checks, TiDB first makes some logically equivalent changes to the query. For detailed changes, see [SQL Logical Optimization](/sql-logical-optimization.md). 
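To inspect the plan that this process finally produces for a given query, you can use `EXPLAIN`, or `EXPLAIN ANALYZE` to also execute the statement and report per-operator runtime statistics. A minimal sketch against a hypothetical table `t`:

```sql
-- View the optimizer's chosen physical plan without running the query
EXPLAIN SELECT * FROM t WHERE a = 1;

-- Run the query and show the plan together with per-operator execution statistics
EXPLAIN ANALYZE SELECT * FROM t WHERE a = 1;
```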
diff --git a/sql-prepared-plan-cache.md b/sql-prepared-plan-cache.md index 1b85d938d6416..86580d024aca1 100644 --- a/sql-prepared-plan-cache.md +++ b/sql-prepared-plan-cache.md @@ -204,7 +204,7 @@ To view the total number of execution plans cached in each TiDB instance, you ca The following is an example of the **Plan Cache Memory Usage** and **Plan Cache Plan Num** panels in Grafana: -![grafana_panels](/media/planCache-memoryUsage-planNum-panels.png) +![grafana_panels](./media/planCache-memoryUsage-planNum-panels.png) Starting from v7.1.0, you can control the maximum number of plans that can be cached in each session by configuring the system variable [`tidb_session_plan_cache_size`](/system-variables.md#tidb_session_plan_cache_size-new-in-v710). For different environments, the recommended value is as follows and you can adjust it according to the monitoring panels: @@ -363,7 +363,7 @@ mysql> select @@last_plan_from_cache; -- Reuse the last plan In [the Grafana dashboard](/grafana-tidb-dashboard.md) on the TiDB page in the **Executor** section, there are the "Queries Using Plan Cache OPS" and "Plan Cache Miss OPS" graphs. These graphs can be used to check if both TiDB and the application are configured correctly to allow the SQL Plan Cache to work correctly. The **Server** section on the same page provides the "Prepared Statement Count" graph. This graph shows a non-zero value if the application uses prepared statements, which is required for the SQL Plan Cache to function correctly. -![`sql_plan_cache`](/media/performance/sql_plan_cache.png) +![`sql_plan_cache`](./media/performance/sql_plan_cache.png) diff --git a/sql-statements/sql-statement-explain.md b/sql-statements/sql-statement-explain.md index 2c2f4ef77b9d3..39116ec2dde86 100644 --- a/sql-statements/sql-statement-explain.md +++ b/sql-statements/sql-statement-explain.md @@ -285,7 +285,7 @@ The xx.dot is the result returned by the above statement. If your computer has no `dot` program, copy the result to [this website](http://www.webgraphviz.com/) to get a tree diagram: -![Explain Dot](/media/explain_dot.png) +![Explain Dot](./media/explain_dot.png) diff --git a/sql-statements/sql-statement-trace.md b/sql-statements/sql-statement-trace.md index 06797e7312c77..cb2426c91ee70 100644 --- a/sql-statements/sql-statement-trace.md +++ b/sql-statements/sql-statement-trace.md @@ -61,9 +61,9 @@ TRACE FORMAT='json' SELECT * FROM mysql.user; The JSON formatted trace can be pasted into the trace viewer, which is accessed via the TiDB status port: -![TiDB Trace Viewer-1](/media/trace-paste.png) +![TiDB Trace Viewer-1](./media/trace-paste.png) -![TiDB Trace Viewer-2](/media/trace-view.png) +![TiDB Trace Viewer-2](./media/trace-view.png) ### Log diff --git a/sql-tuning-best-practice.md b/sql-tuning-best-practice.md index e6e5dda8b36b6..e04f526ce0616 100644 --- a/sql-tuning-best-practice.md +++ b/sql-tuning-best-practice.md @@ -93,7 +93,7 @@ In [TiDB Dashboard](/dashboard/dashboard-overview.md), navigate to the [**SQL St TiDB normalizes SQL statements into templates by replacing literals and bind variables with `?`. This normalization and sorting process helps you quickly identify the most resource-intensive queries that might require optimization. 
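If you prefer SQL over the Dashboard, the same normalized templates can be queried from the statement summary system tables. The following is a rough sketch that lists the templates with the highest total latency; table and column names follow `INFORMATION_SCHEMA`, and the 10-row limit is only an example.

```sql
-- Top 10 normalized statement templates by total latency in the current summary window
SELECT digest_text, exec_count, sum_latency, avg_latency
FROM information_schema.cluster_statements_summary
ORDER BY sum_latency DESC
LIMIT 10;
```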
-![sql-statements-default](/media/sql-tuning/sql-statements-default.png) +![sql-statements-default](./media/sql-tuning/sql-statements-default.png) #### Slow Queries page @@ -105,7 +105,7 @@ In [TiDB Dashboard](/dashboard/dashboard-overview.md), navigate to the [**Slow Q The **Slow Queries** page does not display SQL execution frequency. A query appears on this page if its execution time exceeds the [`tidb_slow_log_threshold`](/tidb-configuration-file.md#tidb_slow_log_threshold) configuration item for a single instance. -![slow-query-default](/media/sql-tuning/slow-query-default.png) +![slow-query-default](./media/sql-tuning/slow-query-default.png) ### Use other tools to identify Top SQL @@ -156,7 +156,7 @@ This section introduces the query processing workflow, optimizer fundamentals, a When a client sends a SQL statement to TiDB, the statement passes through the protocol layer of the TiDB server. This layer manages the connection between the TiDB server and the client, receives SQL statements, and returns data to the client. -![workflow](/media/sql-tuning/workflow-tiflash.png) +![workflow](./media/sql-tuning/workflow-tiflash.png) In the preceding figure, to the right of the protocol layer is the optimizer of the TiDB server, which processes SQL statements as follows: @@ -317,7 +317,7 @@ Additionally, the physical optimization phase includes pushing down expressions This distribution enables cross-component collaboration for efficient query processing. -![cost-based-optimization](/media/sql-tuning/cost-based-optimization.png) +![cost-based-optimization](./media/sql-tuning/cost-based-optimization.png) For more information, see [SQL Physical Optimization](/sql-physical-optimization.md). @@ -467,7 +467,7 @@ LIMIT 3; The following figure illustrates the plan tree for the second execution plan: -![execution-plan-traverse](/media/sql-tuning/execution-plan-traverse.png) +![execution-plan-traverse](./media/sql-tuning/execution-plan-traverse.png) The execution plan follows a top-to-bottom, first-child-first traversal, corresponding to a postorder traversal (Left, Right, Root) of the plan tree. diff --git a/statistics.md b/statistics.md index f16cf14580911..82a28629afb1a 100644 --- a/statistics.md +++ b/statistics.md @@ -87,7 +87,7 @@ A histogram is an approximate representation of the distribution of data. It div Here "equal-depth" means that the number of values ​​falling into each bucket is as equal as possible. For example, for a given set {1.6, 1.9, 1.9, 2.0, 2.4, 2.6, 2.7, 2.7, 2.8, 2.9, 3.4, 3.5}, you want to generate 4 buckets. The equal-depth histogram is as follows. It contains four buckets [1.6, 1.9], [2.0, 2.6], [2.7, 2.8], [2.9, 3.5]. The bucket depth is 3. -![Equal-depth Histogram Example](/media/statistics-1.png) +![Equal-depth Histogram Example](./media/statistics-1.png) For details about the parameter that determines the upper limit to the number of histogram buckets, refer to [Manual Collection](#manual-collection). When the number of buckets is larger, the accuracy of the histogram is higher; however, higher accuracy is at the cost of the usage of memory resources. You can adjust this number appropriately according to the actual scenario. 
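For example, you can control the bucket count when collecting statistics and then inspect the generated buckets. A minimal sketch, assuming a table `t` in the `test` database:

```sql
-- Collect statistics on table `t` with up to 256 histogram buckets
ANALYZE TABLE t WITH 256 BUCKETS;

-- Inspect the generated buckets (per-bucket counts and lower/upper bounds)
SHOW STATS_BUCKETS WHERE db_name = 'test' AND table_name = 't';
```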
@@ -746,7 +746,7 @@ When you run the `ANALYZE` statement, you can adjust the concurrency using syste The relationships of the relevant system variables are shown below: -![analyze_concurrency](/media/analyze_concurrency.png) +![analyze_concurrency](./media/analyze_concurrency.png) `tidb_build_stats_concurrency`, `tidb_build_sampling_stats_concurrency`, and `tidb_analyze_partition_concurrency` are in an upstream-downstream relationship, as shown in the preceding diagram. The actual total concurrency is: `tidb_build_stats_concurrency` * (`tidb_build_sampling_stats_concurrency` + `tidb_analyze_partition_concurrency`). When modifying these variables, you need to consider their respective values at the same time. It is recommended to adjust them one by one in the order of `tidb_analyze_partition_concurrency`, `tidb_build_sampling_stats_concurrency`, `tidb_build_stats_concurrency`, and observe the impact on the system. The larger the values of these three variables, the greater the resource overhead on the system. diff --git a/storage-engine/rocksdb-overview.md b/storage-engine/rocksdb-overview.md index 8ada230088a7d..c199382cb13f7 100644 --- a/storage-engine/rocksdb-overview.md +++ b/storage-engine/rocksdb-overview.md @@ -16,7 +16,7 @@ RocksDB allows users to create multiple Column Families (CFs). CFs have their ow The architecture of TiKV is illustrated as follows: -![TiKV RocksDB](/media/tikv-rocksdb.png) +![TiKV RocksDB](./media/tikv-rocksdb.png) As the storage engine of TiKV, RocksDB is used to store Raft logs and user data. All data in a TiKV node shares two RocksDB instances. One is for Raft log (often called raftdb), and the other is for user data and MVCC metadata (often called kvdb). There are four CFs in kvdb: raft, lock, default, and write: diff --git a/storage-engine/titan-overview.md b/storage-engine/titan-overview.md index 42b8ea95dfc50..bc4fb8996ba93 100644 --- a/storage-engine/titan-overview.md +++ b/storage-engine/titan-overview.md @@ -39,7 +39,7 @@ If you want to improve the performance of Titan, see the blog post [Titan: A Roc The following figure shows the architecture of Titan: -![Titan Architecture](/media/titan/titan-1.png) +![Titan Architecture](./media/titan/titan-1.png) During flush and compaction operations, Titan separates values from the LSM-tree. The advantage of this approach is that the write process is consistent with RocksDB, which reduces the chance of invasive changes to RocksDB. @@ -47,7 +47,7 @@ During flush and compaction operations, Titan separates values from the LSM-tree When Titan separates the value file from the LSM-tree, it stores the value file in the BlobFile. The following figure shows the BlobFile format: -![BlobFile Format](/media/titan/titan-2.png) +![BlobFile Format](./media/titan/titan-2.png) A blob file mainly consists of blob records, meta blocks, a meta index block, and a footer. Each block record stores a Key-Value pair. The meta blocks are used for scalability, and store properties related to the blob file. The meta index block is used for meta block searching. @@ -60,7 +60,7 @@ A blob file mainly consists of blob records, meta blocks, a meta index block, an ### TitanTableBuilder -![TitanTableBuilder](/media/titan/titan-3.png) +![TitanTableBuilder](./media/titan/titan-3.png) TitanTableBuilder is the key to achieving Key-Value separation. TitanTableBuilder determines the Key-Pair value size, and based on that, decides whether to separate the value from the Key-Value pair and store it in the blob file. 
@@ -84,7 +84,7 @@ Titan uses the TablePropertiesCollector and EventListener components of RocksDB RocksDB supports using BlobFileSizeCollector, a custom table property collector, to collect properties from the SST which are written into corresponding SST files. The collected properties are named BlobFileSizeProperties. The following figure shows the BlobFileSizeCollector workflow and data formats: -![BlobFileSizeProperties](/media/titan/titan-4.png) +![BlobFileSizeProperties](./media/titan/titan-4.png) On the left is the SST index format. The first column is the blob file ID; the second column is the offset for the blob record in the blob file; the third column is the blob record size. @@ -94,7 +94,7 @@ On the right is the BlobFileSizeProperties format. Each line represents a blob f RocksDB uses compaction to discard old data and reclaim space. After each compaction, some blob files in Titan might contain partly or entirely outdated data. Therefore, you can trigger GC by listening to compaction events. During compaction, you can collect and compare the input/output blob file size properties of SST to determine which blob files require GC. The following figure shows the general process: -![EventListener](/media/titan/titan-5.png) +![EventListener](./media/titan/titan-5.png) + *inputs* stands for the blob file size properties for all SSTs that participate in the compaction. + *outputs* stands for the blob file size properties for all SSTs generated in the compaction. @@ -108,7 +108,7 @@ For the selected blob file, Titan checks whether the blob index of the key corre Level Merge is a newly introduced algorithm in Titan. According to the implementation principle of Level Merge, Titan merges and rewrites blob file that corresponds to the SST file, and generates new blob file while compactions are performed in LSM-tree. The following figure shows the general process: -![LevelMerge General Process](/media/titan/titan-6.png) +![LevelMerge General Process](./media/titan/titan-6.png) When compactions are performed on the SSTs of level z-1 and level z, Titan reads and writes Key-Value pairs in order. Then it writes the values of the selected blob files into new blob files in order, and updates the blob indexes of keys when new SSTs are generated. For the keys deleted in compactions, the corresponding values will not be written to the new blob file, which works similar to GC. @@ -123,7 +123,7 @@ Range Merge is an optimized approach of GC based on Level Merge. However, the bo - When `level_compaction_dynamic_level_bytes` is enabled, data volume at each level of LSM-tree dynamically increases, and the sorted runs at the bottom level keep increasing. - A specific range of data is frequently compacted, and this causes a lot of sorted runs in that range. -![RangeMerge](/media/titan/titan-7.png) +![RangeMerge](./media/titan/titan-7.png) Therefore, the Range Merge operation is needed to keep the number of sorted runs within a certain level. At the time of OnCompactionComplete, Titan counts the number of sorted runs in a range. If the number is large, Titan marks the corresponding blob file as ToMerge and rewrites it in the next compaction. diff --git a/sync-diff-inspector/shard-diff.md b/sync-diff-inspector/shard-diff.md index a8fefc2a2bb9d..aa9edf257b76b 100644 --- a/sync-diff-inspector/shard-diff.md +++ b/sync-diff-inspector/shard-diff.md @@ -10,7 +10,7 @@ sync-diff-inspector supports data check in the sharding scenario. 
Assume that yo For scenarios where the number of upstream sharded tables is small and the naming rules of sharded tables do not have a pattern as shown below, you can use `Datasource config` to configure `table-0`, set corresponding `rules` and configure the tables that have the mapping relationship between the upstream and downstream databases. This configuration method requires setting all sharded tables. -![shard-table-replica-1](/media/shard-table-replica-1.png) +![shard-table-replica-1](./media/shard-table-replica-1.png) Below is a complete example of the sync-diff-inspector configuration. @@ -78,7 +78,7 @@ target-table = "table-0" # The name of the target table You can use `table-rules` for configuration when there are a large number of upstream sharded tables and the naming rules of all sharded tables have a pattern, as shown below: -![shard-table-replica-2](/media/shard-table-replica-2.png) +![shard-table-replica-2](./media/shard-table-replica-2.png) Below is a complete example of the sync-diff-inspector configuration. diff --git a/system-variables.md b/system-variables.md index c31bde4afb2b9..e9c1b3e97a864 100644 --- a/system-variables.md +++ b/system-variables.md @@ -4053,7 +4053,7 @@ For a system upgraded to v5.0 from an earlier version, if you have not modified - Unit: Rows - This variable is used to set the minimum number of rows during the coprocessor paging request process. Setting it to a too small value increases the RPC request count between TiDB and TiKV, while setting it to a too large value might cause a performance decrease when executing queries using IndexLookup with Limit. The default value of this variable brings better performance in OLTP scenarios than in OLAP scenarios. If the application only uses TiKV as the storage engine, consider increasing the value of this variable when executing OLAP workload queries, which might bring you better performance. -![Paging size impact on TPCH](/media/paging-size-impact-on-tpch.png) +![Paging size impact on TPCH](./media/paging-size-impact-on-tpch.png) As shown in this diagram, when [`tidb_enable_paging`](#tidb_enable_paging-new-in-v540) is enabled, the performance of TPCH is affected by the settings of `tidb_min_paging_size` and [`tidb_max_paging_size`](#tidb_max_paging_size-new-in-v630). The vertical axis is the execution time, and it is the smaller the better. diff --git a/three-data-centers-in-two-cities-deployment.md b/three-data-centers-in-two-cities-deployment.md index 6b473cc1d431f..5308472584c77 100644 --- a/three-data-centers-in-two-cities-deployment.md +++ b/three-data-centers-in-two-cities-deployment.md @@ -28,7 +28,7 @@ The architecture of the cluster deployment is as follows: - The cluster has five replicas, two in AZ1, two in AZ2, and one in AZ3. For the TiKV component, each rack has a label, which means that each rack has a replica. - The Raft protocol is adopted to ensure consistency and high availability of data, which is transparent to users. -![3-AZ-in-2-region architecture](/media/three-data-centers-in-two-cities-deployment-01.png) +![3-AZ-in-2-region architecture](./media/three-data-centers-in-two-cities-deployment-01.png) This architecture is highly available. The distribution of Region leaders is restricted to the two AZs (AZ1 and AZ2) that are in the same region (Seattle). Compared with the three-AZ solution in which the distribution of Region leaders is not restricted, this architecture has the following advantages and disadvantages: @@ -48,7 +48,7 @@ This architecture is highly available. 
The distribution of Region leaders is res The configuration of the three AZs in two regions (Seattle and San Francisco) deployment plan is illustrated as follows: -![3-AZ-2-region](/media/three-data-centers-in-two-cities-deployment-02.png) +![3-AZ-2-region](./media/three-data-centers-in-two-cities-deployment-02.png) From the preceding illustration, you can see that Seattle has two AZs: AZ1 and AZ2. AZ1 has three sets of racks: rac1, rac2, and rac3. AZ2 has two racks: rac4 and rac5. The AZ3 in San Francisco has the rac6 rack. @@ -129,7 +129,7 @@ alertmanager_servers: In the deployment of three AZs in two regions, the label design requires taking availability and disaster recovery into account. It is recommended that you define the four levels (`az`, `replication zone`, `rack`, and `host`) based on the physical structure of the deployment. -![Label logical definition](/media/three-data-centers-in-two-cities-deployment-03.png) +![Label logical definition](./media/three-data-centers-in-two-cities-deployment-03.png) In the PD configuration, add level information of TiKV labels: diff --git a/ticdc-performance-tuning-methods.md b/ticdc-performance-tuning-methods.md index e7465ab3bb9f9..528e76607f374 100644 --- a/ticdc-performance-tuning-methods.md +++ b/ticdc-performance-tuning-methods.md @@ -44,7 +44,7 @@ As shown in the following diagram, because the upstream QPS is excessively high - Add more TiCDC nodes: scale out the TiCDC cluster to multiple nodes to increase processing capacity. - Optimize TiCDC node resources: increase CPU and memory configurations of the TiCDC node to improve performance. -![TiCDC overview](/media/performance/cdc/cdc-slow.png) +![TiCDC overview](./media/performance/cdc/cdc-slow.png) ### Data flow throughput metrics and downstream latency @@ -66,6 +66,6 @@ As shown in the following diagram, both upstream and downstream are TiDB cluster - During the first workload, because the downstream TiDB cluster writes data slowly, TiCDC consumes data at a speed that falls behind the upstream QPS, leading to a continuous increase in `Changefeed checkpoint lag`. However, `Changefeed resolved ts lag` remains within 300 milliseconds, indicating that replication lag and throughput bottlenecks are not caused by the puller and sorter modules but caused by the downstream sink module. - During the second workload, because the downstream TiDB cluster writes data faster, TiCDC replicates data at a speed that completely catches up with the upstream, the `Changefeed checkpoint lag` and `Changefeed resolved ts lag` remain within 500 milliseconds, which is a relatively ideal replication speed for TiCDC. -![TiCDC overview](/media/performance/cdc/cdc-fast-1.png) +![TiCDC overview](./media/performance/cdc/cdc-fast-1.png) -![data flow and txn latency](/media/performance/cdc/cdc-fast-2.png) +![data flow and txn latency](./media/performance/cdc/cdc-fast-2.png) diff --git a/ticdc/integrate-confluent-using-ticdc.md b/ticdc/integrate-confluent-using-ticdc.md index aa6de870b107f..9db6dbee6e226 100644 --- a/ticdc/integrate-confluent-using-ticdc.md +++ b/ticdc/integrate-confluent-using-ticdc.md @@ -154,7 +154,7 @@ After the preceding steps are done, TiCDC sends change logs of incremental data 2. Observe data in Confluent Cloud. - ![Confluent topics](/media/integrate/confluent-topics.png) + ![Confluent topics](./media/integrate/confluent-topics.png) In the Confluent Cloud Console, click **Topics**. You can see that the target topics have been created and are receiving data. 
At this time, incremental data of the TiDB database is successfully replicated to Confluent Cloud. @@ -175,19 +175,19 @@ Snowflake is a cloud native data warehouse. With Confluent, you can replicate Ti 2. In the Confluent Cloud Console, choose **Data integration** > **Connectors** > **Snowflake Sink**. The page shown below is displayed. - ![Add snowflake sink connector](/media/integrate/add-snowflake-sink-connector.png) + ![Add snowflake sink connector](./media/integrate/add-snowflake-sink-connector.png) 3. Select the topic you want to replicate to Snowflake. Then go to the next page. - ![Configuration](/media/integrate/configuration.png) + ![Configuration](./media/integrate/configuration.png) 4. Specify the authentication information for connecting Snowflake. Fill in **Database name** and **Schema name** with the values you created in the previous step. Then go to the next page. - ![Configuration](/media/integrate/configuration.png) + ![Configuration](./media/integrate/configuration.png) 5. On the **Configuration** page, select `AVRO` for both **Input Kafka record value format** and **Input Kafka record key format**. Then click **Continue**. Wait until the connector is created and the status becomes **Running**, which might take several minutes. - ![Data preview](/media/integrate/data-preview.png) + ![Data preview](./media/integrate/data-preview.png) 6. In the Snowflake console, choose **Data** > **Database** > **TPCC** > **TiCDC**. You can see that TiDB incremental data has been replicated to Snowflake. Data integration with Snowflake is done (see the preceding figure). However, the table structure in Snowflake is different from that in TiDB, and data is inserted into Snowflake incrementally. In most scenarios, you expect the data in Snowflake to be a replica of the data in TiDB, rather than storing TiDB change logs. This problem will be addressed in the next section. @@ -334,7 +334,7 @@ ksqlDB is a database purpose-built for stream processing applications. You can c SELECT * FROM ORDERS EMIT CHANGES; ``` - ![Select from orders](/media/integrate/select-from-orders.png) + ![Select from orders](./media/integrate/select-from-orders.png) You can see that the incremental data has been replicated to ksqlDB, as shown in the preceding figure. Data integration with ksqlDB is done. @@ -364,11 +364,11 @@ Microsoft SQL Server is a relational database management system (RDBMS) develope 2. In the Confluent Cloud Console, choose **Data integration** > **Connectors** > **Microsoft SQL Server Sink**. The page shown below is displayed. - ![Topic selection](/media/integrate/topic-selection.png) + ![Topic selection](./media/integrate/topic-selection.png) 3. Select the topic you want to replicate to SQL Server. Then go to the next page. - ![Authentication](/media/integrate/authentication.png) + ![Authentication](./media/integrate/authentication.png) 4. Fill in the connection and authentication information. Then go to the next page. @@ -386,6 +386,6 @@ Microsoft SQL Server is a relational database management system (RDBMS) develope 6. After configuration, click **Continue**. Wait until the connector status becomes **Running**, which might take several minutes. - ![Results](/media/integrate/results.png) + ![Results](./media/integrate/results.png) 7. Connect SQL Server and observe the data. You can see that the incremental data has been replicated to SQL Server, as shown in the preceding figure. Data integration with SQL Server is done. 
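A simple way to smoke-test any of these integrations end to end is to write an easily identifiable marker row in TiDB and confirm that it arrives downstream. A minimal sketch, assuming a hypothetical `tpcc.sink_check` table that is covered by the changefeed's filter rules:

```sql
-- On the upstream TiDB cluster: write a marker row
CREATE TABLE IF NOT EXISTS tpcc.sink_check (id BIGINT PRIMARY KEY, note VARCHAR(64));
INSERT INTO tpcc.sink_check VALUES (1, 'replication-check');

-- On the downstream system: after the changefeed catches up, the same row should be visible
SELECT * FROM sink_check WHERE id = 1;
```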
diff --git a/ticdc/monitor-ticdc.md b/ticdc/monitor-ticdc.md index 08f355e63f8bf..e71e64a41fcd2 100644 --- a/ticdc/monitor-ticdc.md +++ b/ticdc/monitor-ticdc.md @@ -15,7 +15,7 @@ cdc cli changefeed create --server=http://10.0.10.25:8300 --sink-uri="mysql://ro The TiCDC dashboard contains four monitoring panels. See the following screenshot: -![TiCDC Dashboard - Overview](/media/ticdc/ticdc-dashboard-overview.png) +![TiCDC Dashboard - Overview](./media/ticdc/ticdc-dashboard-overview.png) The description of each panel is as follows: @@ -28,7 +28,7 @@ The description of each panel is as follows: The following is an example of the **Server** panel: -![TiCDC Dashboard - Server metrics](/media/ticdc/ticdc-dashboard-server.png) +![TiCDC Dashboard - Server metrics](./media/ticdc/ticdc-dashboard-server.png) The description of each metric in the **Server** panel is as follows: @@ -44,7 +44,7 @@ The description of each metric in the **Server** panel is as follows: The following is an example of the **Changefeed** panel: -![TiCDC Dashboard - Changefeed metrics 1](/media/ticdc/ticdc-dashboard-changefeed-1.png) +![TiCDC Dashboard - Changefeed metrics 1](./media/ticdc/ticdc-dashboard-changefeed-1.png) - Changefeed table count: The number of tables that each TiCDC node needs to replicate in the replication task - Processor resolved ts: The timestamps that have been resolved in the TiCDC cluster @@ -55,20 +55,20 @@ The following is an example of the **Changefeed** panel: - Changefeed checkpoint lag: The progress lag of data replication (the unit is second) between the upstream and the downstream - Processor resolved ts lag: The progress lag of data replication (the unit is second) between the upstream and TiCDC nodes -![TiCDC Dashboard - Changefeed metrics 2](/media/ticdc/ticdc-dashboard-changefeed-2.png) +![TiCDC Dashboard - Changefeed metrics 2](./media/ticdc/ticdc-dashboard-changefeed-2.png) - Sink write duration: The histogram of the time spent by TiCDC writing a transaction change to the downstream - Sink write duration percentile: The time (P95, P99, and P999) spent by TiCDC writing a transaction change to the downstream within one second - Flush sink duration: The histogram of the time spent by TiCDC asynchronously flushing data to the downstream - Flush sink duration percentile: The time (P95, P99, and P999) spent by TiCDC asynchronously flushing data to the downstream within one second -![TiCDC Dashboard - Changefeed metrics 3](/media/ticdc/ticdc-dashboard-changefeed-3.png) +![TiCDC Dashboard - Changefeed metrics 3](./media/ticdc/ticdc-dashboard-changefeed-3.png) - MySQL sink conflict detect duration: The histogram of the time spent on detecting MySQL sink conflicts - MySQL sink conflict detect duration percentile: The time (P95, P99, and P999) spent on detecting MySQL sink conflicts within one second - MySQL sink worker load: The workload of MySQL sink workers of TiCDC nodes -![TiCDC Dashboard - Changefeed metrics 4](/media/ticdc/ticdc-dashboard-changefeed-4.png) +![TiCDC Dashboard - Changefeed metrics 4](./media/ticdc/ticdc-dashboard-changefeed-4.png) - Changefeed catch-up ETA: The estimated time needed for the replication task to catch up with the upstream cluster data. When the upstream write speed is faster than the TiCDC replication speed, the metric might be extremely large. Because TiCDC replication speed is subject to many factors, this metric is for reference only and might not be the actual replication time. 
@@ -76,9 +76,9 @@ The following is an example of the **Changefeed** panel: The following is an example of the **Events** panel: -![TiCDC Dashboard - Events metrics 2](/media/ticdc/ticdc-dashboard-events-1.png) -![TiCDC Dashboard - Events metrics 2](/media/ticdc/ticdc-dashboard-events-2.png) -![TiCDC Dashboard - Events metrics 2](/media/ticdc/ticdc-dashboard-events-3.png) +![TiCDC Dashboard - Events metrics 2](./media/ticdc/ticdc-dashboard-events-1.png) +![TiCDC Dashboard - Events metrics 2](./media/ticdc/ticdc-dashboard-events-2.png) +![TiCDC Dashboard - Events metrics 2](./media/ticdc/ticdc-dashboard-events-3.png) The description of each metric in the **Events** panel is as follows: @@ -106,8 +106,8 @@ The description of each metric in the **Events** panel is as follows: The following is an example of the **TiKV** panel: -![TiCDC Dashboard - TiKV metrics 1](/media/ticdc/ticdc-dashboard-tikv-1.png) -![TiCDC Dashboard - TiKV metrics 2](/media/ticdc/ticdc-dashboard-tikv-2.png) +![TiCDC Dashboard - TiKV metrics 1](./media/ticdc/ticdc-dashboard-tikv-1.png) +![TiCDC Dashboard - TiKV metrics 2](./media/ticdc/ticdc-dashboard-tikv-2.png) The description of each metric in the **TiKV** panel is as follows: diff --git a/ticdc/ticdc-architecture.md b/ticdc/ticdc-architecture.md index 880c8750ce850..e56274b226e79 100644 --- a/ticdc/ticdc-architecture.md +++ b/ticdc/ticdc-architecture.md @@ -9,7 +9,7 @@ summary: Learn the architecture and working principles of TiCDC. Consisting of multiple TiCDC nodes, a TiCDC cluster uses a distributed and stateless architecture. The design of TiCDC and its components is as follows: -![TiCDC architecture](/media/ticdc/ticdc-architecture-1.jpg) +![TiCDC architecture](./media/ticdc/ticdc-architecture-1.jpg) ## TiCDC components @@ -19,7 +19,7 @@ Each Capture process contains one or multiple Processor threads for replicating Each pipeline contains the following components: Puller, Sorter, Mounter, and Sink. -![TiCDC architecture](/media/ticdc/ticdc-architecture-2.jpg) +![TiCDC architecture](./media/ticdc/ticdc-architecture-2.jpg) These components work in serial with each other to complete the replication process, including pulling data, sorting data, loading data, and replicating data from the upstream to the downstream. The components are described as follows: @@ -30,7 +30,7 @@ These components work in serial with each other to complete the replication proc To realize high availability, each TiCDC cluster runs multiple TiCDC nodes. These nodes regularly report their status to the etcd cluster in PD, and elect one of the nodes as the owner of the TiCDC cluster. The owner node schedules data based on the status stored in etcd and writes the scheduling results to etcd. The Processor completes tasks according to the status in etcd. If the node running the Processor fails, the cluster schedules tables to other nodes. If the owner node fails, the Capture processes in other nodes will elect a new owner. See the following figure: -![TiCDC architecture](/media/ticdc/ticdc-architecture-3.PNG) +![TiCDC architecture](./media/ticdc/ticdc-architecture-3.PNG) ## Changefeeds and tasks @@ -63,13 +63,13 @@ The preceding `cdc cli changefeed create` command creates a changefeed task that The following is the TiCDC architecture diagram with Changefeed and Task included: -![TiCDC architecture](/media/ticdc/ticdc-architecture-6.jpg) +![TiCDC architecture](./media/ticdc/ticdc-architecture-6.jpg) In the preceding diagram, a changefeed is created to replicate four tables to downstream. 
This changefeed is split into three Tasks, which are sent to the three Capture processes respectively in the TiCDC cluster. After TiCDC processes the data, the data is replicated to the downstream system. TiCDC supports replicating data to MySQL, TiDB, and Kafka databases. The preceding diagram only illustrates the process of data transfer at the changefeed level. The following sections describe in detail how TiCDC processes data, using Task1 that replicates table `table1` as an example. -![TiCDC architecture](/media/ticdc/ticdc-architecture-5.jpg) +![TiCDC architecture](./media/ticdc/ticdc-architecture-5.jpg) 1. Push data: When a data change occurs, TiKV pushes data to the Puller module. 2. Scan incremental data: The Puller module pulls data from TiKV when it finds the data changes received not continuous. diff --git a/ticdc/ticdc-bidirectional-replication.md b/ticdc/ticdc-bidirectional-replication.md index 4e6d161e77693..1bd06e1f030f9 100644 --- a/ticdc/ticdc-bidirectional-replication.md +++ b/ticdc/ticdc-bidirectional-replication.md @@ -17,7 +17,7 @@ TiCDC only replicates incremental data changes that occur after a specified time 2. Deploy two TiCDC clusters between the two TiDB clusters. The cluster topology is as follows. The arrows in the diagram indicate the directions of data flow. - ![TiCDC bidirectional replication](/media/ticdc/ticdc-bidirectional-replication.png) + ![TiCDC bidirectional replication](./media/ticdc/ticdc-bidirectional-replication.png) 3. Specify the starting time point of data replication for the upstream and downstream clusters. diff --git a/ticdc/ticdc-changefeed-overview.md b/ticdc/ticdc-changefeed-overview.md index 23a3f80ccea3a..75efa67f99cfe 100644 --- a/ticdc/ticdc-changefeed-overview.md +++ b/ticdc/ticdc-changefeed-overview.md @@ -11,7 +11,7 @@ A changefeed is a replication task in TiCDC, which replicates the data change lo The state of a replication task represents the running status of the replication task. During the running of TiCDC, replication tasks might fail with errors, be manually paused, resumed, or reach the specified `TargetTs`. These behaviors can lead to the change of the replication task state. This section describes the states of TiCDC replication tasks and the transfer relationships between states. -![TiCDC state transfer](/media/ticdc/ticdc-changefeed-state-transfer.png) +![TiCDC state transfer](./media/ticdc/ticdc-changefeed-state-transfer.png) The states in the preceding state transfer diagram are described as follows: diff --git a/ticdc/ticdc-overview.md b/ticdc/ticdc-overview.md index c1d5ec955e75b..9aa2b2bbfc83c 100644 --- a/ticdc/ticdc-overview.md +++ b/ticdc/ticdc-overview.md @@ -63,7 +63,7 @@ TiCDC is an incremental data replication tool for TiDB, which is highly availabl The architecture of TiCDC is illustrated in the following figure: -![TiCDC architecture](/media/ticdc/cdc-architecture.png) +![TiCDC architecture](./media/ticdc/cdc-architecture.png) The components in the architecture diagram are described as follows: diff --git a/ticdc/ticdc-simple-protocol.md b/ticdc/ticdc-simple-protocol.md index db1ad3bc98fc6..e70a8a58cb62a 100644 --- a/ticdc/ticdc-simple-protocol.md +++ b/ticdc/ticdc-simple-protocol.md @@ -514,13 +514,13 @@ The consumption methods are introduced in the following two scenarios. In this scenario, the consumer starts consuming from the creation of a table, so the consumer can receive all DDL and BOOTSTRAP messages of the table. 
In this case, the consumer can obtain the schema information of the table through the `table` name and `schemaVersion` field of the DML message. The detailed process is as follows: -![TiCDC Simple Protocol consumer scene 1](/media/ticdc/ticdc-simple-consumer-1.png) +![TiCDC Simple Protocol consumer scene 1](./media/ticdc/ticdc-simple-consumer-1.png) ### Scenario 2: The consumer starts consuming from the middle When a new consumer joins the consumer group, it might start consuming from the middle, so it might miss earlier DDL and BOOTSTRAP messages of the table. In this case, the consumer might receive some DML messages before obtaining the schema information of the table. Therefore, the consumer needs to wait for a period of time until it receives the DDL or BOOTSTRAP message to obtain the schema information of the table. Because TiCDC sends BOOTSTRAP messages periodically, the consumer can always obtain the schema information of the table within a period of time. The detailed process is as follows: -![TiCDC Simple Protocol consumer scene 2](/media/ticdc/ticdc-simple-consumer-2.png) +![TiCDC Simple Protocol consumer scene 2](./media/ticdc/ticdc-simple-consumer-2.png) ## Reference diff --git a/ticdc/ticdc-storage-consumer-dev-guide.md b/ticdc/ticdc-storage-consumer-dev-guide.md index 18e1e01326d8a..1002d411d98f2 100644 --- a/ticdc/ticdc-storage-consumer-dev-guide.md +++ b/ticdc/ticdc-storage-consumer-dev-guide.md @@ -19,7 +19,7 @@ TiCDC does not provide any standard way for implementing a consumer. This docume The following diagram shows the overall consumption process of the consumer: -![TiCDC storage consumer overview](/media/ticdc/ticdc-storage-consumer-overview.png) +![TiCDC storage consumer overview](./media/ticdc/ticdc-storage-consumer-overview.png) The components of the consumer and their features are described as follows: diff --git a/ticdc/ticdc-summary-monitor.md b/ticdc/ticdc-summary-monitor.md index e03d63ba0f68c..2c170a06ec096 100644 --- a/ticdc/ticdc-summary-monitor.md +++ b/ticdc/ticdc-summary-monitor.md @@ -9,7 +9,7 @@ Starting from v7.0.0, when you deploy Grafana using TiUP, the TiCDC Summary Dash The following image shows the monitoring panels of the TiCDC Summary Dashboard: -![TiCDC Summary Dashboard - Overview](/media/ticdc/ticdc-summary-monitor.png) +![TiCDC Summary Dashboard - Overview](./media/ticdc/ticdc-summary-monitor.png) Each monitoring panel is described as follows: @@ -25,7 +25,7 @@ Each monitoring panel is described as follows: The **Server** panel is as follows: -![TiCDC Summary Dashboard - Server metrics](/media/ticdc/ticdc-summary-monitor-server.png) +![TiCDC Summary Dashboard - Server metrics](./media/ticdc/ticdc-summary-monitor-server.png) - **Uptime**: the time that TiCDC nodes have been running. - **CPU usage**: the CPU usage of TiCDC nodes. @@ -35,31 +35,31 @@ The **Server** panel is as follows: The **Changefeed** panel is as follows: -![TiCDC Summary Dashboard - Changefeed metrics](/media/ticdc/ticdc-summary-monitor-changefeed.png) +![TiCDC Summary Dashboard - Changefeed metrics](./media/ticdc/ticdc-summary-monitor-changefeed.png) - **Changefeed checkpoint lag**: indicates the data replication latency between the upstream TiDB cluster and the downstream system, measured in time. In general, this metric reflects the overall health of the data replication task. Usually, the smaller the lag, the better the status of the replication task. 
When the lag increases, it usually indicates that the replication ability of the changefeed or the consumption ability of the downstream system cannot keep up with the write speed of the upstream. - **Changefeed resolved ts lag**: indicates the data latency between the upstream TiDB cluster and the TiCDC node, measured in time. This metric reflects the ability of the changefeed to pull the data changes from the upstream. When the lag increases, it means that the changefeed cannot pull the data changes generated by the upstream in time. ## Dataflow panel -![TiCDC Summary Dashboard - Puller metrics](/media/ticdc/ticdc-summary-monitor-dataflow-puller.png) +![TiCDC Summary Dashboard - Puller metrics](./media/ticdc/ticdc-summary-monitor-dataflow-puller.png) - **Puller output events/s**: the number of data changes output by the Puller module to the Sorter module per second in the TiCDC node. This metric reflects the speed at which TiCDC pulls the data changes from the upstream. - **Puller output events**: the total number of data changes output by the Puller module to the Sorter module in the TiCDC node. -![TiCDC Summary Dashboard - Sorter metrics](/media/ticdc/ticdc-summary-monitor-dataflow-sorter.png) +![TiCDC Summary Dashboard - Sorter metrics](./media/ticdc/ticdc-summary-monitor-dataflow-sorter.png) - **Sorter output events/s**: the number of data changes output by the Sorter module per second to the Sink module in the TiCDC node. Note that the data output rate of Sorter is affected by the Sink module. Therefore, when you find that the output rate of the Sorter module is lower than that of the Puller module, it does not necessarily mean that the sorting speed of the Sorter module is too slow. You need to first observe the metrics related to the Sink module to confirm whether the Sink module takes a long time to flush data, resulting in a decrease in Sorter module output. - **Sorter output event**: the total number of data changes output by the Sorter module to the Sink module in the TiCDC node. -![TiCDC Summary Dashboard - Mounter metrics](/media/ticdc/ticdc-summary-monitor-dataflow-mounter.png) +![TiCDC Summary Dashboard - Mounter metrics](./media/ticdc/ticdc-summary-monitor-dataflow-mounter.png) - **Mounter output events/s**: the number of data changes decoded by the Mounter module per second in the TiCDC node. When the upstream data changes involve a large number of fields, the decoding speed of the Mounter module might be affected. - **Mounter output event**: the total number of data changes decoded by the Mounter module in the TiCDC node. -![TiCDC Summary Dashboard - Sink metrics](/media/ticdc/ticdc-summary-monitor-dataflow-sink.png) +![TiCDC Summary Dashboard - Sink metrics](./media/ticdc/ticdc-summary-monitor-dataflow-sink.png) - **Sink flush rows/s**: the number of data changes output by the Sink module to the downstream per second in the TiCDC node. This metric reflects the speed at which the data changes are replicated to the downstream. When **Sink flush rows/s** is lower than **Puller output events/s**, the replication latency might increase. @@ -69,7 +69,7 @@ The **Changefeed** panel is as follows: The **Transaction Sink** panel displays data only when the downstream is MySQL or TiDB. 
-![TiCDC Summary Dashboard - Transaction Sink metrics](/media/ticdc/ticdc-summary-monitor-transaction-sink.png) +![TiCDC Summary Dashboard - Transaction Sink metrics](./media/ticdc/ticdc-summary-monitor-transaction-sink.png) - **Backend Flush Duration**: the duration that the TiCDC Transaction Sink module takes to execute a SQL statement on the downstream. By observing this metric, you can determine whether the performance of the downstream is the bottleneck of the replication speed. Generally, the p999 value should be below 500 ms. When the value exceeds this limit, the replication speed might be affected, resulting in an increase in the Changefeed checkpoint lag. @@ -79,7 +79,7 @@ The **Transaction Sink** panel displays data only when the downstream is MySQL o The **MQ Sink** panel displays data only when the downstream is Kafka. -![TiCDC Summary Dashboard - Transaction Sink metrics](/media/ticdc/ticdc-summary-monitor-mq-sink.png) +![TiCDC Summary Dashboard - Transaction Sink metrics](./media/ticdc/ticdc-summary-monitor-mq-sink.png) - **Worker Send Message Duration Percentile**: The latency of TiCDC MQ Sink worker sending data to the downstream. - **Kafka Ongoing Bytes**: The speed at which TiCDC MQ Sink sends data to the downstream. @@ -88,7 +88,7 @@ The **MQ Sink** panel displays data only when the downstream is Kafka. The **Cloud Storage Sink** panel displays data only when the downstream is Cloud Storage. -![TiCDC Summary Dashboard - Transaction Sink metrics](/media/ticdc/ticdc-summary-monitor-cloud-storage.png) +![TiCDC Summary Dashboard - Transaction Sink metrics](./media/ticdc/ticdc-summary-monitor-cloud-storage.png) - **Write Bytes/s**: The speed at which the Cloud Storage Sink module writes data to the downstream. - **File Count**: the total number of files written by the Cloud Storage Sink module. @@ -97,7 +97,7 @@ The **Cloud Storage Sink** panel displays data only when the downstream is Cloud The **Redo** panel displays data only when the Redo Log feature is enabled. -![TiCDC Summary Dashboard - Transaction Sink metrics](/media/ticdc/ticdc-summary-monitor-redo.png) +![TiCDC Summary Dashboard - Transaction Sink metrics](./media/ticdc/ticdc-summary-monitor-redo.png) - **Redo Write rows/s**: the number of rows written per second by the Redo module. When the Redo feature is enabled, if the latency of a replication task increases, you can observe whether there is a significant difference between this metric and the value of Puller Output event/s. If so, the increase in latency might be due to the insufficient writing capacity of the Redo module. - **Redo Write byte/s**: the speed at which data is written per second by the Redo module. diff --git a/tidb-architecture.md b/tidb-architecture.md index 50fb12f6b32cf..be8a4b07d460e 100644 --- a/tidb-architecture.md +++ b/tidb-architecture.md @@ -21,7 +21,7 @@ Compared with the traditional standalone databases, TiDB has the following advan As a distributed database, TiDB is designed to consist of multiple components. These components communicate with each other and form a complete TiDB system. 
The architecture is as follows: -![TiDB Architecture](/media/tidb-architecture-v6.png) +![TiDB Architecture](./media/tidb-architecture-v6.png) ## TiDB server diff --git a/tidb-cloud/changefeed-sink-to-cloud-storage.md b/tidb-cloud/changefeed-sink-to-cloud-storage.md index 596a2df4db24b..92bcde97e39e1 100644 --- a/tidb-cloud/changefeed-sink-to-cloud-storage.md +++ b/tidb-cloud/changefeed-sink-to-cloud-storage.md @@ -27,7 +27,7 @@ Navigate to the cluster overview page of the target TiDB cluster. Click **Change For **Amazon S3**, fill the **S3 Endpoint** area: `S3 URI`, `Access Key ID`, and `Secret Access Key`. Make the S3 bucket in the same region with your TiDB cluster. -![s3_endpoint](/media/tidb-cloud/changefeed/sink-to-cloud-storage-s3-endpoint.jpg) +![s3_endpoint](./media/tidb-cloud/changefeed/sink-to-cloud-storage-s3-endpoint.jpg)
@@ -36,14 +36,14 @@ For **GCS**, before filling **GCS Endpoint**, you need to first grant the GCS bu 1. In the TiDB Cloud console, record the **Service Account ID**, which will be used to grant TiDB Cloud access to your GCS bucket. - ![gcs_endpoint](/media/tidb-cloud/changefeed/sink-to-cloud-storage-gcs-endpoint.png) + ![gcs_endpoint](./media/tidb-cloud/changefeed/sink-to-cloud-storage-gcs-endpoint.png) 2. In the Google Cloud console, create an IAM role for your GCS bucket. 1. Sign in to the [Google Cloud console](https://console.cloud.google.com/). 2. Go to the [Roles](https://console.cloud.google.com/iam-admin/roles) page, and then click **Create role**. - ![Create a role](/media/tidb-cloud/changefeed/sink-to-cloud-storage-gcs-create-role.png) + ![Create a role](./media/tidb-cloud/changefeed/sink-to-cloud-storage-gcs-create-role.png) 3. Enter a name, description, ID, and role launch stage for the role. The role name cannot be changed after the role is created. 4. Click **Add permissions**. Add the following permissions to the role, and then click **Add**. @@ -55,13 +55,13 @@ For **GCS**, before filling **GCS Endpoint**, you need to first grant the GCS bu - storage.objects.list - storage.objects.update - ![Add permissions](/media/tidb-cloud/changefeed/sink-to-cloud-storage-gcs-assign-permission.png) + ![Add permissions](./media/tidb-cloud/changefeed/sink-to-cloud-storage-gcs-assign-permission.png) 3. Go to the [Bucket](https://console.cloud.google.com/storage/browser) page, and choose a GCS bucket you want TiDB Cloud to access. Note that the GCS bucket must be in the same region as your TiDB cluster. 4. On the **Bucket details** page, click the **Permissions** tab, and then click **Grant access**. - ![Grant Access to the bucket ](/media/tidb-cloud/changefeed/sink-to-cloud-storage-gcs-grant-access-1.png) + ![Grant Access to the bucket ](./media/tidb-cloud/changefeed/sink-to-cloud-storage-gcs-grant-access-1.png) 5. Fill in the following information to grant access to your bucket, and then click **Save**. @@ -76,11 +76,11 @@ For **GCS**, before filling **GCS Endpoint**, you need to first grant the GCS bu - To get a bucket's gsutil URI, click the copy button and add `gs://` as a prefix. For example, if the bucket name is `test-sink-gcs`, the URI would be `gs://test-sink-gcs/`. - ![Get bucket URI](/media/tidb-cloud/changefeed/sink-to-cloud-storage-gcs-uri01.png) + ![Get bucket URI](./media/tidb-cloud/changefeed/sink-to-cloud-storage-gcs-uri01.png) - To get a folder's gsutil URI, open the folder, click the copy button, and add `gs://` as a prefix. For example, if the bucket name is `test-sink-gcs` and the folder name is `changefeed-xxx`, the URI would be `gs://test-sink-gcs/changefeed-xxx/`. - ![Get bucket URI](/media/tidb-cloud/changefeed/sink-to-cloud-storage-gcs-uri02.png) + ![Get bucket URI](./media/tidb-cloud/changefeed/sink-to-cloud-storage-gcs-uri02.png) 7. In the TiDB Cloud console, go to the Changefeed's **Configure Destination** page, and fill in the **bucket gsutil URI** field. @@ -96,7 +96,7 @@ Click **Next** to establish the connection from the TiDB Cloud Dedicated cluster 1. Customize **Table Filter** to filter the tables that you want to replicate. For the rule syntax, refer to [table filter rules](https://docs.pingcap.com/tidb/stable/ticdc-filter#changefeed-log-filters). 
- ![the table filter of changefeed](/media/tidb-cloud/changefeed/sink-to-s3-02-table-filter.jpg) + ![the table filter of changefeed](./media/tidb-cloud/changefeed/sink-to-s3-02-table-filter.jpg) - **Filter Rules**: you can set filter rules in this column. By default, there is a rule `*.*`, which stands for replicating all tables. When you add a new rule, TiDB Cloud queries all the tables in TiDB and displays only the tables that match the rules in the box on the right. You can add up to 100 filter rules. - **Tables with valid keys**: this column displays the tables that have valid keys, including primary keys or unique indexes. @@ -143,7 +143,7 @@ Click **Next** to establish the connection from the TiDB Cloud Dedicated cluster - **Flush Interval**: set to 60 seconds by default, adjustable within a range of 2 seconds to 10 minutes; - **File Size**: set to 64 MB by default, adjustable within a range of 1 MB to 512 MB. - ![Flush Parameters](/media/tidb-cloud/changefeed/sink-to-cloud-storage-flush-parameters.jpg) + ![Flush Parameters](./media/tidb-cloud/changefeed/sink-to-cloud-storage-flush-parameters.jpg) > **Note:** > diff --git a/tidb-cloud/config-s3-and-gcs-access.md b/tidb-cloud/config-s3-and-gcs-access.md index 38b1e3489545a..d0cf133da2010 100644 --- a/tidb-cloud/config-s3-and-gcs-access.md +++ b/tidb-cloud/config-s3-and-gcs-access.md @@ -44,11 +44,11 @@ Configure the bucket access for TiDB Cloud and get the Role ARN as follows: 1. Sign in to the AWS Management Console and open the Amazon S3 console at [https://console.aws.amazon.com/s3/](https://console.aws.amazon.com/s3/). 2. In the **Buckets** list, choose the name of your bucket with the source data, and then click **Copy ARN** to get your S3 bucket ARN (for example, `arn:aws:s3:::tidb-cloud-source-data`). Take a note of the bucket ARN for later use. - ![Copy bucket ARN](/media/tidb-cloud/copy-bucket-arn.png) + ![Copy bucket ARN](./media/tidb-cloud/copy-bucket-arn.png) 3. Open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/), click **Policies** in the navigation pane on the left, and then click **Create Policy**. - ![Create a policy](/media/tidb-cloud/aws-create-policy.png) + ![Create a policy](./media/tidb-cloud/aws-create-policy.png) 4. On the **Create policy** page, click the **JSON** tab. 5. Copy the following access policy template and paste it to the policy text field. @@ -111,7 +111,7 @@ Configure the bucket access for TiDB Cloud and get the Role ARN as follows: 1. In the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/), click **Roles** in the navigation pane on the left, and then click **Create role**. - ![Create a role](/media/tidb-cloud/aws-create-role.png) + ![Create a role](./media/tidb-cloud/aws-create-role.png) 2. To create a role, fill in the following information: @@ -123,7 +123,7 @@ Configure the bucket access for TiDB Cloud and get the Role ARN as follows: 4. Under **Role details**, set a name for the role, and then click **Create role** in the lower-right corner. After the role is created, the list of roles is displayed. 5. In the list of roles, click the name of the role that you just created to go to its summary page, and then copy the role ARN. - ![Copy AWS role ARN](/media/tidb-cloud/aws-role-arn.png) + ![Copy AWS role ARN](./media/tidb-cloud/aws-role-arn.png) 4. 
In the TiDB Cloud console, go to the **Data Import** page where you get the TiDB Cloud account ID and external ID, and then paste the role ARN to the **Role ARN** field. @@ -179,7 +179,7 @@ To allow TiDB Cloud to access the source data in your GCS bucket, you need to co 1. Sign in to the [Google Cloud console](https://console.cloud.google.com/). 2. Go to the [Roles](https://console.cloud.google.com/iam-admin/roles) page, and then click **CREATE ROLE**. - ![Create a role](/media/tidb-cloud/gcp-create-role.png) + ![Create a role](./media/tidb-cloud/gcp-create-role.png) 3. Enter a name, description, ID, and role launch stage for the role. The role name cannot be changed after the role is created. 4. Click **ADD PERMISSIONS**. @@ -191,13 +191,13 @@ To allow TiDB Cloud to access the source data in your GCS bucket, you need to co You can copy a permission name to the **Enter property name or value** field as a filter query, and choose the name in the filter result. To add the three permissions, you can use **OR** between the permission names. - ![Add permissions](/media/tidb-cloud/gcp-add-permissions.png) + ![Add permissions](./media/tidb-cloud/gcp-add-permissions.png) 3. Go to the [Bucket](https://console.cloud.google.com/storage/browser) page, and click the name of the GCS bucket you want TiDB Cloud to access. 4. On the **Bucket details** page, click the **PERMISSIONS** tab, and then click **GRANT ACCESS**. - ![Grant Access to the bucket ](/media/tidb-cloud/gcp-bucket-permissions.png) + ![Grant Access to the bucket ](./media/tidb-cloud/gcp-bucket-permissions.png) 5. Fill in the following information to grant access to your bucket, and then click **SAVE**. @@ -212,12 +212,12 @@ To allow TiDB Cloud to access the source data in your GCS bucket, you need to co If you want to copy a file's gsutil URI, select the file, click **Open object overflow menu**, and then click **Copy gsutil URI**. - ![Get bucket URI](/media/tidb-cloud/gcp-bucket-uri01.png) + ![Get bucket URI](./media/tidb-cloud/gcp-bucket-uri01.png) If you want to use a folder's gsutil URI, open the folder, and then click the copy button following the folder name to copy the folder name. After that, you need to add `gs://` to the beginning and `/` to the end of the name to get a correct URI of the folder. For example, if the folder name is `tidb-cloud-source-data`, you need to use `gs://tidb-cloud-source-data/` as the URI. - ![Get bucket URI](/media/tidb-cloud/gcp-bucket-uri02.png) + ![Get bucket URI](./media/tidb-cloud/gcp-bucket-uri02.png) 7. In the TiDB Cloud console, go to the **Data Import** page where you get the Google Cloud Service Account ID, and then paste the GCS bucket gsutil URI to the **Bucket gsutil URI** field. For example, paste `gs://tidb-cloud-source-data/`. diff --git a/tidb-cloud/csv-config-for-import-data.md b/tidb-cloud/csv-config-for-import-data.md index fa6c7f556937c..febff8e218c36 100644 --- a/tidb-cloud/csv-config-for-import-data.md +++ b/tidb-cloud/csv-config-for-import-data.md @@ -9,7 +9,7 @@ This document introduces CSV configurations for the Import Data service on TiDB The following is the CSV Configuration window when you use the Import Data service on TiDB Cloud to import CSV files. For more information, see [Import CSV Files from Amazon S3 or GCS into TiDB Cloud](/tidb-cloud/import-csv-files.md). 
-![CSV Configurations](/media/tidb-cloud/import-data-csv-config.png) +![CSV Configurations](./media/tidb-cloud/import-data-csv-config.png) ## Separator diff --git a/tidb-cloud/data-service-integrations.md b/tidb-cloud/data-service-integrations.md index 32b6ddc0f72a7..06d114c1eeaaf 100644 --- a/tidb-cloud/data-service-integrations.md +++ b/tidb-cloud/data-service-integrations.md @@ -19,7 +19,7 @@ To integrate your Data App with GPTs, perform the following steps: 2. In the left pane, locate your target Data App, click the name of your target Data App, and then click the **Integrations** tab. 3. In the **Integrate with GPTs** area, click **Get Configuration**. - ![Get Configuration](/media/tidb-cloud/data-service/GPTs1.png) + ![Get Configuration](./media/tidb-cloud/data-service/GPTs1.png) 4. In the displayed dialog box, you can see the following fields: @@ -29,7 +29,7 @@ To integrate your Data App with GPTs, perform the following steps: c. **API Key Encoded**: copy the base64 encoded string equivalent to the API key you have provided. - ![GPTs Dialog Box](/media/tidb-cloud/data-service/GPTs2.png) + ![GPTs Dialog Box](./media/tidb-cloud/data-service/GPTs2.png) 5. Use the copied API Specification URL and the encoded API key in your GPT configuration. diff --git a/tidb-cloud/dev-guide-bi-looker-studio.md b/tidb-cloud/dev-guide-bi-looker-studio.md index e1b3c8b357b1a..a15e9952f7c13 100644 --- a/tidb-cloud/dev-guide-bi-looker-studio.md +++ b/tidb-cloud/dev-guide-bi-looker-studio.md @@ -78,7 +78,7 @@ If you encounter any issues during import, you can cancel this import task as fo - **Password**: enter the `PASSWORD` parameter from the TiDB Cloud Serverless connection dialog. - **Enable SSL**: select this option, and then click the upload icon to the right of **MySQL SSL Client Configuration Files** to upload the CA file downloaded from [Step 2](#step-2-get-the-connection-information-for-your-cluster). - ![Looker Studio: configure connection settings for TiDB Cloud Serverless](/media/tidb-cloud/looker-studio-configure-connection.png) + ![Looker Studio: configure connection settings for TiDB Cloud Serverless](./media/tidb-cloud/looker-studio-configure-connection.png) 4. Click **AUTHENTICATE**. @@ -90,7 +90,7 @@ Now, you can use the TiDB cluster as a data source and create a simple chart wit 1. In the right pane, click **CUSTOM QUERY**. - ![Looker Studio: custom query](/media/tidb-cloud/looker-studio-custom-query.png) + ![Looker Studio: custom query](./media/tidb-cloud/looker-studio-custom-query.png) 2. Copy the following code to the **Enter Custom Query** area, and then click **Add** in the lower-right corner. @@ -124,7 +124,7 @@ Now, you can use the TiDB cluster as a data source and create a simple chart wit Then, you can see a combo chart similar as follows: -![Looker Studio: A simple Combo chart](/media/tidb-cloud/looker-studio-simple-chart.png) +![Looker Studio: A simple Combo chart](./media/tidb-cloud/looker-studio-simple-chart.png) ## Next steps diff --git a/tidb-cloud/integrate-tidbcloud-with-airbyte.md b/tidb-cloud/integrate-tidbcloud-with-airbyte.md index a600ba1186694..962fd142b1cfd 100644 --- a/tidb-cloud/integrate-tidbcloud-with-airbyte.md +++ b/tidb-cloud/integrate-tidbcloud-with-airbyte.md @@ -66,7 +66,7 @@ Conveniently, the steps are the same for setting TiDB as the source and the dest 4. Click **Set up source** or **destination** to complete creating the connector. The following screenshot shows the configuration of TiDB as the source. 
-![TiDB source configuration](/media/tidb-cloud/integration-airbyte-parameters.jpg) +![TiDB source configuration](./media/tidb-cloud/integration-airbyte-parameters.jpg) You can use any combination of sources and destinations, such as TiDB to Snowflake, and CSV files to TiDB. @@ -92,13 +92,13 @@ The following steps use TiDB as both a source and a destination. Other connector > - In Incremental mode, Airbyte only reads records added to the source since the last sync job. The first sync using Incremental mode is equivalent to Full Refresh mode. > - In Full Refresh mode, Airbyte reads all records in the source and replicates to the destination in every sync task. You can set the sync mode for every table named **Namespace** in Airbyte individually. - ![Set up connection](/media/tidb-cloud/integration-airbyte-connection.jpg) + ![Set up connection](./media/tidb-cloud/integration-airbyte-connection.jpg) 7. Set **Normalization & Transformation** to **Normalized tabular data** to use the default normalization mode, or you can set the dbt file for your job. For more information about normalization, refer to [Transformations and Normalization](https://docs.airbyte.com/operator-guides/transformation-and-normalization/transformations-with-dbt). 8. Click **Set up connection**. 9. Once the connection is established, click **ENABLED** to activate the synchronization task. You can also click **Sync now** to sync immediately. -![Sync data](/media/tidb-cloud/integration-airbyte-sync.jpg) +![Sync data](./media/tidb-cloud/integration-airbyte-sync.jpg) ## Limitations diff --git a/tidb-cloud/integrate-tidbcloud-with-n8n.md b/tidb-cloud/integrate-tidbcloud-with-n8n.md index eb4a615bb2070..1741c65fae4d8 100644 --- a/tidb-cloud/integrate-tidbcloud-with-n8n.md +++ b/tidb-cloud/integrate-tidbcloud-with-n8n.md @@ -74,7 +74,7 @@ This example usage workflow would use the following nodes: The final workflow should look like the following image. -![img](/media/tidb-cloud/integration-n8n-workflow-rss.jpg) +![img](./media/tidb-cloud/integration-n8n-workflow-rss.jpg) ### (Optional) Create a TiDB Cloud Serverless cluster diff --git a/tidb-cloud/integrate-tidbcloud-with-vercel.md b/tidb-cloud/integrate-tidbcloud-with-vercel.md index f576c97d633e5..d580fc28ba314 100644 --- a/tidb-cloud/integrate-tidbcloud-with-vercel.md +++ b/tidb-cloud/integrate-tidbcloud-with-vercel.md @@ -95,7 +95,7 @@ The detailed steps are as follows: 7. Choose whether to enable **Branching** to create new branches for preview environments. 8. Click **Add Integration and Return to Vercel**. -![Vercel Integration Page](/media/tidb-cloud/vercel/integration-link-cluster-page.png) +![Vercel Integration Page](./media/tidb-cloud/vercel/integration-link-cluster-page.png) 6. Get back to your Vercel dashboard, go to your Vercel project, click **Settings** > **Environment Variables**, and check whether the environment variables for your target TiDB cluster have been automatically added. @@ -139,7 +139,7 @@ The detailed steps are as follows: 4. Select your target TiDB Data App. 6. Click **Add Integration and Return to Vercel**. -![Vercel Integration Page](/media/tidb-cloud/vercel/integration-link-data-app-page.png) +![Vercel Integration Page](./media/tidb-cloud/vercel/integration-link-data-app-page.png) 6. Get back to your Vercel dashboard, go to your Vercel project, click **Settings** > **Environment Variables**, and check whether the environment variables for your target Data App have been automatically added. 
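Before relying on the integration, it can help to confirm locally that the variables really were created (a minimal sketch, assuming the Vercel CLI is installed and the project is linked; the `TIDB`/`DATA_APP` patterns in the `grep` filter are placeholders, not names guaranteed by the integration):

```bash
# Pull the project's Development environment variables into a local file.
vercel env pull .env.local

# List every environment variable configured for the project.
vercel env ls

# Look for the entries created by the TiDB Cloud integration.
# The name patterns below are placeholders - adjust them to what you see in the dashboard.
grep -iE 'TIDB|DATA_APP' .env.local
```

If the expected entries are missing, re-check the integration configuration before continuing.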
@@ -163,7 +163,7 @@ If you have installed [TiDB Cloud Vercel integration](https://vercel.com/integra 3. Click **Configure**. 4. Click **Add Link** or **Remove** to add or remove connections. - ![Vercel Integration Configuration Page](/media/tidb-cloud/vercel/integration-vercel-configuration-page.png) + ![Vercel Integration Configuration Page](./media/tidb-cloud/vercel/integration-vercel-configuration-page.png) When you remove a connection, the environment variables set by the integration workflow are removed from the Vercel project, too. However, this action does not affect the data of the TiDB Cloud Serverless cluster. @@ -192,15 +192,15 @@ After you push changes to the Git repository, Vercel will trigger a preview depl 2. Add some changes and push the changes to the remote repository. 3. Vercel will trigger a preview deployment for the new branch. - ![Vercel Preview_Deployment](/media/tidb-cloud/vercel/vercel-preview-deployment.png) + ![Vercel Preview_Deployment](./media/tidb-cloud/vercel/vercel-preview-deployment.png) 1. During the deployment, TiDB Cloud integration will automatically create a TiDB Cloud Serverless branch with the same name as the Git branch. If the TiDB Cloud Serverless branch already exists, TiDB Cloud integration will skip this step. - ![TiDB_Cloud_Branch_Check](/media/tidb-cloud/vercel/tidbcloud-branch-check.png) + ![TiDB_Cloud_Branch_Check](./media/tidb-cloud/vercel/tidbcloud-branch-check.png) 2. After the TiDB Cloud Serverless branch is ready, TiDB Cloud integration will set environment variables in the preview deployment for the Vercel project. - ![Preview_Envs](/media/tidb-cloud/vercel/preview-envs.png) + ![Preview_Envs](./media/tidb-cloud/vercel/preview-envs.png) 3. TiDB Cloud integration will also register a blocking check to wait for the TiDB Cloud Serverless branch to be ready. You can rerun the check manually. 4. After the check is passed, you can visit the preview deployment to see the changes. @@ -224,7 +224,7 @@ After you push changes to the Git repository, Vercel will trigger a preview depl 2. Go to your Vercel dashboard > Vercel project > **Settings** > **Environment Variables**, and then [declare each environment variable value](https://vercel.com/docs/concepts/projects/environment-variables#declare-an-environment-variable) according to the connection information of your TiDB cluster. - ![Vercel Environment Variables](/media/tidb-cloud/vercel/integration-vercel-environment-variables.png) + ![Vercel Environment Variables](./media/tidb-cloud/vercel/integration-vercel-environment-variables.png) Here we use a Prisma application as an example. The following is a datasource setting in the Prisma schema file for a TiDB Cloud Serverless cluster: @@ -249,7 +249,7 @@ You can get the information of ``, ``, ``, ``, a 2. Go to your Vercel dashboard > Vercel project > **Settings** > **Environment Variables**, and then [declare each environment variable value](https://vercel.com/docs/concepts/projects/environment-variables#declare-an-environment-variable) according to the connection information of your Data App. - ![Vercel Environment Variables](/media/tidb-cloud/vercel/integration-vercel-environment-variables.png) + ![Vercel Environment Variables](./media/tidb-cloud/vercel/integration-vercel-environment-variables.png) In Vercel, you can declare the environment variables as follows. 
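If you prefer the command line to the dashboard for this step, the variables can also be declared with the Vercel CLI (a hedged sketch, assuming the CLI is installed and linked to the project; the `DATA_APP_*` names are placeholders for whatever variable names your application reads, not names defined by TiDB Cloud or Vercel):

```bash
# Each command prompts for the value and scopes it to the production environment.
# Replace the placeholder names with the names your application actually expects.
vercel env add DATA_APP_BASE_URL production
vercel env add DATA_APP_PUBLIC_KEY production
vercel env add DATA_APP_PRIVATE_KEY production
```

Repeat the commands for the `preview` and `development` environments if your application needs the Data App connection there as well.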
diff --git a/tidb-cloud/integrate-tidbcloud-with-zapier.md b/tidb-cloud/integrate-tidbcloud-with-zapier.md index b31a1b17d0e94..8b6166a52c30a 100644 --- a/tidb-cloud/integrate-tidbcloud-with-zapier.md +++ b/tidb-cloud/integrate-tidbcloud-with-zapier.md @@ -65,7 +65,7 @@ In the editor page, you can see the trigger and action. Click the trigger to set 2. On the login page, fill in your public key and private key. To get the TiDB Cloud API key, follow the instructions in [TiDB Cloud API documentation](https://docs.pingcap.com/tidbcloud/api/v1beta#section/Authentication/API-Key-Management). 3. Click **Continue**. - ![Account](/media/tidb-cloud/zapier/zapier-tidbcloud-account.png) + ![Account](./media/tidb-cloud/zapier/zapier-tidbcloud-account.png) 3. Set up action @@ -73,19 +73,19 @@ In the editor page, you can see the trigger and action. Click the trigger to set 1. From the drop-down list, choose the project name and cluster name. The connection information of your cluster will be displayed automatically. - ![Set up project name and cluster name](/media/tidb-cloud/zapier/zapier-set-up-tidbcloud-project-and-cluster.png) + ![Set up project name and cluster name](./media/tidb-cloud/zapier/zapier-set-up-tidbcloud-project-and-cluster.png) 2. Enter your password. 3. From the drop-down list, choose the database. - ![Set up database name](/media/tidb-cloud/zapier/zapier-set-up-tidbcloud-databse.png) + ![Set up database name](./media/tidb-cloud/zapier/zapier-set-up-tidbcloud-databse.png) Zapier queries the databases from TiDB Cloud using the password you entered. If no database is found in your cluster, re-enter your password and refresh the page. 4. In **The table you want to search** box, fill in `github_global_event`. If the table does not exist, the template uses the following DDL to create the table. Click **Continue**. - ![The create table DDL](/media/tidb-cloud/zapier/zapier-tidbcloud-create-table-ddl.png) + ![The create table DDL](./media/tidb-cloud/zapier/zapier-tidbcloud-create-table-ddl.png) 4. Test action @@ -101,7 +101,7 @@ In the editor page, you can see the trigger and action. Click the trigger to set Select the account you have chosen when you set up the `Find Table in TiDB Cloud` action. Click **Continue**. - ![Choose account](/media/tidb-cloud/zapier/zapier-tidbcloud-choose-account.png) + ![Choose account](./media/tidb-cloud/zapier/zapier-tidbcloud-choose-account.png) 3. Set up action @@ -109,11 +109,11 @@ In the editor page, you can see the trigger and action. Click the trigger to set 2. In the **Table Name**, choose the **github_global_event** table from the drop-down list. The columns of the table are displayed. - ![Table columns](/media/tidb-cloud/zapier/zapier-set-up-tidbcloud-columns.png) + ![Table columns](./media/tidb-cloud/zapier/zapier-set-up-tidbcloud-columns.png) 3. In the **Columns** box, choose the corresponding data from the trigger. Fill in all the columns, and click **Continue**. - ![Fill in Columns](/media/tidb-cloud/zapier/zapier-fill-in-tidbcloud-triggers-data.png) + ![Fill in Columns](./media/tidb-cloud/zapier/zapier-fill-in-tidbcloud-triggers-data.png) 4. Test action @@ -133,7 +133,7 @@ In the editor page, you can see the trigger and action. Click the trigger to set Click **Publish** to publish your zap. You can see the zap is running in the [home page](https://zapier.com/app/zaps). 
-![Publish the zap](/media/tidb-cloud/zapier/zapier-tidbcloud-publish.png) +![Publish the zap](./media/tidb-cloud/zapier/zapier-tidbcloud-publish.png) Now, this zap will automatically record all the global events from your GitHub account into TiDB Cloud. @@ -231,6 +231,6 @@ Make sure that your custom query executes in less than 30 seconds. Otherwise, yo 2. In the`set up action` step, tick the `Create TiDB Cloud Table if it doesn’t exist yet?` box to enable `find and create`. - ![Find and create](/media/tidb-cloud/zapier/zapier-tidbcloud-find-and-create.png) + ![Find and create](./media/tidb-cloud/zapier/zapier-tidbcloud-find-and-create.png) This workflow creates a table if it does not exist yet. Note that the table will be created directly if you test your action. diff --git a/tidb-cloud/migrate-from-mysql-using-aws-dms.md b/tidb-cloud/migrate-from-mysql-using-aws-dms.md index 4a7fc78e0ddd7..b341f4b1814ee 100644 --- a/tidb-cloud/migrate-from-mysql-using-aws-dms.md +++ b/tidb-cloud/migrate-from-mysql-using-aws-dms.md @@ -37,7 +37,7 @@ Before you start the migration, make sure you have read the following: 2. Click **Create replication instance**. - ![Create replication instance](/media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-create-instance.png) + ![Create replication instance](./media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-create-instance.png) 3. Fill in an instance name, ARN, and description. @@ -60,19 +60,19 @@ Before you start the migration, make sure you have read the following: 1. In the [AWS DMS console](https://console.aws.amazon.com/dms/v2/home), click the replication instance that you just created. Copy the public and private network IP addresses as shown in the following screenshot. - ![Copy the public and private network IP addresses](/media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-copy-ip.png) + ![Copy the public and private network IP addresses](./media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-copy-ip.png) 2. Configure the security group rules for Amazon RDS. In this example, add the public and private IP addresses of the AWS DMS instance to the security group. - ![Configure the security group rules](/media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-rules.png) + ![Configure the security group rules](./media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-rules.png) 3. Click **Create endpoint** to create the source database endpoint. - ![Click Create endpoint](/media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-endpoint.png) + ![Click Create endpoint](./media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-endpoint.png) 4. In this example, click **Select RDS DB instance** and then select the source RDS instance. If the source database is a self-hosted MySQL, you can skip this step and fill in the information in the following steps. - ![Select RDS DB instance](/media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-select-rds.png) + ![Select RDS DB instance](./media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-select-rds.png) 5. Configure the following information: - **Endpoint identifier**: create a label for the source endpoint to help you identify it in the subsequent task configuration. @@ -83,19 +83,19 @@ Before you start the migration, make sure you have read the following: - Fill in the source database **Port**, **Username**, and **Password**. - **Secure Socket Layer (SSL) mode**: you can enable SSL mode as needed. 
- ![Fill in the endpoint configurations](/media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-endpoint-config.png) + ![Fill in the endpoint configurations](./media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-endpoint-config.png) 6. Use default values for **Endpoint settings**, **KMS key**, and **Tags**. In the **Test endpoint connection (optional)** section, it is recommended to select the same VPC as the source database to simplify the network configuration. Select the corresponding replication instance, and then click **Run test**. The status needs to be **successful**. 7. Click **Create endpoint**. - ![Click Create endpoint](/media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-connection.png) + ![Click Create endpoint](./media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-connection.png) ## Step 3. Create the target database endpoint 1. In the [AWS DMS console](https://console.aws.amazon.com/dms/v2/home), click the replication instance that you just created. Copy the public and private network IP addresses as shown in the following screenshot. - ![Copy the public and private network IP addresses](/media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-copy-ip.png) + ![Copy the public and private network IP addresses](./media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-copy-ip.png) 2. In the TiDB Cloud console, go to the [**Clusters**](https://tidbcloud.com/console/clusters) page, click the name of your target cluster, and then click **Connect** in the upper-right corner to get the TiDB Cloud database connection information. @@ -113,7 +113,7 @@ Before you start the migration, make sure you have read the following: - **Descriptive Amazon Resource Name (ARN) - optional**: create a friendly name for the default DMS ARN. - **Target engine**: select **MySQL**. - ![Configure the target endpoint](/media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-target-endpoint.png) + ![Configure the target endpoint](./media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-target-endpoint.png) 8. In the [AWS DMS console](https://console.aws.amazon.com/dms/v2/home), click **Create endpoint** to create the target database endpoint, and then configure the following information: - **Server name**: fill in the hostname of your TiDB cluster, which is the `-h` information you have recorded. @@ -123,23 +123,23 @@ Before you start the migration, make sure you have read the following: - **Secure Socket Layer (SSL) mode**: select **Verify-ca**. - Click **Add new CA certificate** to import the CA file downloaded from the TiDB Cloud console in the previous steps. - ![Fill in the target endpoint information](/media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-target-endpoint2.png) + ![Fill in the target endpoint information](./media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-target-endpoint2.png) 9. Import the CA file. - ![Upload CA](/media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-upload-ca.png) + ![Upload CA](./media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-upload-ca.png) 10. Use the default values for **Endpoint settings**, **KMS key**, and **Tags**. In the **Test endpoint connection (optional)** section, select the same VPC as the source database. Select the corresponding replication instance, and then click **Run test**. The status needs to be **successful**. 11. Click **Create endpoint**. 
- ![Click Create endpoint](/media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-target-endpoint3.png) + ![Click Create endpoint](./media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-target-endpoint3.png) ## Step 4. Create a database migration task 1. In the AWS DMS console, go to the [Data migration tasks](https://console.aws.amazon.com/dms/v2/home#tasks) page. Switch to your region. Then click **Create task** in the upper-right corner of the window. - ![Create task](/media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-create-task.png) + ![Create task](./media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-create-task.png) 2. Configure the following information: - **Task identifier**: fill in a name for the task. It is recommended to use a name that is easy to remember. @@ -149,7 +149,7 @@ Before you start the migration, make sure you have read the following: - **Target database endpoint**: select the target database endpoint that you just created. - **Migration type**: select a migration type as needed. In this example, select **Migrate existing data and replicate ongoing changes**. - ![Task configurations](/media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-task-config.png) + ![Task configurations](./media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-task-config.png) 3. Configure the following information: - **Editing mode**: select **Wizard**. @@ -161,23 +161,23 @@ Before you start the migration, make sure you have read the following: - **Turn on validation**: select it according to your needs. - **Task logs**: select **Turn on CloudWatch logs** for troubleshooting in future. Use the default settings for the related configurations. - ![Task settings](/media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-task-settings.png) + ![Task settings](./media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-task-settings.png) 4. In the **Table mappings** section, specify the database to be migrated. The schema name is the database name in the Amazon RDS instance. The default value of the **Source name** is "%", which means that all databases in the Amazon RDS will be migrated to TiDB. It will cause the system databases such as `mysql` and `sys` in Amazon RDS to be migrated to the TiDB cluster, and result in task failure. Therefore, it is recommended to fill in the specific database name, or filter out all system databases. For example, according to the settings in the following screenshot, only the database named `franktest` and all the tables in that database will be migrated. - ![Table mappings](/media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-table-mappings.png) + ![Table mappings](./media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-table-mappings.png) 5. Click **Create task** in the lower-right corner. 6. Go back to the [Data migration tasks](https://console.aws.amazon.com/dms/v2/home#tasks) page. Switch to your region. You can see the status and progress of the task. - ![Tasks status](/media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-task-status.png) + ![Tasks status](./media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-task-status.png) If you encounter any issues or failures during the migration, you can check the log information in [CloudWatch](https://console.aws.amazon.com/cloudwatch/home) to troubleshoot the issues. 
-![Troubleshooting](/media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-troubleshooting.png) +![Troubleshooting](./media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-to-tidb-cloud-troubleshooting.png) ## See also diff --git a/tidb-cloud/migrate-from-mysql-using-data-migration.md b/tidb-cloud/migrate-from-mysql-using-data-migration.md index ca58ca4a5e8a0..94815091b6f41 100644 --- a/tidb-cloud/migrate-from-mysql-using-data-migration.md +++ b/tidb-cloud/migrate-from-mysql-using-data-migration.md @@ -252,8 +252,8 @@ For detailed instructions about incremental data migration, see [Migrate Only In 2. Click **Next**. diff --git a/tidb-cloud/migrate-from-op-tidb.md b/tidb-cloud/migrate-from-op-tidb.md index 7bd0929305f2c..3c5e746d18fc6 100644 --- a/tidb-cloud/migrate-from-op-tidb.md +++ b/tidb-cloud/migrate-from-op-tidb.md @@ -145,9 +145,9 @@ Create an access key in the AWS console. See [Create an access key](https://docs 3. To create an access key, click **Create access key**. Then choose **Download .csv file** to save the access key ID and secret access key to a CSV file on your computer. Store the file in a secure location. You will not have access to the secret access key again after this dialog box closes. After you download the CSV file, choose **Close**. When you create an access key, the key pair is active by default, and you can use the pair right away. - ![Create access key](/media/tidb-cloud/op-to-cloud-create-access-key01.png) + ![Create access key](./media/tidb-cloud/op-to-cloud-create-access-key01.png) - ![Download CSV file](/media/tidb-cloud/op-to-cloud-create-access-key02.png) + ![Download CSV file](./media/tidb-cloud/op-to-cloud-create-access-key02.png) #### Step 3. Export data from the upstream TiDB cluster to Amazon S3 using Dumpling @@ -164,11 +164,11 @@ Do the following to export data from the upstream TiDB cluster to Amazon S3 usin The following screenshot shows how to get the S3 bucket URI information: - ![Get the S3 URI](/media/tidb-cloud/op-to-cloud-copy-s3-uri.png) + ![Get the S3 URI](./media/tidb-cloud/op-to-cloud-copy-s3-uri.png) The following screenshot shows how to get the region information: - ![Get the region information](/media/tidb-cloud/op-to-cloud-copy-region-info.png) + ![Get the region information](./media/tidb-cloud/op-to-cloud-copy-region-info.png) 3. Run Dumpling to export data to the Amazon S3 bucket. @@ -205,7 +205,7 @@ After you export data from the TiDB Self-Managed cluster to Amazon S3, you need The following screenshot shows how to get the Account ID and External ID: - ![Get the Account ID and External ID](/media/tidb-cloud/op-to-cloud-get-role-arn.png) + ![Get the Account ID and External ID](./media/tidb-cloud/op-to-cloud-get-role-arn.png) 2. Configure access permissions for Amazon S3. Usually you need the following read-only permissions: @@ -277,7 +277,7 @@ To replicate incremental data, do the following: 1. Get the start time of the incremental data migration. For example, you can get it from the metadata file of the full data migration. - ![Start Time in Metadata](/media/tidb-cloud/start_ts_in_metadata.png) + ![Start Time in Metadata](./media/tidb-cloud/start_ts_in_metadata.png) 2. Grant TiCDC to connect to TiDB Cloud. In the [TiDB Cloud console](https://tidbcloud.com/console/clusters), locate the cluster, and then go to the **Networking** page. Click **Add IP Address** > **Use IP addresses**. Fill in the public IP address of the TiCDC component in the **IP Address** field, and click **Confirm** to save it. Now TiCDC can access TiDB Cloud. 
For more information, see [Configure an IP Access List](/tidb-cloud/configure-ip-access-list.md). @@ -334,7 +334,7 @@ To replicate incremental data, do the following: tiup cdc cli changefeed list --pd=http://172.16.6.122:2379 ``` - ![Update Filter](/media/tidb-cloud/normal_status_in_replication_task.png) + ![Update Filter](./media/tidb-cloud/normal_status_in_replication_task.png) - Verify the replication. Write a new record to the upstream cluster, and then check whether the record is replicated to the downstream TiDB Cloud cluster. diff --git a/tidb-cloud/migrate-from-oracle-using-aws-dms.md b/tidb-cloud/migrate-from-oracle-using-aws-dms.md index 56518143e9753..10226f8447a58 100644 --- a/tidb-cloud/migrate-from-oracle-using-aws-dms.md +++ b/tidb-cloud/migrate-from-oracle-using-aws-dms.md @@ -29,7 +29,7 @@ At a high level, follow the following steps: The following diagram illustrates the high-level architecture. -![Architecture](/media/tidb-cloud/aws-dms-from-oracle-to-tidb-0.png) +![Architecture](./media/tidb-cloud/aws-dms-from-oracle-to-tidb-0.png) ## Prerequisites @@ -48,7 +48,7 @@ Log in to the [AWS console](https://console.aws.amazon.com/vpc/home#vpcs:) and c For instructions about how to create a VPC, see [Creating a VPC](https://docs.aws.amazon.com/vpc/latest/userguide/working-with-vpcs.html#Create-VPC). -![Create VPC](/media/tidb-cloud/aws-dms-from-oracle-to-tidb-1.png) +![Create VPC](./media/tidb-cloud/aws-dms-from-oracle-to-tidb-1.png) ## Step 2. Create an Oracle DB instance @@ -56,7 +56,7 @@ Create an Oracle DB instance in the VPC you just created, and remember the passw For instructions about how to create an Oracle DB instance, see [Creating an Oracle DB instance and connecting to a database on an Oracle DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_GettingStarted.CreatingConnecting.Oracle.html). -![Create Oracle RDS](/media/tidb-cloud/aws-dms-from-oracle-to-tidb-2.png) +![Create Oracle RDS](./media/tidb-cloud/aws-dms-from-oracle-to-tidb-2.png) ## Step 3. Prepare the table data in Oracle @@ -67,7 +67,7 @@ Using the following scripts to create and populate 10000 rows of data in the git After you finish executing the SQL script, check the data in Oracle. The following example uses [DBeaver](https://dbeaver.io/) to query the data: -![Oracle RDS Data](/media/tidb-cloud/aws-dms-from-oracle-to-tidb-3.png) +![Oracle RDS Data](./media/tidb-cloud/aws-dms-from-oracle-to-tidb-3.png) ## Step 4. Create a TiDB Cloud Serverless cluster @@ -87,7 +87,7 @@ After you finish executing the SQL script, check the data in Oracle. The followi 2. Create an AWS DMS replication instance with `dms.t3.large` in the VPC. - ![Create AWS DMS Instance](/media/tidb-cloud/aws-dms-from-oracle-to-tidb-8.png) + ![Create AWS DMS Instance](./media/tidb-cloud/aws-dms-from-oracle-to-tidb-8.png) > **Note:** > @@ -101,11 +101,11 @@ After you finish executing the SQL script, check the data in Oracle. The followi The following screenshot shows the configurations of the source endpoint. - ![Create AWS DMS Source endpoint](/media/tidb-cloud/aws-dms-from-oracle-to-tidb-9.png) + ![Create AWS DMS Source endpoint](./media/tidb-cloud/aws-dms-from-oracle-to-tidb-9.png) The following screenshot shows the configurations of the target endpoint. 
- ![Create AWS DMS Target endpoint](/media/tidb-cloud/aws-dms-from-oracle-to-tidb-10.png) + ![Create AWS DMS Target endpoint](./media/tidb-cloud/aws-dms-from-oracle-to-tidb-10.png) > **Note:** > @@ -123,25 +123,25 @@ For more information, see [Migrating your source schema to your target database 1. In the AWS DMS console, go to the [Data migration tasks](https://console.aws.amazon.com/dms/v2/home#tasks) page. Switch to your region. Then click **Create task** in the upper right corner of the window. - ![Create task](/media/tidb-cloud/aws-dms-to-tidb-cloud-create-task.png) + ![Create task](./media/tidb-cloud/aws-dms-to-tidb-cloud-create-task.png) 2. Create a database migration task and specify the **Selection rules**: - ![Create AWS DMS migration task](/media/tidb-cloud/aws-dms-from-oracle-to-tidb-11.png) + ![Create AWS DMS migration task](./media/tidb-cloud/aws-dms-from-oracle-to-tidb-11.png) - ![AWS DMS migration task selection rules](/media/tidb-cloud/aws-dms-from-oracle-to-tidb-12.png) + ![AWS DMS migration task selection rules](./media/tidb-cloud/aws-dms-from-oracle-to-tidb-12.png) 3. Create the task, start it, and then wait for the task to finish. 4. Click the **Table statistics** to check the table. The schema name is `ADMIN`. - ![Check AWS DMS migration task](/media/tidb-cloud/aws-dms-from-oracle-to-tidb-13.png) + ![Check AWS DMS migration task](./media/tidb-cloud/aws-dms-from-oracle-to-tidb-13.png) ## Step 9. Check data in the downstream TiDB cluster Connect to the [TiDB Cloud Serverless cluster](https://tidbcloud.com/console/clusters/create-cluster) and check the `admin.github_event` table data. As shown in the following screenshot, DMS successfully migrated table `github_events` and 10000 rows of data. -![Check Data In TiDB](/media/tidb-cloud/aws-dms-from-oracle-to-tidb-14.png) +![Check Data In TiDB](./media/tidb-cloud/aws-dms-from-oracle-to-tidb-14.png) ## Summary @@ -149,7 +149,7 @@ With AWS DMS, you can successfully migrate data from any upstream AWS RDS databa If you encounter any issues or failures during the migration, you can check the log information in [CloudWatch](https://console.aws.amazon.com/cloudwatch/home) to troubleshoot the issues. -![Troubleshooting](/media/tidb-cloud/aws-dms-to-tidb-cloud-troubleshooting.png) +![Troubleshooting](./media/tidb-cloud/aws-dms-to-tidb-cloud-troubleshooting.png) ## See also diff --git a/tidb-cloud/recovery-group-failover.md b/tidb-cloud/recovery-group-failover.md index 38d84ed7246e6..ba51ae3bec178 100644 --- a/tidb-cloud/recovery-group-failover.md +++ b/tidb-cloud/recovery-group-failover.md @@ -15,7 +15,7 @@ When the regional outage is resolved, the ability to reverse the replication fro Before performing a failover, a recovery group should have been created and be successfully replicating to the secondary cluster. For more information, see [Get Started with Recovery Groups](/tidb-cloud/recovery-group-get-started.md). -![Protected Recovery Group](/media/tidb-cloud/recovery-group/recovery-group-protected.png) +![Protected Recovery Group](./media/tidb-cloud/recovery-group/recovery-group-protected.png) ## Failover databases using a recovery group @@ -37,7 +37,7 @@ In the event of a disaster, you can use the recovery group to failover databases 6. Confirm that you understand the potentially disruptive nature of a failover by typing **Failover** into the confirmation entry and clicking **I understand, failover group** to begin the failover. 
- ![Fail Over Recovery Group](/media/tidb-cloud/recovery-group/recovery-group-failover.png) + ![Fail Over Recovery Group](./media/tidb-cloud/recovery-group/recovery-group-failover.png) ## Reprotect databases using a recovery group @@ -45,7 +45,7 @@ After a failover completes, the replica databases on the secondary cluster are n If the original primary cluster that was affected by the disaster can be brought online again, you can re-establish replication from the recovery region back to the original region using the **Reprotect** action. -![Unprotected Recovery Group](/media/tidb-cloud/recovery-group/recovery-group-unprotected.png) +![Unprotected Recovery Group](./media/tidb-cloud/recovery-group/recovery-group-unprotected.png) 1. In the [TiDB Cloud console](https://tidbcloud.com/), click in the lower-left corner, switch to the target project if you have multiple projects, and then click **Project Settings**. @@ -66,4 +66,4 @@ If the original primary cluster that was affected by the disaster can be brought 5. Confirm the reprotect operation by clicking **Reprotect** to begin the reprotect operation. - ![Reprotect Recovery Group](/media/tidb-cloud/recovery-group/recovery-group-reprotected.png) + ![Reprotect Recovery Group](./media/tidb-cloud/recovery-group/recovery-group-reprotected.png) diff --git a/tidb-cloud/recovery-group-overview.md b/tidb-cloud/recovery-group-overview.md index ebcf5a0a2c0c2..83ac35ca40b59 100644 --- a/tidb-cloud/recovery-group-overview.md +++ b/tidb-cloud/recovery-group-overview.md @@ -11,7 +11,7 @@ A TiDB Cloud recovery group allows you to replicate your databases between TiDB A recovery group consists of a set of replicated databases that can be failed over together between two TiDB Cloud Dedicated clusters. Each recovery group is assigned a primary cluster, and databases on this primary cluster are associated with the group and are then replicated to the secondary cluster. -![Recovery Group](/media/tidb-cloud/recovery-group/recovery-group-overview.png) +![Recovery Group](./media/tidb-cloud/recovery-group/recovery-group-overview.png) - Recovery Group: a group of databases that are replicated between two clusters - Primary Cluster: the cluster where the database is actively written by the application diff --git a/tidb-cloud/serverless-external-storage.md b/tidb-cloud/serverless-external-storage.md index a17a130528016..cf77052e66687 100644 --- a/tidb-cloud/serverless-external-storage.md +++ b/tidb-cloud/serverless-external-storage.md @@ -53,7 +53,7 @@ It is recommended that you use [AWS CloudFormation](https://docs.aws.amazon.com/ 5. After the CloudFormation stack is executed, you can click the **Outputs** tab and find the Role ARN value in the **Value** column. - ![img.png](/media/tidb-cloud/serverless-external-storage/serverless-role-arn.png) + ![img.png](./media/tidb-cloud/serverless-external-storage/serverless-role-arn.png) If you have any trouble creating a role ARN with AWS CloudFormation, you can take the following steps to create one manually: @@ -68,11 +68,11 @@ If you have any trouble creating a role ARN with AWS CloudFormation, you can tak 2. In the **Buckets** list, choose the name of your bucket with the source data, and then click **Copy ARN** to get your S3 bucket ARN (for example, `arn:aws:s3:::tidb-cloud-source-data`). Take a note of the bucket ARN for later use. - ![Copy bucket ARN](/media/tidb-cloud/copy-bucket-arn.png) + ![Copy bucket ARN](./media/tidb-cloud/copy-bucket-arn.png) 3. 
Open the [IAM console](https://console.aws.amazon.com/iam/), click **Policies** in the left navigation pane, and then click **Create Policy**. - ![Create a policy](/media/tidb-cloud/aws-create-policy.png) + ![Create a policy](./media/tidb-cloud/aws-create-policy.png) 4. On the **Create policy** page, click the **JSON** tab. @@ -138,7 +138,7 @@ If you have any trouble creating a role ARN with AWS CloudFormation, you can tak 1. In the [IAM console](https://console.aws.amazon.com/iam/), click **Roles** in the left navigation pane, and then click **Create role**. - ![Create a role](/media/tidb-cloud/aws-create-role.png) + ![Create a role](./media/tidb-cloud/aws-create-role.png) 2. To create a role, fill in the following information: @@ -152,7 +152,7 @@ If you have any trouble creating a role ARN with AWS CloudFormation, you can tak 5. In the list of roles, click the name of the role that you just created to go to its summary page, and then you can get the role ARN. - ![Copy AWS role ARN](/media/tidb-cloud/aws-role-arn.png) + ![Copy AWS role ARN](./media/tidb-cloud/aws-role-arn.png) diff --git a/tidb-cloud/set-up-private-endpoint-connections-on-google-cloud.md b/tidb-cloud/set-up-private-endpoint-connections-on-google-cloud.md index 2ae40e9d64ff9..e4003df3f54f3 100644 --- a/tidb-cloud/set-up-private-endpoint-connections-on-google-cloud.md +++ b/tidb-cloud/set-up-private-endpoint-connections-on-google-cloud.md @@ -18,7 +18,7 @@ Powered by Google Cloud Private Service Connect, the endpoint connection is secu The architecture of the private endpoint is as follows: -![Private Service Connect architecture](/media/tidb-cloud/google-cloud-psc-endpoint-overview.png) +![Private Service Connect architecture](./media/tidb-cloud/google-cloud-psc-endpoint-overview.png) For more detailed definitions of the private endpoint and endpoint service, see the following Google Cloud documents: diff --git a/tidb-cloud/set-up-private-endpoint-connections-serverless.md b/tidb-cloud/set-up-private-endpoint-connections-serverless.md index a2dddbcc98b2d..a2ce31e782389 100644 --- a/tidb-cloud/set-up-private-endpoint-connections-serverless.md +++ b/tidb-cloud/set-up-private-endpoint-connections-serverless.md @@ -18,7 +18,7 @@ Powered by AWS PrivateLink, the endpoint connection is secure and private, and d The architecture of the private endpoint is as follows: -![Private endpoint architecture](/media/tidb-cloud/aws-private-endpoint-arch.png) +![Private endpoint architecture](./media/tidb-cloud/aws-private-endpoint-arch.png) For more detailed definitions of the private endpoint and endpoint service, see the following AWS documents: @@ -61,7 +61,7 @@ To use the AWS Management Console to create a VPC interface endpoint, perform th The **Create endpoint** page is displayed. - ![Verify endpoint service](/media/tidb-cloud/private-endpoint/create-endpoint-2.png) + ![Verify endpoint service](./media/tidb-cloud/private-endpoint/create-endpoint-2.png) 3. Select **Other endpoint services**. 4. Enter the service name that you found in [step 1](#step-1-choose-a-tidb-cluster). @@ -119,7 +119,7 @@ After you have created the interface endpoint, go back to the TiDB Cloud console You might need to properly set the security group for your VPC endpoint in the AWS Management Console. Go to **VPC** > **Endpoints**. Right-click your VPC endpoint and select the proper **Manage security groups**. A proper security group within your VPC that allows inbound access from your EC2 instances on Port 4000 or a customer-defined port. 
-![Manage security groups](/media/tidb-cloud/private-endpoint/manage-security-groups.png) +![Manage security groups](./media/tidb-cloud/private-endpoint/manage-security-groups.png) ### I cannot enable private DNS. An error is reported indicating that the `enableDnsSupport` and `enableDnsHostnames` VPC attributes are not enabled diff --git a/tidb-cloud/set-up-private-endpoint-connections.md b/tidb-cloud/set-up-private-endpoint-connections.md index 9d16744dc1eb0..0b2c164657315 100644 --- a/tidb-cloud/set-up-private-endpoint-connections.md +++ b/tidb-cloud/set-up-private-endpoint-connections.md @@ -18,7 +18,7 @@ Powered by AWS PrivateLink, the endpoint connection is secure and private, and d The architecture of the private endpoint is as follows: -![Private endpoint architecture](/media/tidb-cloud/aws-private-endpoint-arch.png) +![Private endpoint architecture](./media/tidb-cloud/aws-private-endpoint-arch.png) For more detailed definitions of the private endpoint and endpoint service, see the following AWS documents: @@ -85,7 +85,7 @@ To use the AWS Management Console to create a VPC interface endpoint, perform th The **Create endpoint** page is displayed. - ![Verify endpoint service](/media/tidb-cloud/private-endpoint/create-endpoint-2.png) + ![Verify endpoint service](./media/tidb-cloud/private-endpoint/create-endpoint-2.png) 3. Select **Other endpoint services**. 4. Enter the service name `${your_endpoint_service_name}` from the generated command (`--service-name ${your_endpoint_service_name}`). @@ -141,7 +141,7 @@ To enable private DNS in your AWS Management Console: 3. Select the **Enable for this endpoint** check box. 4. Click **Save changes**. - ![Enable private DNS](/media/tidb-cloud/private-endpoint/enable-private-dns.png) + ![Enable private DNS](./media/tidb-cloud/private-endpoint/enable-private-dns.png)
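If you script your AWS setup instead of clicking through the console, the same private DNS settings can be applied with the AWS CLI (a minimal sketch; the endpoint and VPC IDs are placeholders, and each `modify-vpc-attribute` call can change only one attribute at a time):

```bash
# Turn on private DNS for the interface endpoint (placeholder endpoint ID).
aws ec2 modify-vpc-endpoint \
    --vpc-endpoint-id vpce-0123456789abcdef0 \
    --private-dns-enabled

# Private DNS also requires both DNS attributes on the VPC to be enabled
# (placeholder VPC ID; one attribute per call).
aws ec2 modify-vpc-attribute --vpc-id vpc-0123456789abcdef0 --enable-dns-support '{"Value":true}'
aws ec2 modify-vpc-attribute --vpc-id vpc-0123456789abcdef0 --enable-dns-hostnames '{"Value":true}'
```

Skipping the two attribute calls is one way to hit the `enableDnsSupport`/`enableDnsHostnames` error covered in the FAQ later in this document.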
@@ -207,7 +207,7 @@ The possible statuses of a private endpoint service are explained as follows: You might need to properly set the security group for your VPC endpoint in the AWS Management Console. Go to **VPC** > **Endpoints**. Right-click your VPC endpoint and select the proper **Manage security groups**. A proper security group within your VPC that allows inbound access from your EC2 instances on Port 4000 or a customer-defined port. -![Manage security groups](/media/tidb-cloud/private-endpoint/manage-security-groups.png) +![Manage security groups](./media/tidb-cloud/private-endpoint/manage-security-groups.png) ### I cannot enable private DNS. An error is reported indicating that the `enableDnsSupport` and `enableDnsHostnames` VPC attributes are not enabled diff --git a/tidb-cloud/set-up-vpc-peering-connections.md b/tidb-cloud/set-up-vpc-peering-connections.md index 2bdc751469c46..4ed7d1becf483 100644 --- a/tidb-cloud/set-up-vpc-peering-connections.md +++ b/tidb-cloud/set-up-vpc-peering-connections.md @@ -32,7 +32,7 @@ You can set the CIDR when creating the first TiDB Cloud Dedicated cluster. If yo 3. On the **Project Settings** page of your project, click **Network Access** in the left navigation pane, click the **Project CIDR** tab, and then select **AWS** or **Google Cloud** according to your cloud provider. 4. In the upper-right corner, click **Create CIDR**. Specify the region and CIDR value in the **Create AWS CIDR** or **Create Google Cloud CIDR** dialog, and then click **Confirm**. - ![Project-CIDR4](/media/tidb-cloud/Project-CIDR4.png) + ![Project-CIDR4](./media/tidb-cloud/Project-CIDR4.png) > **Note:** > @@ -51,7 +51,7 @@ You can set the CIDR when creating the first TiDB Cloud Dedicated cluster. If yo The CIDR is inactive by default. To activate the CIDR, you need to create a cluster in the target region. When the region CIDR is active, you can create VPC Peering for the region. - ![Project-CIDR2](/media/tidb-cloud/Project-CIDR2.png) + ![Project-CIDR2](./media/tidb-cloud/Project-CIDR2.png) ## Set up VPC peering on AWS @@ -79,7 +79,7 @@ You can add VPC peering requests on either the project-level **Network Access** You can get such information from your VPC details page of the [AWS Management Console](https://console.aws.amazon.com/). TiDB Cloud supports creating VPC peerings between VPCs in the same region or from two different regions. - ![VPC peering](/media/tidb-cloud/vpc-peering/vpc-peering-creating-infos.png) + ![VPC peering](./media/tidb-cloud/vpc-peering/vpc-peering-creating-infos.png) 5. Click **Create** to send the VPC peering request, and then view the VPC peering information on the **VPC Peering** > **AWS** tab. The status of the newly created VPC peering is **System Checking**. @@ -109,7 +109,7 @@ You can add VPC peering requests on either the project-level **Network Access** You can get such information from your VPC details page of the [AWS Management Console](https://console.aws.amazon.com/). TiDB Cloud supports creating VPC peerings between VPCs in the same region or from two different regions. - ![VPC peering](/media/tidb-cloud/vpc-peering/vpc-peering-creating-infos.png) + ![VPC peering](./media/tidb-cloud/vpc-peering/vpc-peering-creating-infos.png) 4. Click **Create** to send the VPC peering request, and then view the VPC peering information on the **Networking** > **AWS VPC Peering** section. The status of the newly created VPC peering is **System Checking**. @@ -208,13 +208,13 @@ You can also use the AWS dashboard to configure the VPC peering connection. 
1. Sign in to the [AWS Management Console](https://console.aws.amazon.com/) and click **Services** on the top menu bar. Enter `VPC` in the search box and go to the VPC service page. - ![AWS dashboard](/media/tidb-cloud/vpc-peering/aws-vpc-guide-1.jpg) + ![AWS dashboard](./media/tidb-cloud/vpc-peering/aws-vpc-guide-1.jpg) 2. From the left navigation bar, open the **Peering Connections** page. On the **Create Peering Connection** tab, a peering connection is in the **Pending Acceptance** status. 3. Confirm that the requester owner and the requester VPC match **TiDB Cloud AWS Account ID** and **TiDB Cloud VPC ID** on the **VPC Peering Details** page of the [TiDB Cloud console](https://tidbcloud.com). Right-click the peering connection and select **Accept Request** to accept the request in the **Accept VPC peering connection request** dialog. - ![AWS VPC peering requests](/media/tidb-cloud/vpc-peering/aws-vpc-guide-3.png) + ![AWS VPC peering requests](./media/tidb-cloud/vpc-peering/aws-vpc-guide-3.png) 2. Add a route to the TiDB Cloud VPC for each of your VPC subnet route tables. @@ -222,11 +222,11 @@ You can also use the AWS dashboard to configure the VPC peering connection. 2. Search all the route tables that belong to your application VPC. - ![Search all route tables related to VPC](/media/tidb-cloud/vpc-peering/aws-vpc-guide-4.png) + ![Search all route tables related to VPC](./media/tidb-cloud/vpc-peering/aws-vpc-guide-4.png) 3. Right-click each route table and select **Edit routes**. On the edit page, add a route with a destination to the TiDB Cloud CIDR (by checking the **VPC Peering** configuration page in the TiDB Cloud console) and fill in your peering connection ID in the **Target** column. - ![Edit all route tables](/media/tidb-cloud/vpc-peering/aws-vpc-guide-5.png) + ![Edit all route tables](./media/tidb-cloud/vpc-peering/aws-vpc-guide-5.png) 3. Make sure you have enabled private DNS hosted zone support for your VPC. diff --git a/tidb-cloud/tidb-cloud-billing-dm.md b/tidb-cloud/tidb-cloud-billing-dm.md index 5ab94cd92e871..434ac291a2e4f 100644 --- a/tidb-cloud/tidb-cloud-billing-dm.md +++ b/tidb-cloud/tidb-cloud-billing-dm.md @@ -41,15 +41,15 @@ Note that if you are using AWS PrivateLink or VPC peering connections, and if th - If the source database and the TiDB node are not in the same region, cross-region traffic charges are incurred when the Data Migration job collects data from the source database. - ![Cross-region traffic charges](/media/tidb-cloud/dm-billing-cross-region-fees.png) + ![Cross-region traffic charges](./media/tidb-cloud/dm-billing-cross-region-fees.png) - If the source database and the TiDB node are in the same region but in different AZs, cross-AZ traffic charges are incurred when the Data Migration job collects data from the source database. - ![Cross-AZ traffic charges](/media/tidb-cloud/dm-billing-cross-az-fees.png) + ![Cross-AZ traffic charges](./media/tidb-cloud/dm-billing-cross-az-fees.png) - If the Data Migration job and the TiDB node are not in the same AZ, cross-AZ traffic charges are incurred when the Data Migration job writes data to the target TiDB node. In addition, if the Data Migration job and the TiDB node are not in the same AZ (or region) with the source database, cross-AZ (or cross-region) traffic charges are incurred when the Data Migration job collects data from the source database. 
- ![Cross-region and cross-AZ traffic charges](/media/tidb-cloud/dm-billing-cross-region-and-az-fees.png) + ![Cross-region and cross-AZ traffic charges](./media/tidb-cloud/dm-billing-cross-region-and-az-fees.png) The cross-region and cross-AZ traffic prices are the same as those for TiDB Cloud. For more information, see [TiDB Cloud Pricing Details](https://www.pingcap.com/tidb-dedicated-pricing-details/). diff --git a/tidb-cloud/tidb-cloud-connect-aws-dms.md b/tidb-cloud/tidb-cloud-connect-aws-dms.md index 6ad1a127fd72c..249e2e9dff6c3 100644 --- a/tidb-cloud/tidb-cloud-connect-aws-dms.md +++ b/tidb-cloud/tidb-cloud-connect-aws-dms.md @@ -64,7 +64,7 @@ For TiDB Cloud Dedicated, your clients can connect to clusters via public endpoi 1. In the AWS DMS console, go to the [**Replication instances**](https://console.aws.amazon.com/dms/v2/home#replicationInstances) page and switch to the corresponding region. It is recommended to use the same region for AWS DMS as TiDB Cloud. - ![Create replication instance](/media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-connect-replication-instances.png) + ![Create replication instance](./media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-connect-replication-instances.png) 2. Click **Create replication instance**. @@ -84,7 +84,7 @@ For TiDB Cloud Dedicated, your clients can connect to clusters via public endpoi - **Replication subnet group**: select a subnet group for your replication instance. - **Public accessible**: set it based on your network configuration. - ![Connectivity and security](/media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-connect-connectivity-security.png) + ![Connectivity and security](./media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-connect-connectivity-security.png) 7. Configure the **Advanced settings**, **Maintenance**, and **Tags** sections if needed, and then click **Create replication instance** to finish the instance creation. @@ -98,7 +98,7 @@ For connectivity, the steps for using TiDB Cloud clusters as a source or as a ta 1. In the AWS DMS console, go to the [**Endpoints**](https://console.aws.amazon.com/dms/v2/home#endpointList) page and switch to the corresponding region. - ![Create endpoint](/media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-connect-create-endpoint.png) + ![Create endpoint](./media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-connect-create-endpoint.png) 2. Click **Create endpoint** to create the target database endpoint. @@ -133,7 +133,7 @@ For connectivity, the steps for using TiDB Cloud clusters as a source or as a ta
- ![Provide access information manually](/media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-connect-configure-endpoint.png) + ![Provide access information manually](./media/tidb-cloud/aws-dms-tidb-cloud/aws-dms-connect-configure-endpoint.png) 6. If you want to create the endpoint as a **Target endpoint**, expand the **Endpoint settings** section, select the **Use endpoint connection attributes** checkbox, and then set **Extra connection attributes** to `Initstmt=SET FOREIGN_KEY_CHECKS=0;`. diff --git a/tidb-cloud/tidb-cloud-intro.md b/tidb-cloud/tidb-cloud-intro.md index 292a5c7d0a5ca..2fb0675118702 100644 --- a/tidb-cloud/tidb-cloud-intro.md +++ b/tidb-cloud/tidb-cloud-intro.md @@ -8,7 +8,7 @@ category: intro [TiDB Cloud](https://www.pingcap.com/tidb-cloud/) is a fully-managed Database-as-a-Service (DBaaS) that brings [TiDB](https://docs.pingcap.com/tidb/stable/overview), an open-source Hybrid Transactional and Analytical Processing (HTAP) database, to your cloud. TiDB Cloud offers an easy way to deploy and manage databases to let you focus on your applications, not the complexities of the databases. You can create TiDB Cloud clusters to quickly build mission-critical applications on Google Cloud and Amazon Web Services (AWS). -![TiDB Cloud Overview](/media/tidb-cloud/tidb-cloud-overview.png) +![TiDB Cloud Overview](./media/tidb-cloud/tidb-cloud-overview.png) ## Why TiDB Cloud @@ -76,7 +76,7 @@ For feature comparison between TiDB Cloud Serverless and TiDB Cloud Dedicated, s ## Architecture -![TiDB Cloud architecture](/media/tidb-cloud/tidb-cloud-architecture.png) +![TiDB Cloud architecture](./media/tidb-cloud/tidb-cloud-architecture.png) - TiDB VPC (Virtual Private Cloud) diff --git a/tidb-cloud/tidb-cloud-poc.md b/tidb-cloud/tidb-cloud-poc.md index 33e9a9bffbeae..91e339595be62 100644 --- a/tidb-cloud/tidb-cloud-poc.md +++ b/tidb-cloud/tidb-cloud-poc.md @@ -214,7 +214,7 @@ Once your application for the PoC is approved, you will receive credits in your To check the credits left for your PoC, go to the [**Clusters**](https://tidbcloud.com/console/clusters) page of your target project, as shown in the following screenshot. -![TiDB Cloud PoC Credits](/media/tidb-cloud/poc-points.png) +![TiDB Cloud PoC Credits](./media/tidb-cloud/poc-points.png) Alternatively, you can also click in the lower-left corner of the TiDB Cloud console, click **Billing**, and click **Credits** to see the credit details page. diff --git a/tidb-cloud/tidb-cloud-sql-tuning-overview.md b/tidb-cloud/tidb-cloud-sql-tuning-overview.md index 0058552892dfc..af7fd9da0f528 100644 --- a/tidb-cloud/tidb-cloud-sql-tuning-overview.md +++ b/tidb-cloud/tidb-cloud-sql-tuning-overview.md @@ -31,15 +31,15 @@ You can view some key information in **SQL Statement**. - **SQL Template**: including SQL digest, SQL template ID, the time range currently viewed, the number of execution plans, and the database where the execution takes place. - ![Details0](/media/dashboard/dashboard-statement-detail0.png) + ![Details0](./media/dashboard/dashboard-statement-detail0.png) - Execution plan list: if a SQL statement has more than one execution plan, the list is displayed. You can select different execution plans and the details of the selected execution plan are displayed at the bottom of the list. If there is only one execution plan, the list will not be displayed. 
- ![Details1](/media/dashboard/dashboard-statement-detail1.png) + ![Details1](./media/dashboard/dashboard-statement-detail1.png) - Execution plan details: shows the details of the selected execution plan. It collects the execution plans of each SQL type and the corresponding execution time from several perspectives to help you get more information. See [Execution plans](https://docs.pingcap.com/tidb/stable/dashboard-statement-details#execution-plans). - ![Details2](/media/dashboard/dashboard-statement-detail2.png) + ![Details2](./media/dashboard/dashboard-statement-detail2.png) - Related slow query diff --git a/tidb-cloud/tidb-cloud-tune-performance-overview.md b/tidb-cloud/tidb-cloud-tune-performance-overview.md index ff9ee252b5c4c..9d1d05950b88f 100644 --- a/tidb-cloud/tidb-cloud-tune-performance-overview.md +++ b/tidb-cloud/tidb-cloud-tune-performance-overview.md @@ -22,7 +22,7 @@ To get a total user response time within a specified time range (`ΔT`), you can Total user response time in `ΔT` = Average TPS (Transactions Per Second) x Average user response time x `ΔT`. -![user_response_time](/media/performance/user_response_time_en.png) +![user_response_time](./media/performance/user_response_time_en.png) ## Relationship between user response time and system throughput @@ -84,19 +84,19 @@ For more information about SQL performance tuning, see [SQL Tuning Overview](/ti You can view hotspot issues on the [Key Visualizer tab](/tidb-cloud/tune-performance.md#key-visualizer). The following screenshot shows a sample heat map. The horizontal coordinate of the map is the time, and the vertical coordinate is the table and index. Brighter color indicates higher traffic. You can toggle the display of read or write traffic in the toolbar. -![Hotspot issues](/media/tidb-cloud/tidb-cloud-troubleshoot-hotspot.png) +![Hotspot issues](./media/tidb-cloud/tidb-cloud-troubleshoot-hotspot.png) The following screenshot shows an example of a write hotspot. A bright diagonal line (diagonal up or diagonal down) appears in the write flow graph, and the write traffic appears only at the end of the line. It becomes a stepped pattern as the number of table Regions grows. It indicates that there is a write hotspot in the table. When a write hotspot occurs, you need to check whether you are using a self-incrementing primary key, or no primary key, or using a time-dependent insert statement or index. -![Write hotspot](/media/tidb-cloud/tidb-cloud-troubleshoot-write-hotspot.png) +![Write hotspot](./media/tidb-cloud/tidb-cloud-troubleshoot-write-hotspot.png) A read hotspot is generally represented in the heat map as a bright horizontal line, usually a small table with a large number of queries, as shown in the following screenshot. -![Read hotspot](/media/tidb-cloud/tidb-cloud-troubleshoot-read-hotspot-new.png) +![Read hotspot](./media/tidb-cloud/tidb-cloud-troubleshoot-read-hotspot-new.png) Hover over the highlighted block to see which table or index has high traffic, as shown in the following screenshot. 
-![Hotspot index](/media/tidb-cloud/tidb-cloud-troubleshoot-hotspot-index.png) +![Hotspot index](./media/tidb-cloud/tidb-cloud-troubleshoot-hotspot-index.png) #### Scale out diff --git a/tidb-cloud/v6.5-performance-benchmarking-with-sysbench.md b/tidb-cloud/v6.5-performance-benchmarking-with-sysbench.md index d8c4f4179fa6c..d50cab9421634 100644 --- a/tidb-cloud/v6.5-performance-benchmarking-with-sysbench.md +++ b/tidb-cloud/v6.5-performance-benchmarking-with-sysbench.md @@ -104,7 +104,7 @@ The performance on the `oltp_point_select` workload is as follows: | 100 | 64987 | 2.07 | | 200 | 121656 | 2.14 | -![Sysbench point select performance](/media/tidb-cloud/v6.5.6-oltp_select_point.png) +![Sysbench point select performance](./media/tidb-cloud/v6.5.6-oltp_select_point.png) ### Read write performance @@ -116,7 +116,7 @@ The performance on the `oltp_read_write` workload is as follows: | 100 | 2266 | 51.9 | | 200 | 3578 | 81.5 | -![Sysbench read write performance](/media/tidb-cloud/v6.5.6-oltp_read_write.png) +![Sysbench read write performance](./media/tidb-cloud/v6.5.6-oltp_read_write.png) ### Update non-index performance @@ -128,7 +128,7 @@ The performance on the `oltp_update_non_index` workload is as follows: | 200 | 20640 | 12.1 | | 400 | 36830 | 13.5 | -![Sysbench update non-index performance](/media/tidb-cloud/v6.5.6-oltp_update_non_index.png) +![Sysbench update non-index performance](./media/tidb-cloud/v6.5.6-oltp_update_non_index.png) ### Update index performance @@ -140,7 +140,7 @@ The performance on the `oltp_update_index` workload is as follows: | 200 | 14466 | 18.0 | | 400 | 22194 | 24.8 | -![Sysbench update index performance](/media/tidb-cloud/v6.5.6-oltp_update_index.png) +![Sysbench update index performance](./media/tidb-cloud/v6.5.6-oltp_update_index.png) ### Insert performance @@ -152,4 +152,4 @@ The performance on the `oltp_insert` workload is as follows: | 200 | 27143 | 10.1 | | 400 | 40884 | 15.0 | -![Sysbench insert performance](/media/tidb-cloud/v6.5.6-oltp_insert.png) +![Sysbench insert performance](./media/tidb-cloud/v6.5.6-oltp_insert.png) diff --git a/tidb-cloud/v6.5-performance-benchmarking-with-tpcc.md b/tidb-cloud/v6.5-performance-benchmarking-with-tpcc.md index 3538c4001f194..ea9f0e2989a0b 100644 --- a/tidb-cloud/v6.5-performance-benchmarking-with-tpcc.md +++ b/tidb-cloud/v6.5-performance-benchmarking-with-tpcc.md @@ -108,4 +108,4 @@ The TPC-C performance of v6.5.6 in the [test environment](#test-environment) is | 100 | 74424 | | 200 | 101545 | -![TPC-C](/media/tidb-cloud/v6.5.6-tpmC.png) +![TPC-C](./media/tidb-cloud/v6.5.6-tpmC.png) diff --git a/tidb-cloud/v7.1-performance-benchmarking-with-sysbench.md b/tidb-cloud/v7.1-performance-benchmarking-with-sysbench.md index 66f28cc222005..938320daea146 100644 --- a/tidb-cloud/v7.1-performance-benchmarking-with-sysbench.md +++ b/tidb-cloud/v7.1-performance-benchmarking-with-sysbench.md @@ -125,7 +125,7 @@ The performance on the `oltp_point_select` workload is as follows: | 100 | 64853 | 2.00 | | 200 | 118462 | 2.22 | -![Sysbench point select performance](/media/tidb-cloud/v7.1.3-oltp_select_point.png) +![Sysbench point select performance](./media/tidb-cloud/v7.1.3-oltp_select_point.png) ### Read write performance @@ -137,7 +137,7 @@ The performance on the `oltp_read_write` workload is as follows: | 100 | 2235 | 53.9 | | 200 | 3380 | 87.6 | -![Sysbench read write performance](/media/tidb-cloud/v7.1.3-oltp_read_write.png) +![Sysbench read write performance](./media/tidb-cloud/v7.1.3-oltp_read_write.png) ### Update non-index 
performance @@ -149,7 +149,7 @@ The performance on the `oltp_update_non_index` workload is as follows: | 200 | 19985 | 12.8 | | 400 | 35621 | 14.7 | -![Sysbench update non-index performance](/media/tidb-cloud/v7.1.3-oltp_update_non_index.png) +![Sysbench update non-index performance](./media/tidb-cloud/v7.1.3-oltp_update_non_index.png) ### Update index performance @@ -161,7 +161,7 @@ The performance on the `oltp_update_index` workload is as follows: | 200 | 14414 | 18.6 | | 400 | 21997 | 25.3 | -![Sysbench update index performance](/media/tidb-cloud/v7.1.3-oltp_update_index.png) +![Sysbench update index performance](./media/tidb-cloud/v7.1.3-oltp_update_index.png) ### Insert performance @@ -173,4 +173,4 @@ The performance on the `oltp_insert` workload is as follows: | 200 | 25078 | 11.0 | | 400 | 38436 | 15.6 | -![Sysbench insert performance](/media/tidb-cloud/v7.1.3-oltp_insert.png) +![Sysbench insert performance](./media/tidb-cloud/v7.1.3-oltp_insert.png) diff --git a/tidb-cloud/v7.1-performance-benchmarking-with-tpcc.md b/tidb-cloud/v7.1-performance-benchmarking-with-tpcc.md index 4cd0f9f38710e..9fbafdd1b4f39 100644 --- a/tidb-cloud/v7.1-performance-benchmarking-with-tpcc.md +++ b/tidb-cloud/v7.1-performance-benchmarking-with-tpcc.md @@ -109,4 +109,4 @@ The TPC-C performance of v7.1.3 in the [test environment](#test-environment) is | 100 | 72895 | | 200 | 97924 | -![TPC-C](/media/tidb-cloud/v7.1.3-tpmC.png) +![TPC-C](./media/tidb-cloud/v7.1.3-tpmC.png) diff --git a/tidb-cloud/v7.5-performance-benchmarking-with-sysbench.md b/tidb-cloud/v7.5-performance-benchmarking-with-sysbench.md index 3ceb83626726d..c50779b71e75e 100644 --- a/tidb-cloud/v7.5-performance-benchmarking-with-sysbench.md +++ b/tidb-cloud/v7.5-performance-benchmarking-with-sysbench.md @@ -125,7 +125,7 @@ The performance on the `oltp_point_select` workload is as follows: | 100 | 64,810 | 2.03 | | 200 | 118,651 | 2.22 | -![Sysbench point select performance](/media/tidb-cloud/v7.5.0-oltp_point_select.png) +![Sysbench point select performance](./media/tidb-cloud/v7.5.0-oltp_point_select.png) ### Read write performance @@ -137,7 +137,7 @@ The performance on the `oltp_read_write` workload is as follows: | 100 | 2,162 | 54.8 | | 200 | 3,169 | 92.4 | -![Sysbench read write performance](/media/tidb-cloud/v7.5.0-oltp_read_write.png) +![Sysbench read write performance](./media/tidb-cloud/v7.5.0-oltp_read_write.png) ### Update non-index performance @@ -149,7 +149,7 @@ The performance on the `oltp_update_non_index` workload is as follows: | 200 | 20,223 | 13.0 | | 400 | 34,011 | 14.7 | -![Sysbench update non-index performance](/media/tidb-cloud/v7.5.0-oltp_update_non_index.png) +![Sysbench update non-index performance](./media/tidb-cloud/v7.5.0-oltp_update_non_index.png) ### Update index performance @@ -161,7 +161,7 @@ The performance on the `oltp_update_index` workload is as follows: | 200 | 13,718 | 19.0 | | 400 | 20,377 | 26.9 | -![Sysbench update index performance](/media/tidb-cloud/v7.5.0-oltp_update_index.png) +![Sysbench update index performance](./media/tidb-cloud/v7.5.0-oltp_update_index.png) ### Insert performance @@ -173,4 +173,4 @@ The performance on the `oltp_insert` workload is as follows: | 200 | 24,756 | 10.8 | | 400 | 37,247 | 16.4 | -![Sysbench insert performance](/media/tidb-cloud/v7.5.0-oltp_insert.png) +![Sysbench insert performance](./media/tidb-cloud/v7.5.0-oltp_insert.png) diff --git a/tidb-cloud/v7.5-performance-benchmarking-with-tpcc.md b/tidb-cloud/v7.5-performance-benchmarking-with-tpcc.md index 
63ec70b77b0f1..9de66949b86ea 100644 --- a/tidb-cloud/v7.5-performance-benchmarking-with-tpcc.md +++ b/tidb-cloud/v7.5-performance-benchmarking-with-tpcc.md @@ -109,4 +109,4 @@ The TPC-C performance of v7.5.0 in the [test environment](#test-environment) is | 100 | 71,499 | | 200 | 97,389 | -![TPC-C](/media/tidb-cloud/v7.5.0_tpcc.png) +![TPC-C](./media/tidb-cloud/v7.5.0_tpcc.png) diff --git a/tidb-cloud/v8.1-performance-benchmarking-with-sysbench.md b/tidb-cloud/v8.1-performance-benchmarking-with-sysbench.md index b3bd825524fa2..1b1130dcd9d94 100644 --- a/tidb-cloud/v8.1-performance-benchmarking-with-sysbench.md +++ b/tidb-cloud/v8.1-performance-benchmarking-with-sysbench.md @@ -130,7 +130,7 @@ The performance on the `oltp_point_select` workload is as follows: | 100 | 62,545 | 2.03 | | 200 | 111,470 | 2.48 | -![Sysbench point select performance](/media/tidb-cloud/v8.1.0_oltp_point_select.png) +![Sysbench point select performance](./media/tidb-cloud/v8.1.0_oltp_point_select.png) ### Read write performance @@ -142,7 +142,7 @@ The performance on the `oltp_read_write` workload is as follows: | 100 | 2,341 | 51 | | 200 | 3,240 | 109 | -![Sysbench read write performance](/media/tidb-cloud/v8.1.0_oltp_read_write.png) +![Sysbench read write performance](./media/tidb-cloud/v8.1.0_oltp_read_write.png) ### Update non-index performance @@ -154,7 +154,7 @@ The performance on the `oltp_update_non_index` workload is as follows: | 200 | 25,215 | 10.5 | | 400 | 42,550 | 12.8 | -![Sysbench update non-index performance](/media/tidb-cloud/v8.1.0_oltp_update_non_index.png) +![Sysbench update non-index performance](./media/tidb-cloud/v8.1.0_oltp_update_non_index.png) ### Update index performance @@ -166,7 +166,7 @@ The performance on the `oltp_update_index` workload is as follows: | 200 | 17,805 | 14.7 | | 400 | 24,575 | 23.5 | -![Sysbench update index performance](/media/tidb-cloud/v8.1.0_oltp_update_index.png) +![Sysbench update index performance](./media/tidb-cloud/v8.1.0_oltp_update_index.png) ### Insert performance @@ -178,4 +178,4 @@ The performance on the `oltp_insert` workload is as follows: | 200 | 29,387 | 9.73 | | 400 | 42,712 | 14.2 | -![Sysbench insert performance](/media/tidb-cloud/v8.1.0_oltp_insert.png) +![Sysbench insert performance](./media/tidb-cloud/v8.1.0_oltp_insert.png) diff --git a/tidb-cloud/v8.1-performance-benchmarking-with-tpcc.md b/tidb-cloud/v8.1-performance-benchmarking-with-tpcc.md index 8aad399883823..66e959b352e2a 100644 --- a/tidb-cloud/v8.1-performance-benchmarking-with-tpcc.md +++ b/tidb-cloud/v8.1-performance-benchmarking-with-tpcc.md @@ -120,4 +120,4 @@ The TPC-C performance of v8.1.0 in the [test environment](#test-environment) is | 100 | 75,495 | | 200 | 102,013 | -![TPC-C](/media/tidb-cloud/v8.1.0_tpcc.png) +![TPC-C](./media/tidb-cloud/v8.1.0_tpcc.png) diff --git a/tidb-computing.md b/tidb-computing.md index f401b0d22747b..0fb6fed4177df 100644 --- a/tidb-computing.md +++ b/tidb-computing.md @@ -132,7 +132,7 @@ For example, to execute the `select count(*) from user where name = "TiDB"` SQL **The entire process is illustrated as follows:** -![naive sql flow](/media/tidb-computing-native-sql-flow.jpeg) +![naive sql flow](./media/tidb-computing-native-sql-flow.jpeg) This solution is intuitive and feasible, but has some obvious problems in a distributed database scenario: @@ -146,12 +146,12 @@ To solve the problems above, the computation should be as close to the storage n The following image shows how data returns layer by layer: -![dist sql 
flow](/media/tidb-computing-dist-sql-flow.png) +![dist sql flow](./media/tidb-computing-dist-sql-flow.png) ### Architecture of SQL layer The previous sections introduce some functions of the SQL layer and I hope you have a basic understanding of how SQL statements are handled. In fact, TiDB's SQL layer is much more complicated, with many modules and layers. The following diagram lists the important modules and calling relationships: -![tidb sql layer](/media/tidb-computing-tidb-sql-layer.png) +![tidb sql layer](./media/tidb-computing-tidb-sql-layer.png) The user's SQL request is sent to TiDB Server either directly or via `Load Balancer`. TiDB Server will parse `MySQL Protocol Packet`, get the content of requests, parse the SQL request syntactically and semantically, develop and optimize query plans, execute a query plan, get and process the data. All data is stored in the TiKV cluster, so in this process, TiDB Server needs to interact with TiKV and get the data. Finally, TiDB Server needs to return the query results to the user. diff --git a/tidb-distributed-execution-framework.md b/tidb-distributed-execution-framework.md index 5a328041ac189..f77de5e983ecb 100644 --- a/tidb-distributed-execution-framework.md +++ b/tidb-distributed-execution-framework.md @@ -108,7 +108,7 @@ Starting from v8.1.0, if new nodes are added during task execution, the DXF dete The architecture of the DXF is as follows: -![Architecture of the DXF](/media/dist-task/dist-task-architect.jpg) +![Architecture of the DXF](./media/dist-task/dist-task-architect.jpg) As shown in the preceding diagram, the execution of tasks in the DXF is mainly handled by the following modules: diff --git a/tidb-global-sort.md b/tidb-global-sort.md index 7043e787dd5bd..5f9e7be9cd879 100644 --- a/tidb-global-sort.md +++ b/tidb-global-sort.md @@ -70,7 +70,7 @@ To enable Global Sort, follow these steps: The algorithm of the Global Sort feature is as follows: -![Algorithm of Global Sort](/media/dist-task/global-sort.jpeg) +![Algorithm of Global Sort](./media/dist-task/global-sort.jpeg) The detailed implementation principles are as follows: diff --git a/tidb-lightning/monitor-tidb-lightning.md b/tidb-lightning/monitor-tidb-lightning.md index 6b922b4445c37..b948ac7d1dc4c 100644 --- a/tidb-lightning/monitor-tidb-lightning.md +++ b/tidb-lightning/monitor-tidb-lightning.md @@ -38,7 +38,7 @@ scrape_configs: ### Row 1: Speed -![Panels in first row](/media/lightning-grafana-row-1.png) +![Panels in first row](./media/lightning-grafana-row-1.png) | Panel | Series | Description | |:-----|:-----|:-----| @@ -50,7 +50,7 @@ Sometimes the import speed will drop to zero allowing other parts to catch up. T ### Row 2: Progress -![Panels in second row](/media/lightning-grafana-row-2.png) +![Panels in second row](./media/lightning-grafana-row-2.png) | Panel | Description | |:-----|:-----| @@ -60,7 +60,7 @@ Sometimes the import speed will drop to zero allowing other parts to catch up. T ### Row 3: Resource -![Panels in third row](/media/lightning-grafana-row-3.png) +![Panels in third row](./media/lightning-grafana-row-3.png) | Panel | Description | |:-----|:-----| @@ -70,7 +70,7 @@ Sometimes the import speed will drop to zero allowing other parts to catch up. T ### Row 4: Quota -![Panels in fourth row](/media/lightning-grafana-row-4.png) +![Panels in fourth row](./media/lightning-grafana-row-4.png) | Panel | Series | Description | |:-----|:-----|:-----| @@ -84,7 +84,7 @@ Sometimes the import speed will drop to zero allowing other parts to catch up. 
T ### Row 5: Read speed -![Panels in fifth row](/media/lightning-grafana-row-5.png) +![Panels in fifth row](./media/lightning-grafana-row-5.png) | Panel | Series | Description | |:-----|:-----|:-----| @@ -97,7 +97,7 @@ If any of the duration is too high, it indicates that the disk used by TiDB Ligh ### Row 6: Storage -![Panels in sixth row](/media/lightning-grafana-row-6.png) +![Panels in sixth row](./media/lightning-grafana-row-6.png) | Panel | Series | Description | |:-----|:-----|:-----| @@ -111,7 +111,7 @@ If any of the duration is too high, it indicates that the disk used by TiDB Ligh ### Row 7: Import speed -![Panels in seventh row](/media/lightning-grafana-row-7.png) +![Panels in seventh row](./media/lightning-grafana-row-7.png) | Panel | Series | Description | |:-----|:-----|:-----| diff --git a/tidb-lightning/tidb-lightning-faq.md b/tidb-lightning/tidb-lightning-faq.md index ff0f72bb275af..5dadccfc71b9f 100644 --- a/tidb-lightning/tidb-lightning-faq.md +++ b/tidb-lightning/tidb-lightning-faq.md @@ -148,7 +148,7 @@ The purpose of placement rule in SQL is to control the data location of certain Suppose the source cluster has the following topology: -![TiDB Lightning FAQ - source cluster topology](/media/lightning-faq-source-cluster-topology.jpg) +![TiDB Lightning FAQ - source cluster topology](./media/lightning-faq-source-cluster-topology.jpg) The source cluster has the following placement policy: @@ -158,11 +158,11 @@ CREATE PLACEMENT POLICY p1 PRIMARY_REGION="us-east" REGIONS="us-east,us-west"; **Situation 1:** The target cluster has 3 replicas, and the topology is different from the source cluster. In such cases, when TiDB Lightning creates the placement policy in the target cluster, it will not report an error. However, the semantics in the target cluster is wrong. -![TiDB Lightning FAQ - situation 1](/media/lightning-faq-situation-1.jpg) +![TiDB Lightning FAQ - situation 1](./media/lightning-faq-situation-1.jpg) **Situation 2:** The target cluster locates the follower replica in another TiKV node in region "us-mid" and does not have the region "us-west" in the topology. In such cases, when creating the placement policy in the target cluster, TiDB Lightning will report an error. -![TiDB Lightning FAQ - situation 2](/media/lightning-faq-situation-2.jpg) +![TiDB Lightning FAQ - situation 2](./media/lightning-faq-situation-2.jpg) **Workaround:** diff --git a/tidb-lightning/tidb-lightning-overview.md b/tidb-lightning/tidb-lightning-overview.md index 369e589c46606..a22c0689fd9f9 100644 --- a/tidb-lightning/tidb-lightning-overview.md +++ b/tidb-lightning/tidb-lightning-overview.md @@ -26,7 +26,7 @@ TiDB Lightning can read data from the following sources: ## TiDB Lightning architecture -![Architecture of TiDB Lightning tool set](/media/tidb-lightning-architecture.png) +![Architecture of TiDB Lightning tool set](./media/tidb-lightning-architecture.png) TiDB Lightning supports two import modes, configured by `backend`. The import mode determines the way data is imported into TiDB. diff --git a/tidb-lightning/tidb-lightning-web-interface.md b/tidb-lightning/tidb-lightning-web-interface.md index ff853bb960ef9..0d1386fcbd33f 100644 --- a/tidb-lightning/tidb-lightning-web-interface.md +++ b/tidb-lightning/tidb-lightning-web-interface.md @@ -28,7 +28,7 @@ In server mode, TiDB Lightning does not start running immediately. 
Rather, users ## Front page -![Front page of the web interface](/media/lightning-web-frontpage.png) +![Front page of the web interface](./media/lightning-web-frontpage.png) Functions of the title bar, from left to right: @@ -53,7 +53,7 @@ Each panel contains cards describing the status of the table. Click the **+** button on the title bar to submit a task. -![Submit task dialog](/media/lightning-web-submit.png) +![Submit task dialog](./media/lightning-web-submit.png) Tasks are TOML files described as [task configurations](/tidb-lightning/tidb-lightning-configuration.md#tidb-lightning-task). One could also open a local TOML file by clicking **UPLOAD**. @@ -63,7 +63,7 @@ Click **SUBMIT** to run the task. If a task is already running, the new task wil Click the **>** button of a table card on the front page to view the detailed progress of a table. -![Table progress](/media/lightning-web-table.png) +![Table progress](./media/lightning-web-table.png) The page shows the import progress of every engine and data files associated with the table. @@ -73,7 +73,7 @@ Click **TiDB Lightning** on the title bar to go back to the front page. Click the **ⓘ** button on the title bar to manage the current and queued tasks. -![Task management page](/media/lightning-web-queue.png) +![Task management page](./media/lightning-web-queue.png) Each task is labeled by the time it was submitted. Clicking the task would show the configuration formatted as JSON. diff --git a/tidb-monitoring-framework.md b/tidb-monitoring-framework.md index 786d25d70bdac..7389bc71e5bed 100644 --- a/tidb-monitoring-framework.md +++ b/tidb-monitoring-framework.md @@ -20,13 +20,13 @@ Prometheus consists of multiple components. Currently, TiDB uses the following o The diagram is as follows: -![diagram](/media/prometheus-in-tidb.png) +![diagram](./media/prometheus-in-tidb.png) ## About Grafana in TiDB Grafana is an open source project for analyzing and visualizing metrics. TiDB uses Grafana to display the performance metrics as follows: -![Grafana monitored_groups](/media/grafana-monitored-groups.png) +![Grafana monitored_groups](./media/grafana-monitored-groups.png) - {TiDB_Cluster_name}-Backup-Restore: Monitoring metrics related to backup and restore. - {TiDB_Cluster_name}-Blackbox_exporter: Monitoring metrics related to network probe. @@ -50,7 +50,7 @@ Grafana is an open source project for analyzing and visualizing metrics. TiDB us Each group has multiple panel labels of monitoring metrics, and each panel contains detailed information of multiple monitoring metrics. For example, the **Overview** monitoring group has five panel labels, and each labels corresponds to a monitoring panel. See the following UI: -![Grafana Overview](/media/grafana-monitor-overview.png) +![Grafana Overview](./media/grafana-monitor-overview.png) ## TiDB Dashboard diff --git a/tidb-performance-tuning-config.md b/tidb-performance-tuning-config.md index 4449cf4d413bc..68379e86934fb 100644 --- a/tidb-performance-tuning-config.md +++ b/tidb-performance-tuning-config.md @@ -194,7 +194,7 @@ Instance plan cache provides better memory efficiency than session-level plan ca In scenarios with multiple connections and complex queries, session-level plan cache would require significantly more memory to achieve similar hit ratios, making instance plan cache the more efficient choice. 
-![Instance plan cache: Queries Using Plan Cache OPS](/media/performance/instance-plan-cache.png) +![Instance plan cache: Queries Using Plan Cache OPS](./media/performance/instance-plan-cache.png) #### Test workload @@ -243,7 +243,7 @@ The performance improvement observed in the key settings is primarily attributed This significant reduction in compaction overhead contributes to the overall throughput improvement seen in the key settings configuration. -![Titan RocksDB compaction:](/media/performance/titan-rocksdb-compactions.png) +![Titan RocksDB compaction:](./media/performance/titan-rocksdb-compactions.png) #### Test workload diff --git a/tidb-scheduling.md b/tidb-scheduling.md index 89a7300cc349e..32f87cbad86da 100644 --- a/tidb-scheduling.md +++ b/tidb-scheduling.md @@ -85,7 +85,7 @@ Scheduling is based on information collection. In short, the PD scheduling compo + **Offline**: A TiKV store is manually taken offline through PD Control. This is only an intermediate status for the store to go offline. The store in this status moves all its Regions to other "Up" stores that meet the relocation conditions. When `leader_count` and `region_count` (obtained through PD Control) both show `0`, the store status changes to "Tombstone" from "Offline". In the "Offline" status, **do not** disable the store service or the physical server where the store is located. During the process that the store goes offline, if the cluster does not have target stores to relocate the Regions (for example, inadequate stores to hold replicas in the cluster), the store is always in the "Offline" status. + **Tombstone**: The TiKV store is completely offline. You can use the `remove-tombstone` interface to safely clean up TiKV in this status. Starting from v6.5.0, if not manually handled, PD will automatically delete the Tombstone records stored internally one month after the node is converted to Tombstone. - ![TiKV store status relationship](/media/tikv-store-status-relationship.png) + ![TiKV store status relationship](./media/tikv-store-status-relationship.png) - Information reported by Region leaders: diff --git a/tidb-storage.md b/tidb-storage.md index 04bb8f6931b7d..bdc8ce8dc63c9 100644 --- a/tidb-storage.md +++ b/tidb-storage.md @@ -7,7 +7,7 @@ summary: Understand the storage layer of a TiDB database. This document introduces some design ideas and key concepts of [TiKV](https://github.com/tikv/tikv). -![storage-architecture](/media/tidb-storage-architecture-1.png) +![storage-architecture](./media/tidb-storage-architecture-1.png) ## Key-Value pairs @@ -38,7 +38,7 @@ Raft is a consensus algorithm. This document only briefly introduces Raft. For m TiKV use Raft to perform data replication. Each data change will be recorded as a Raft log. Through Raft log replication, data is safely and reliably replicated to multiple nodes of the Raft group. However, according to Raft protocol, successful writes only need that data is replicated to the majority of nodes. -![Raft in TiDB](/media/tidb-storage-1.png) +![Raft in TiDB](./media/tidb-storage-1.png) In summary, TiKV can quickly store data on disk via the standalone machine RocksDB, and replicate data to multiple machines via Raft in case of machine failure. Data is written through the interface of Raft instead of to RocksDB. With the implementation of Raft, TiKV becomes a distributed Key-Value storage. Even with a few machine failures, TiKV can automatically complete replicas by virtue of the native Raft protocol, which does not impact the application. 
@@ -51,7 +51,7 @@ To make it easy to understand, let's assume that all data only has one replica. TiKV chooses the second solution that divides the whole Key-Value space into a series of consecutive Key segments. Each segment is called a Region. Each Region can be described by `[StartKey, EndKey)`, a left-closed and right-open interval. The default size limit for each Region is 256 MiB and the size can be configured. -![Region in TiDB](/media/tidb-storage-2.png) +![Region in TiDB](./media/tidb-storage-2.png) Note that the Region here has nothing to do with the table in SQL. In this document, forget about SQL and focus on KV for now. After dividing data into Regions, TiKV will perform two important tasks: @@ -68,7 +68,7 @@ These two tasks are very important and will be introduced one by one. One of the Replicas serves as the Leader of the Group and other as the Follower. By default, all reads and writes are processed through the Leader, where reads are done and write are replicated to followers. The following diagram shows the whole picture about Region and Raft group. -![TiDB Storage](/media/tidb-storage-3.png) +![TiDB Storage](./media/tidb-storage-3.png) As we distribute and replicate data in Regions, we have a distributed Key-Value system that, to some extent, has the capability of disaster recovery. You no longer need to worry about the capacity, or disk failure and data loss. diff --git a/tiflash-performance-tuning-methods.md b/tiflash-performance-tuning-methods.md index 767bd1da85e97..dec158ea85af5 100644 --- a/tiflash-performance-tuning-methods.md +++ b/tiflash-performance-tuning-methods.md @@ -19,7 +19,7 @@ Example: Resource utilization during [CH-benCHmark workload](/benchmark/benchmar This TiFlash cluster consists of two nodes, each node configured with 16 cores and 48 GB of memory. During the CH-benCHmark workload, CPU utilization can reach up to 1500%, memory usage can reach up to 20 GB, and IO utilization can reach up to 91%. These metrics indicate that TiFlash node resources are approaching saturation. -![CH-TiFlash-MPP](/media/performance/tiflash/tiflash-resource-usage.png) +![CH-TiFlash-MPP](./media/performance/tiflash/tiflash-resource-usage.png) ## Key metrics for TiFlash performance @@ -57,13 +57,13 @@ In the workload of the following diagram, `run_mpp_task` and `mpp_establish_conn The processing duration of `cop` requests is relatively small, indicating that some of the requests are pushed down to TiFlash for data access and filtering through the coprocessor. -![CH-TiFlash-MPP](/media/performance/tiflash/ch-2tiflash-op.png) +![CH-TiFlash-MPP](./media/performance/tiflash/ch-2tiflash-op.png) Example 2: TiFlash `cop` requests constitute the majority of the total processing duration In the workload of the following diagram, `cop` requests constitute the majority of the total processing duration. In this case, you can check the SQL execution plan to see why these `cop` requests are generated. -![Cop](/media/performance/tiflash/tiflash_request_duration_by_type.png) +![Cop](./media/performance/tiflash/tiflash_request_duration_by_type.png) ### Raft-related metrics @@ -104,10 +104,10 @@ As shown in the following diagram, the `Raft Wait Index Duration` and the 99th p In this cluster, there are two TiFlash nodes. The incremental data replication speed from TiKV to TiFlash is approximately 28 MB per second. The maximum write throughput of the stable layer (File Descriptor) is 939 MB/s, and the maximum read throughput is 1.1 GiB/s. 
Meanwhile, the maximum write throughput of the Delta layer (Page) is 74 MB/s, and the maximum read throughput is 111 MB/s. In this environment, TiFlash uses dedicated NVME disks, which have strong IO throughput capabilities. -![CH-2TiFlash-OP](/media/performance/tiflash/ch-2tiflash-raft-io-flow.png) +![CH-2TiFlash-OP](./media/performance/tiflash/ch-2tiflash-raft-io-flow.png) Example 2: Raft and IO metrics of the [CH-benCHmark workload](/benchmark/benchmark-tidb-using-ch.md) in a public cloud deployment environment As shown in the following diagram, the 99th percentile of `Raft Wait Index Duration` is up to 438 milliseconds, and 99th percentile of the `Raft Batch Read Index Duration` is up to 125 milliseconds. This cluster has only one TiFlash node. TiKV replicates about 5 MB of incremental data to TiFlash per second. The maximum write traffic of the stable layer (File Descriptor) is 78 MB/s and the maximum read traffic is 221 MB/s. In the meantime, the maximum write traffic of the Delta layer (Page) is 8 MB/s and the maximum read traffic is 18 MB/s. In this environment, TiFlash uses an AWS EBS cloud disk, which has relatively weak IO throughput. -![CH-TiFlash-MPP](/media/performance/tiflash/ch-1tiflash-raft-io-flow-cloud.png) \ No newline at end of file +![CH-TiFlash-MPP](./media/performance/tiflash/ch-1tiflash-raft-io-flow-cloud.png) \ No newline at end of file diff --git a/tiflash/tiflash-disaggregated-and-s3.md b/tiflash/tiflash-disaggregated-and-s3.md index 856d64dbe2a6e..d289056cbf66b 100644 --- a/tiflash/tiflash-disaggregated-and-s3.md +++ b/tiflash/tiflash-disaggregated-and-s3.md @@ -9,7 +9,7 @@ By default, TiFlash is deployed using the coupled storage and compute architectu ## Architecture overview -![TiFlash Write and Compute Separation Architecture](/media/tiflash/tiflash-s3.png) +![TiFlash Write and Compute Separation Architecture](./media/tiflash/tiflash-s3.png) In the disaggregated storage and compute architecture, different functionalities of the TiFlash process are divided and allocated to two types of nodes: the Write Node and the Compute Node. These two types of nodes can be deployed separately and scaled independently, which means that you can decide the number of Write Nodes and Compute Nodes to be deployed as needed. diff --git a/tiflash/tiflash-mintso-scheduler.md b/tiflash/tiflash-mintso-scheduler.md index b9c0ac894ffdc..44410d2eaf1b5 100644 --- a/tiflash/tiflash-mintso-scheduler.md +++ b/tiflash/tiflash-mintso-scheduler.md @@ -19,7 +19,7 @@ To improve TiFlash's processing capability in high concurrency scenarios, an MPP As mentioned in the [background](#background), the initial purpose of introducing the TiFlash task scheduler is to control the number of threads used during MPP query execution. A simple scheduling strategy is to specify the maximum number of threads TiFlash can request. 
For each MPP task, the scheduler decides whether the MPP task can be scheduled based on the current number of threads used by the system and the expected number of threads the MPP task will use: -![TiFlash MinTSO Scheduler v1](/media/tiflash/tiflash_mintso_v1.png) +![TiFlash MinTSO Scheduler v1](./media/tiflash/tiflash_mintso_v1.png) Although the preceding scheduling strategy can effectively control the number of system threads, an MPP task is not the smallest independent execution unit, and dependencies exist between different MPP tasks: @@ -59,7 +59,7 @@ The goal of the MinTSO scheduler is to control the number of system threads whil The scheduling process of the MinTSO Scheduler is as follows: -![TiFlash MinTSO Scheduler v2](/media/tiflash/tiflash_mintso_v2.png) +![TiFlash MinTSO Scheduler v2](./media/tiflash/tiflash_mintso_v2.png) By introducing soft limit and hard limit, the MinTSO scheduler effectively avoids system deadlock while controlling the number of system threads. In high concurrency scenarios, however, most queries might only have part of their MPP tasks scheduled. Queries with only part of MPP tasks scheduled cannot execute normally, leading to low system execution efficiency. To avoid this situation, TiFlash introduces a query-level limit for the MinTSO scheduler, called active_set_soft_limit. This limit allows only MPP tasks of up to active_set_soft_limit queries to participate in scheduling; MPP tasks of other queries do not participate in scheduling, and only after the current queries finish can new queries participate in scheduling. This limit is only a soft limit because for the MinTSO query, all its MPP tasks can be scheduled directly as long as the number of system threads does not exceed the hard limit. diff --git a/tiflash/tiflash-overview.md b/tiflash/tiflash-overview.md index c7e5a8e06cfbb..1cc2b178d5d55 100644 --- a/tiflash/tiflash-overview.md +++ b/tiflash/tiflash-overview.md @@ -18,7 +18,7 @@ With TiDB Cloud, you can create an HTAP cluster easily by specifying one or more ## Architecture -![TiFlash Architecture](/media/tidb-storage-architecture-1.png) +![TiFlash Architecture](./media/tidb-storage-architecture-1.png) The above figure is the architecture of TiDB in its HTAP form, including TiFlash nodes. diff --git a/tiflash/tiflash-pipeline-model.md b/tiflash/tiflash-pipeline-model.md index 37752c3862485..b799454b5ef44 100644 --- a/tiflash/tiflash-pipeline-model.md +++ b/tiflash/tiflash-pipeline-model.md @@ -30,7 +30,7 @@ The new pipeline execution model makes the following optimizations: The architecture of the pipeline execution model is as follows: -![TiFlash pipeline execution model design](/media/tiflash/tiflash-pipeline-model.png) +![TiFlash pipeline execution model design](./media/tiflash/tiflash-pipeline-model.png) As shown in the preceding figure, the pipeline execution model consists of two main components: the pipeline query executor and the task scheduler. diff --git a/tiflash/use-tiflash-mpp-mode.md b/tiflash/use-tiflash-mpp-mode.md index ee4c9fe197897..e77f5d89647f8 100644 --- a/tiflash/use-tiflash-mpp-mode.md +++ b/tiflash/use-tiflash-mpp-mode.md @@ -21,7 +21,7 @@ TiFlash supports using the MPP mode to execute queries, which introduces cross-n The following diagram shows how the MPP mode works. 
-![mpp-mode](/media/tiflash/tiflash-mpp.png) +![mpp-mode](./media/tiflash/tiflash-mpp.png) ## Control whether to select the MPP mode diff --git a/tikv-in-memory-engine.md b/tikv-in-memory-engine.md index bf7fbb4f1fa75..634973cf49019 100644 --- a/tikv-in-memory-engine.md +++ b/tikv-in-memory-engine.md @@ -18,7 +18,7 @@ The TiKV MVCC in-memory engine caches the latest written MVCC versions in memory The following diagram illustrates how TiKV organizes MVCC versions: -![IME caches recent versions to reduce CPU overhead](/media/tikv-ime-data-organization.png) +![IME caches recent versions to reduce CPU overhead](./media/tikv-ime-data-organization.png) The preceding diagram shows two rows of records, each with 9 MVCC versions. The behavior comparison between enabling and not enabling the in-memory engine is as follows: diff --git a/tikv-overview.md b/tikv-overview.md index 077ae60234b57..46c152267eac7 100644 --- a/tikv-overview.md +++ b/tikv-overview.md @@ -11,7 +11,7 @@ TiKV is a distributed and transactional key-value database, which provides trans TiKV implements the multi-raft-group replica mechanism based on the design of Google Spanner. A Region is a basic unit of the key-value data movement and refers to a data range in a Store. Each Region is replicated to multiple nodes. These multiple replicas form a Raft group. A replica of a Region is called a Peer. Typically there are 3 peers in a Region. One of them is the leader, which provides the read and write services. The PD component balances all the Regions automatically to guarantee that the read and write throughput is balanced among all the nodes in the TiKV cluster. With PD and carefully designed Raft groups, TiKV excels in horizontal scalability and can easily scale to store more than 100 TBs of data. -![TiKV Architecture](/media/tikv-arch.png) +![TiKV Architecture](./media/tikv-arch.png) ### Region and RocksDB diff --git a/time-to-live.md b/time-to-live.md index 146d4177fa46f..6bfe2ee1ca1ed 100644 --- a/time-to-live.md +++ b/time-to-live.md @@ -278,7 +278,7 @@ Currently, the TTL feature has the following limitations: In the [Grafana `TiDB` dashboard](/grafana-tidb-dashboard.md), the panel `TTL Insert Rows Per Hour` records the total number of rows inserted in the previous hour. The corresponding `TTL Delete Rows Per Hour` records the total number of rows deleted by the TTL task in the previous hour. If `TTL Insert Rows Per Hour` is higher than `TTL Delete Rows Per Hour` for a long time, it means that the rate of insertion is higher than the rate of deletion and the total amount of data will increase. For example: - ![insert fast example](/media/ttl/insert-fast.png) + ![insert fast example](./media/ttl/insert-fast.png) It is worth noting that since TTL does not guarantee that the expired rows will be deleted immediately, and the rows currently inserted will be deleted in a future TTL task, even if the speed of TTL deletion is lower than the speed of insertion in a short period of time, it does not necessarily mean that the speed of TTL is too slow. You need to consider the situation in its context. @@ -286,11 +286,11 @@ Currently, the TTL feature has the following limitations: Look at the `TTL Scan Worker Time By Phase` and `TTL Delete Worker Time By Phase` panels. If the scan worker is in the `dispatch` phase for a large percentage of time and the delete worker is rarely in the `idle` phase, then the scan worker is waiting for the delete worker to finish the deletion. 
If the cluster resources are still free at this point, you can consider increasing `tidb_ttl_ delete_worker_count` to increase the number of delete workers. For example: - ![scan fast example](/media/ttl/scan-fast.png) + ![scan fast example](./media/ttl/scan-fast.png) In contrast, if the scan worker is rarely in the `dispatch` phase and the delete worker is in the `idle` phase for a long time, then the scan worker is relatively busy. For example: - ![delete fast example](/media/ttl/delete-fast.png) + ![delete fast example](./media/ttl/delete-fast.png) The percentage of scan and delete in TTL jobs is related to the machine configuration and data distribution, so the monitoring data at each moment is only representative of the TTL Jobs being executed. You can read the table `mysql.tidb_ttl_job_history` to determine which TTL job is running at a certain moment and the corresponding table of the job. diff --git a/tispark-overview.md b/tispark-overview.md index 95e13d8f44abc..8364632197710 100644 --- a/tispark-overview.md +++ b/tispark-overview.md @@ -6,7 +6,7 @@ aliases: ['/docs/dev/tispark-overview/','/docs/dev/reference/tispark/','/docs/de # TiSpark User Guide -![TiSpark architecture](/media/tispark-architecture.png) +![TiSpark architecture](./media/tispark-architecture.png) ## TiSpark vs TiFlash diff --git a/troubleshoot-hot-spot-issues.md b/troubleshoot-hot-spot-issues.md index c316e7ab2fc1e..9db4473ddecf6 100644 --- a/troubleshoot-hot-spot-issues.md +++ b/troubleshoot-hot-spot-issues.md @@ -71,19 +71,19 @@ Performance problems are not necessarily caused by hotspots and might be caused The **Key Visualizer** feature in [TiDB Dashboard](/dashboard/dashboard-intro.md) helps users narrow down hotspot troubleshooting scope to the table level. The following is an example of the thermal diagram shown by **Key Visualizer**. The horizontal axis of the graph is time, and the vertical axis are various tables and indexes. The brighter the color, the greater the load. You can switch the read or write flow in the toolbar. -![Dashboard Example 1](/media/troubleshoot-hot-spot-issues-1.png) +![Dashboard Example 1](./media/troubleshoot-hot-spot-issues-1.png) The following bright diagonal lines (oblique upward or downward) can appear in the write flow graph. Because the write only appears at the end, as the number of table Regions becomes larger, it appears as a ladder. This indicates that a write hotspot shows in this table: -![Dashboard Example 2](/media/troubleshoot-hot-spot-issues-2.png) +![Dashboard Example 2](./media/troubleshoot-hot-spot-issues-2.png) For read hotspots, a bright horizontal line is generally shown in the thermal diagram. Usually these are caused by small tables with a large number of accesses, shown as follows: -![Dashboard Example 3](/media/troubleshoot-hot-spot-issues-3.png) +![Dashboard Example 3](./media/troubleshoot-hot-spot-issues-3.png) Hover over the bright block, you can see what table or index has a heavy load. For example: -![Dashboard Example 4](/media/troubleshoot-hot-spot-issues-4.png) +![Dashboard Example 4](./media/troubleshoot-hot-spot-issues-4.png) ## Use `SHARD_ROW_ID_BITS` to process hotspots @@ -112,9 +112,9 @@ For the table with a primary key of the `CLUSTERED` type, TiDB uses the primary The following two load diagrams shows the case where two tables without primary keys use `SHARD_ROW_ID_BITS` to scatter hotspots. The first diagram shows the situation before scattering hotspots, while the second one shows the situation after scattering hotspots. 
-![Dashboard Example 5](/media/troubleshoot-hot-spot-issues-5.png) +![Dashboard Example 5](./media/troubleshoot-hot-spot-issues-5.png) -![Dashboard Example 6](/media/troubleshoot-hot-spot-issues-6.png) +![Dashboard Example 6](./media/troubleshoot-hot-spot-issues-6.png) As shown in the load diagrams above, before setting `SHARD_ROW_ID_BITS`, load hotspots are concentrated on a single Region. After setting `SHARD_ROW_ID_BITS`, load hotspots become scattered. @@ -162,9 +162,9 @@ SELECT LAST_INSERT_ID(); The following two load diagrams shows the situations both before and after modifying `AUTO_INCREMENT` to `AUTO_RANDOM` to scatter hotspots. The first one uses `AUTO_INCREMENT`, while the second one uses `AUTO_RANDOM`. -![Dashboard Example 7](/media/troubleshoot-hot-spot-issues-7.png) +![Dashboard Example 7](./media/troubleshoot-hot-spot-issues-7.png) -![Dashboard Example 8](/media/troubleshoot-hot-spot-issues-8.png) +![Dashboard Example 8](./media/troubleshoot-hot-spot-issues-8.png) As shown in the load diagrams above, using `AUTO_RANDOM` to replace `AUTO_INCREMENT` can well scatter hotspots. diff --git a/troubleshoot-lock-conflicts.md b/troubleshoot-lock-conflicts.md index 9267104f45ed5..7243adb5c1da9 100644 --- a/troubleshoot-lock-conflicts.md +++ b/troubleshoot-lock-conflicts.md @@ -173,7 +173,7 @@ This section provides the solutions of common lock conflict issues in the optimi As the TiDB server receives a read request from a client, it gets a globally unique and increasing timestamp at the physical time as the start_ts of the current transaction. The transaction needs to read the latest data before start_ts, that is, the target key of the latest commit_ts that is smaller than start_ts. When the transaction finds that the target key is locked by another transaction, and it cannot know which phase the other transaction is in, a read-write conflict happens. The diagram is as follows: -![read-write conflict](/media/troubleshooting-lock-pic-04.png) +![read-write conflict](./media/troubleshooting-lock-pic-04.png) Txn0 completes the Prewrite phase and enters the Commit phase. At this time, Txn1 requests to read the same target key. Txn1 needs to read the target key of the latest commit_ts that is smaller than its start_ts. Because Txn1's start_ts is larger than Txn0's lock_ts, Txn1 must wait for the target key's lock to be cleared, but it hasn't been done. As a result, Txn1 cannot confirm whether Txn0 has been committed or not. Thus, a read-write conflict between Txn1 and Txn0 happens. @@ -185,8 +185,8 @@ You can detect the read-write conflict in your TiDB cluster by the following way In the `KV Errors` panel in the TiDB dashboard, `not_expired`/`resolve` in `Lock Resolve OPS` and `tikvLockFast` in `KV Backoff OPS` are monitoring metrics that can be used to check read-write conflicts in transactions. If the values of all the metrics increase, there might be many read-write conflicts. The `not_expired` item means that the transaction's lock has not timed out. The `resolve` item means that the other transaction tries to clean up the locks. The `tikvLockFast` item means that read-write conflicts occur. 
- ![KV-backoff-txnLockFast-optimistic](/media/troubleshooting-lock-pic-09.png) - ![KV-Errors-resolve-optimistic](/media/troubleshooting-lock-pic-08.png) + ![KV-backoff-txnLockFast-optimistic](./media/troubleshooting-lock-pic-09.png) + ![KV-Errors-resolve-optimistic](./media/troubleshooting-lock-pic-08.png) * Logs of the TiDB server @@ -238,8 +238,8 @@ You can check whether there's any "KeyIsLocked" error in the TiDB monitoring on The `KV Errors` panel in the TiDB dashboard has two monitoring metrics `Lock Resolve OPS` and `KV Backoff OPS` which can be used to check write-write conflicts caused by a transaction. If the `resolve` item under `Lock Resolve OPS` and the `txnLock` item under `KV Backoff OPS` have a clear upward trend, a "KeyIsLocked" error occurs. `resolve` refers to the operation that attempts to clear the lock, and `txnLock` represents a write conflict. -![KV-backoff-txnLockFast-optimistic-01](/media/troubleshooting-lock-pic-07.png) -![KV-Errors-resolve-optimistic-01](/media/troubleshooting-lock-pic-08.png) +![KV-backoff-txnLockFast-optimistic-01](./media/troubleshooting-lock-pic-07.png) +![KV-Errors-resolve-optimistic-01](./media/troubleshooting-lock-pic-08.png) Solutions: diff --git a/troubleshoot-stale-read.md b/troubleshoot-stale-read.md index 112e1ac045943..19c9425158926 100644 --- a/troubleshoot-stale-read.md +++ b/troubleshoot-stale-read.md @@ -41,11 +41,11 @@ This section introduces how to diagnose Stale Read issues using Grafana, `tikv-c In the [Grafana > TiDB dashboard > **KV Request** dashboard](/grafana-tidb-dashboard.md#kv-request), the following panels show the hit rate, OPS, and traffic of Stale Read: -![Stale Read Hit/Miss OPS](/media/stale-read/metrics-hit-miss.png) +![Stale Read Hit/Miss OPS](./media/stale-read/metrics-hit-miss.png) -![Stale Read Req OPS](/media/stale-read/metrics-ops.png) +![Stale Read Req OPS](./media/stale-read/metrics-ops.png) -![Stale Read Req Traffic](/media/stale-read/traffic.png) +![Stale Read Req Traffic](./media/stale-read/traffic.png) For more information about the preceding metrics, see [TiDB monitoring metrics](/grafana-tidb-dashboard.md#kv-request). @@ -166,11 +166,11 @@ To address CheckLeader issues, you can check the network and the **Check Leader If you observe an increasing miss rate of **Stale Read OPS** as follows: -![Example: Stale Read OPS](/media/stale-read/example-ops.png) +![Example: Stale Read OPS](./media/stale-read/example-ops.png) You can first check the **Max Resolved TS gap** and **Min Resolved TS Region** metrics in the [**TiKV-Details** > **Resolved-TS** dashboard](/grafana-tikv-dashboard.md#resolved-ts): -![Example: Max Resolved TS gap](/media/stale-read/example-ts-gap.png) +![Example: Max Resolved TS gap](./media/stale-read/example-ts-gap.png) From the preceding metrics, you can find that Region `3121` and some other Regions have not updated their resolved-ts in time. diff --git a/troubleshoot-write-conflicts.md b/troubleshoot-write-conflicts.md index c8363034bf772..786f74a85b336 100644 --- a/troubleshoot-write-conflicts.md +++ b/troubleshoot-write-conflicts.md @@ -30,13 +30,13 @@ In the TiDB Grafana panel, check the following monitoring metrics under **KV Err * **KV Backoff OPS** indicates the count of error messages per second returned by TiKV. - ![kv-backoff-ops](/media/troubleshooting-write-conflict-kv-backoff-ops.png) + ![kv-backoff-ops](./media/troubleshooting-write-conflict-kv-backoff-ops.png) The `txnlock` metric indicates the write-write conflict. 
The `txnLockFast` metric indicates the read-write conflict. * **Lock Resolve OPS** indicates the count of items related to transaction conflicts per second: - ![lock-resolve-ops](/media/troubleshooting-write-conflict-lock-resolve-ops.png) + ![lock-resolve-ops](./media/troubleshooting-write-conflict-lock-resolve-ops.png) - `not_expired` indicates the TTL of the lock was not expired. The conflict transaction cannot resolve locks until the TTL is expired. - `wait_expired` indicates that the transaction needs to wait the lock to expire. @@ -44,7 +44,7 @@ In the TiDB Grafana panel, check the following monitoring metrics under **KV Err * **KV Retry Duration** indicates the duration of re-sends the KV request: - ![kv-retry-duration](/media/troubleshooting-write-conflict-kv-retry-duration.png) + ![kv-retry-duration](./media/troubleshooting-write-conflict-kv-retry-duration.png) You can also use `[kv:9007]Write conflict` as the keywords to search in the TiDB log. The keywords also indicate the write conflict exists in the cluster. diff --git a/two-data-centers-in-one-city-deployment.md b/two-data-centers-in-one-city-deployment.md index 6f8268b6e2beb..d85cd86828a82 100644 --- a/two-data-centers-in-one-city-deployment.md +++ b/two-data-centers-in-one-city-deployment.md @@ -25,7 +25,7 @@ The architecture of the cluster deployment is as follows: - The cluster has six replicas: three Voter replicas in AZ1, and two Voter replicas along with one Learner replica in AZ2. For the TiKV component, each rack has a proper label. - The Raft protocol is adopted to ensure consistency and high availability of data, which is transparent to users. -![2-AZ-in-1-region architecture](/media/two-dc-replication-1.png) +![2-AZ-in-1-region architecture](./media/two-dc-replication-1.png) This deployment solution defines three statuses to control and identify the replication status of the cluster, which restricts the replication mode of TiKV. The replication mode of the cluster can automatically and adaptively switch between the three statuses. For details, see the [Status switch](#status-switch) section. diff --git a/vector-search/vector-search-overview.md b/vector-search/vector-search-overview.md index 62ca2d8e3e6dd..e3cc17ea98f0c 100644 --- a/vector-search/vector-search-overview.md +++ b/vector-search/vector-search-overview.md @@ -59,7 +59,7 @@ After converting raw data into vector embeddings and storing them in TiDB, your TiDB vector search identifies the top-k nearest neighbor (KNN) vectors by using a [distance function](/vector-search/vector-search-functions-and-operators.md) to calculate the distance between the given vector and vectors stored in the database. The vectors closest to the given vector in the query represent the most similar data in meaning. -![The Schematic TiDB Vector Search](/media/vector-search/embedding-search.png) +![The Schematic TiDB Vector Search](./media/vector-search/embedding-search.png) As a relational database with integrated vector search capabilities, TiDB enables you to store data and their corresponding vector representations (that is, vector embeddings) together in one database. You can choose any of the following ways for storage: