diff --git a/.github/workflows/assign_milestone.yml b/.github/workflows/assign_milestone.yml
index 443d28e80d6..0e8eb91f94a 100644
--- a/.github/workflows/assign_milestone.yml
+++ b/.github/workflows/assign_milestone.yml
@@ -12,7 +12,7 @@ env:
jobs:
build:
name: Assign Milestone
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-latest
permissions:
pull-requests: write
@@ -20,7 +20,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Checkout code
uses: actions/checkout@v4
diff --git a/.github/workflows/check_make_vtadmin_authz_testgen.yml b/.github/workflows/check_make_vtadmin_authz_testgen.yml
index 08104997714..601fea2981c 100644
--- a/.github/workflows/check_make_vtadmin_authz_testgen.yml
+++ b/.github/workflows/check_make_vtadmin_authz_testgen.yml
@@ -8,7 +8,7 @@ env:
jobs:
build:
name: Check Make vtadmin_authz_testgen
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-latest
steps:
- name: Skip CI
run: |
@@ -49,7 +49,7 @@ jobs:
uses: actions/setup-go@v5
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.vtadmin_changes == 'true'
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.vtadmin_changes == 'true'
diff --git a/.github/workflows/check_make_vtadmin_web_proto.yml b/.github/workflows/check_make_vtadmin_web_proto.yml
index f45b9455b00..f49aa071239 100644
--- a/.github/workflows/check_make_vtadmin_web_proto.yml
+++ b/.github/workflows/check_make_vtadmin_web_proto.yml
@@ -6,7 +6,7 @@ permissions: read-all
jobs:
build:
name: Check Make VTAdmin Web Proto
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-latest
steps:
- name: Skip CI
run: |
@@ -49,7 +49,7 @@ jobs:
uses: actions/setup-go@v5
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.proto_changes == 'true'
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.proto_changes == 'true'
diff --git a/.github/workflows/close_stale_pull_requests.yml b/.github/workflows/close_stale_pull_requests.yml
index e0201c0104b..7b994d7fff2 100644
--- a/.github/workflows/close_stale_pull_requests.yml
+++ b/.github/workflows/close_stale_pull_requests.yml
@@ -9,7 +9,7 @@ permissions: read-all
jobs:
close_stale_pull_requests:
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-latest
permissions:
pull-requests: write
diff --git a/.github/workflows/cluster_endtoend_12.yml b/.github/workflows/cluster_endtoend_12.yml
index 3713a571b13..459e59e9458 100644
--- a/.github/workflows/cluster_endtoend_12.yml
+++ b/.github/workflows/cluster_endtoend_12.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_13.yml b/.github/workflows/cluster_endtoend_13.yml
index d4b81e06957..2cd47be95dc 100644
--- a/.github/workflows/cluster_endtoend_13.yml
+++ b/.github/workflows/cluster_endtoend_13.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_15.yml b/.github/workflows/cluster_endtoend_15.yml
index 3785fdcc28e..1e9fe44a18a 100644
--- a/.github/workflows/cluster_endtoend_15.yml
+++ b/.github/workflows/cluster_endtoend_15.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_18.yml b/.github/workflows/cluster_endtoend_18.yml
index d68f88430fc..465abb6c3e3 100644
--- a/.github/workflows/cluster_endtoend_18.yml
+++ b/.github/workflows/cluster_endtoend_18.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_21.yml b/.github/workflows/cluster_endtoend_21.yml
index a06b7d72940..375f8e9ab31 100644
--- a/.github/workflows/cluster_endtoend_21.yml
+++ b/.github/workflows/cluster_endtoend_21.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_22.yml b/.github/workflows/cluster_endtoend_22.yml
index 62dfc6fe059..4c67161b5a1 100644
--- a/.github/workflows/cluster_endtoend_22.yml
+++ b/.github/workflows/cluster_endtoend_22.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_backup_pitr.yml b/.github/workflows/cluster_endtoend_backup_pitr.yml
index f7e08685b97..1dcf3a3f656 100644
--- a/.github/workflows/cluster_endtoend_backup_pitr.yml
+++ b/.github/workflows/cluster_endtoend_backup_pitr.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml b/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml
index e92a4270a74..75ad2000ff8 100644
--- a/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml
+++ b/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml b/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml
index a6703085c33..a5ce553b5cd 100644
--- a/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml
+++ b/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_mysql80.yml b/.github/workflows/cluster_endtoend_mysql80.yml
index 78375ea549e..1cec0679b9d 100644
--- a/.github/workflows/cluster_endtoend_mysql80.yml
+++ b/.github/workflows/cluster_endtoend_mysql80.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_mysql_server_vault.yml b/.github/workflows/cluster_endtoend_mysql_server_vault.yml
index 16974e3d159..2388d9b6648 100644
--- a/.github/workflows/cluster_endtoend_mysql_server_vault.yml
+++ b/.github/workflows/cluster_endtoend_mysql_server_vault.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_onlineddl_ghost.yml b/.github/workflows/cluster_endtoend_onlineddl_ghost.yml
index 1fe69e60be8..27ae764995f 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_ghost.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_ghost.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_onlineddl_revert.yml b/.github/workflows/cluster_endtoend_onlineddl_revert.yml
index 115b59efa0a..4b542988c2e 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_revert.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_revert.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml b/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml
index fba4bf7e009..e5ce1b444eb 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml
index 3612cb498c5..6d2d14b72a4 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml
index cdd5217a3f4..c91c87dc09f 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml
index 7daab66a4aa..18a3273bce2 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml
index 7329b0e0892..0bdf2c03156 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml b/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml
index 2aba8197130..0648fca17a5 100644
--- a/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml
+++ b/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_tabletmanager_consul.yml b/.github/workflows/cluster_endtoend_tabletmanager_consul.yml
index a14a5c3e7aa..7ff6800277f 100644
--- a/.github/workflows/cluster_endtoend_tabletmanager_consul.yml
+++ b/.github/workflows/cluster_endtoend_tabletmanager_consul.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml b/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml
index afc57695e83..d70aedf1430 100644
--- a/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml
+++ b/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml b/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml
index 55e52394db3..35c5cba9c6b 100644
--- a/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml
+++ b/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_topo_connection_cache.yml b/.github/workflows/cluster_endtoend_topo_connection_cache.yml
index f3aa40dbb18..7f2d1c4760e 100644
--- a/.github/workflows/cluster_endtoend_topo_connection_cache.yml
+++ b/.github/workflows/cluster_endtoend_topo_connection_cache.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml b/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml
index e5c88b13147..b545dfbe1c9 100644
--- a/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vreplication_basic.yml b/.github/workflows/cluster_endtoend_vreplication_basic.yml
index 86691286bdc..4ce0e941dba 100644
--- a/.github/workflows/cluster_endtoend_vreplication_basic.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_basic.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml
index 671b27d8c3e..4258d0484dc 100644
--- a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml b/.github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml
index c61bddf2e10..35dc9f58ed5 100644
--- a/.github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml b/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml
index 958b9a6c2b8..6f5aa22e95e 100644
--- a/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml
index c928b08ea9a..829c06e57a6 100644
--- a/.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vreplication_v2.yml b/.github/workflows/cluster_endtoend_vreplication_v2.yml
index e391e698d8e..30244171ef8 100644
--- a/.github/workflows/cluster_endtoend_vreplication_v2.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_v2.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vstream.yml b/.github/workflows/cluster_endtoend_vstream.yml
index 3946614f674..c81c36a641f 100644
--- a/.github/workflows/cluster_endtoend_vstream.yml
+++ b/.github/workflows/cluster_endtoend_vstream.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtbackup.yml b/.github/workflows/cluster_endtoend_vtbackup.yml
index 4dea0079881..81b2ff49d2d 100644
--- a/.github/workflows/cluster_endtoend_vtbackup.yml
+++ b/.github/workflows/cluster_endtoend_vtbackup.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml b/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml
index 1cbe2be67e3..730b91fc2d3 100644
--- a/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml
+++ b/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml b/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml
index 2a7ba3c8d7f..9576c15816f 100644
--- a/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml b/.github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml
index 87765c30955..720ff666a15 100644
--- a/.github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_gen4.yml b/.github/workflows/cluster_endtoend_vtgate_gen4.yml
index 84c0cc1c850..abf05102c95 100644
--- a/.github/workflows/cluster_endtoend_vtgate_gen4.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_gen4.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml b/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml
index 302204aa91b..585d277a419 100644
--- a/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_godriver.yml b/.github/workflows/cluster_endtoend_vtgate_godriver.yml
index 33a080768ef..1144565c406 100644
--- a/.github/workflows/cluster_endtoend_vtgate_godriver.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_godriver.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml b/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml
index 24585039482..bf281932d70 100644
--- a/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_queries.yml b/.github/workflows/cluster_endtoend_vtgate_queries.yml
index 196eb5d9804..a99ab389ae1 100644
--- a/.github/workflows/cluster_endtoend_vtgate_queries.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_queries.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml b/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml
index bc9923e4421..bfe3b156fc0 100644
--- a/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml b/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml
index dfc3658e405..4487e18be13 100644
--- a/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_schema.yml b/.github/workflows/cluster_endtoend_vtgate_schema.yml
index 3f51b9a8a72..7bb9bddb5f4 100644
--- a/.github/workflows/cluster_endtoend_vtgate_schema.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_schema.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml b/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml
index abc2ab056a6..858a6a2236b 100644
--- a/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml b/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml
index c3d5787e76a..6aabc077d1e 100644
--- a/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_topo.yml b/.github/workflows/cluster_endtoend_vtgate_topo.yml
index 4bb6142fc27..4150505b305 100644
--- a/.github/workflows/cluster_endtoend_vtgate_topo.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_topo.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml b/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml
index ece3d5e8fee..cd389612d75 100644
--- a/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml b/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml
index 9210541ecba..c51a149f104 100644
--- a/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_transaction.yml b/.github/workflows/cluster_endtoend_vtgate_transaction.yml
index 84d6eba4b80..2a01e5fd2b7 100644
--- a/.github/workflows/cluster_endtoend_vtgate_transaction.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_transaction.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_unsharded.yml b/.github/workflows/cluster_endtoend_vtgate_unsharded.yml
index 5ba68f749bb..4440154b9cf 100644
--- a/.github/workflows/cluster_endtoend_vtgate_unsharded.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_unsharded.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml b/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml
index 2a0d2713f63..74f5f376884 100644
--- a/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_vschema.yml b/.github/workflows/cluster_endtoend_vtgate_vschema.yml
index e9882112a8b..82f196c6579 100644
--- a/.github/workflows/cluster_endtoend_vtgate_vschema.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_vschema.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtorc.yml b/.github/workflows/cluster_endtoend_vtorc.yml
index eb391f3999d..6aa56d4ec8b 100644
--- a/.github/workflows/cluster_endtoend_vtorc.yml
+++ b/.github/workflows/cluster_endtoend_vtorc.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml b/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml
index d2a4d099925..27bc12c7385 100644
--- a/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml
+++ b/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_xb_backup.yml b/.github/workflows/cluster_endtoend_xb_backup.yml
index 0ea9a48f175..fa41b3a664b 100644
--- a/.github/workflows/cluster_endtoend_xb_backup.yml
+++ b/.github/workflows/cluster_endtoend_xb_backup.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_xb_recovery.yml b/.github/workflows/cluster_endtoend_xb_recovery.yml
index cbb5d8630fa..97025fb6ffe 100644
--- a/.github/workflows/cluster_endtoend_xb_recovery.yml
+++ b/.github/workflows/cluster_endtoend_xb_recovery.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/codeql_analysis.yml b/.github/workflows/codeql_analysis.yml
index 5270c01b63b..461d8e05925 100644
--- a/.github/workflows/codeql_analysis.yml
+++ b/.github/workflows/codeql_analysis.yml
@@ -14,7 +14,7 @@ permissions: read-all
jobs:
analyze:
name: Analyze
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-latest
permissions:
actions: read
contents: read
@@ -32,7 +32,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
@@ -75,13 +75,6 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo apt-get install -y gnupg2
- sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release enable-only pxb-24
- sudo apt-get update
- sudo apt-get install -y percona-xtrabackup-24
-
- name: Building binaries
timeout-minutes: 30
run: |
diff --git a/.github/workflows/docker_build_base.yml b/.github/workflows/docker_build_base.yml
index 377b1a76b79..651bcb0a7c2 100644
--- a/.github/workflows/docker_build_base.yml
+++ b/.github/workflows/docker_build_base.yml
@@ -22,7 +22,7 @@ jobs:
strategy:
fail-fast: true
matrix:
- branch: [ latest, mysql57, percona57, percona80 ]
+ branch: [ latest, percona80 ]
steps:
- name: Check out code
diff --git a/.github/workflows/docker_build_lite.yml b/.github/workflows/docker_build_lite.yml
index ebac086cf91..2f316b88436 100644
--- a/.github/workflows/docker_build_lite.yml
+++ b/.github/workflows/docker_build_lite.yml
@@ -22,7 +22,7 @@ jobs:
strategy:
fail-fast: true
matrix:
- branch: [ latest, mysql57, mysql80, percona57, percona80 ]
+ branch: [ latest, mysql80, percona80 ]
steps:
- name: Check out code
diff --git a/.github/workflows/docker_build_vttestserver.yml b/.github/workflows/docker_build_vttestserver.yml
index e597fc81413..f9e4fcdf6da 100644
--- a/.github/workflows/docker_build_vttestserver.yml
+++ b/.github/workflows/docker_build_vttestserver.yml
@@ -22,7 +22,7 @@ jobs:
strategy:
fail-fast: true
matrix:
- branch: [ mysql57, mysql80 ]
+ branch: [ mysql80 ]
steps:
- name: Check out code
diff --git a/.github/workflows/docker_test_cluster_10.yml b/.github/workflows/docker_test_cluster_10.yml
index 5803929123f..8df868c8128 100644
--- a/.github/workflows/docker_test_cluster_10.yml
+++ b/.github/workflows/docker_test_cluster_10.yml
@@ -55,7 +55,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/docker_test_cluster_25.yml b/.github/workflows/docker_test_cluster_25.yml
index 51f2baca2d2..84cabccaa36 100644
--- a/.github/workflows/docker_test_cluster_25.yml
+++ b/.github/workflows/docker_test_cluster_25.yml
@@ -55,7 +55,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/e2e_race.yml b/.github/workflows/e2e_race.yml
index c0282dc4f9e..36383b9f5a5 100644
--- a/.github/workflows/e2e_race.yml
+++ b/.github/workflows/e2e_race.yml
@@ -54,7 +54,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/endtoend.yml b/.github/workflows/endtoend.yml
index a70449558bc..d2461571617 100644
--- a/.github/workflows/endtoend.yml
+++ b/.github/workflows/endtoend.yml
@@ -53,7 +53,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/local_example.yml b/.github/workflows/local_example.yml
index 14d6e0f0779..42698ce9485 100644
--- a/.github/workflows/local_example.yml
+++ b/.github/workflows/local_example.yml
@@ -58,7 +58,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
diff --git a/.github/workflows/region_example.yml b/.github/workflows/region_example.yml
index d99a132bf8e..4792551793e 100644
--- a/.github/workflows/region_example.yml
+++ b/.github/workflows/region_example.yml
@@ -58,7 +58,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
diff --git a/.github/workflows/static_checks_etc.yml b/.github/workflows/static_checks_etc.yml
index da15edbb2d3..7f5b92dc5c8 100644
--- a/.github/workflows/static_checks_etc.yml
+++ b/.github/workflows/static_checks_etc.yml
@@ -119,7 +119,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.go_files == 'true' || steps.changes.outputs.parser_changes == 'true' || steps.changes.outputs.proto_changes == 'true')
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.go_files == 'true' || steps.changes.outputs.parser_changes == 'true' || steps.changes.outputs.proto_changes == 'true')
diff --git a/.github/workflows/unit_race.yml b/.github/workflows/unit_race.yml
index 7384fe24147..be6e3d3c156 100644
--- a/.github/workflows/unit_race.yml
+++ b/.github/workflows/unit_race.yml
@@ -59,7 +59,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
diff --git a/.github/workflows/unit_test_mysql80.yml b/.github/workflows/unit_test_mysql80.yml
index e40ae045bd3..fe0d9ffc6cf 100644
--- a/.github/workflows/unit_test_mysql80.yml
+++ b/.github/workflows/unit_test_mysql80.yml
@@ -71,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
diff --git a/.github/workflows/update_golang_dependencies.yml b/.github/workflows/update_golang_dependencies.yml
index b416c09f949..cae15c803f4 100644
--- a/.github/workflows/update_golang_dependencies.yml
+++ b/.github/workflows/update_golang_dependencies.yml
@@ -19,7 +19,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Check out code
uses: actions/checkout@v4
diff --git a/.github/workflows/update_golang_version.yml b/.github/workflows/update_golang_version.yml
index 519fac82482..774f7a9ce3b 100644
--- a/.github/workflows/update_golang_version.yml
+++ b/.github/workflows/update_golang_version.yml
@@ -22,7 +22,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Check out code
uses: actions/checkout@v4
diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml
index d7e0e213037..bee926973e1 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml
index 326233a710f..c49d8229dd2 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml
@@ -72,7 +72,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual.yml b/.github/workflows/upgrade_downgrade_test_backups_manual.yml
index 0c006625510..0be2fdc0a54 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_manual.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_manual.yml
@@ -77,7 +77,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml
index 7a14447608a..740ef22fac3 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml
@@ -75,7 +75,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml
index 93daba76d6d..160d5c8166a 100644
--- a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml
+++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml
@@ -76,7 +76,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -121,63 +121,49 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo apt-get install -y gnupg2
- sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release enable-only pxb-24
- sudo apt-get update
- sudo apt-get install -y percona-xtrabackup-24
-
- # Checkout to the last release of Vitess
- - name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }})
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v4
- with:
- ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }}
-
- - name: Get dependencies for the last release
+ # Build current commit's binaries
+ - name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
go mod download
- - name: Building last release's binaries
+ - name: Building the binaries for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
timeout-minutes: 10
run: |
source build.env
NOVTADMINBUILD=1 make build
- mkdir -p /tmp/vitess-build-other/
- cp -R bin /tmp/vitess-build-other/
+ mkdir -p /tmp/vitess-build-current/
+ cp -R bin /tmp/vitess-build-current/
rm -Rf bin/*
- # Checkout to this build's commit
- - name: Check out commit's code
+ # Checkout to the last release of Vitess
+ - name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/checkout@v4
+ with:
+ ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }}
- - name: Get dependencies for this commit
+ - name: Get dependencies for the last release
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
go mod download
- - name: Building the binaries for this commit
+ - name: Building last release's binaries
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
timeout-minutes: 10
run: |
source build.env
NOVTADMINBUILD=1 make build
- mkdir -p /tmp/vitess-build-current/
- cp -R bin /tmp/vitess-build-current/
+ mkdir -p /tmp/vitess-build-other/
+ cp -R bin /tmp/vitess-build-other/
+ rm -Rf bin/*
- # Running a test with vtgate and vttablet using version n
- - name: Run query serving tests (vtgate=N, vttablet=N)
+ - name: Convert ErrorContains checks to Error checks
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- rm -rf /tmp/vtdataroot
- mkdir -p /tmp/vtdataroot
-
- source build.env
- eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+ find ./go/test/endtoend -name '*.go' -exec sed -i 's/ErrorContains/Error/g' {} +
+ find ./go/test/endtoend -name '*.go' -exec sed -i 's/EqualError/Error/g' {} +
# Swap the binaries in the bin. Use vtgate version n-1 and keep vttablet at version n
- name: Use last release's VTGate
@@ -185,12 +171,13 @@ jobs:
run: |
source build.env
+ cp -r /tmp/vitess-build-current/bin/* $PWD/bin/
rm -f $PWD/bin/vtgate
cp /tmp/vitess-build-other/bin/vtgate $PWD/bin/vtgate
vtgate --version
- # Running a test with vtgate at version n-1 and vttablet at version n
- - name: Run query serving tests (vtgate=N-1, vttablet=N)
+ # Running a test with vtgate at version n-1 and vttablet/vtctld at version n
+ - name: Run query serving tests (vtgate=N-1, vttablet=N, vtctld=N)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
rm -rf /tmp/vtdataroot
@@ -199,22 +186,38 @@ jobs:
source build.env
eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
- # Swap the binaries again. This time, vtgate will be at version n, and vttablet will be at version n-1
- - name: Use current version VTGate, and other version VTTablet
+ - name: Check out commit's code
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ # Swap the binaries again. This time, vtgate will be at version n, and vttablet/vtctld will be at version n-1
+ - name: Use current version VTGate, and other version VTTablet/VTctld
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
source build.env
+
+ rm -Rf bin/*
+ cp -r /tmp/vitess-build-current/bin/* $PWD/bin/
+
+ rm -f $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld
- rm -f $PWD/bin/vtgate $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld
- cp /tmp/vitess-build-current/bin/vtgate $PWD/bin/vtgate
cp /tmp/vitess-build-other/bin/vttablet $PWD/bin/vttablet
cp /tmp/vitess-build-other/bin/mysqlctl $PWD/bin/mysqlctl
cp /tmp/vitess-build-other/bin/mysqlctld $PWD/bin/mysqlctld
+
+ cp /tmp/vitess-build-other/bin/vtctld $PWD/bin/vtctld
+ cp /tmp/vitess-build-other/bin/vtctldclient $PWD/bin/vtctldclient
+ cp /tmp/vitess-build-other/bin/vtctl $PWD/bin/vtctl
+ cp /tmp/vitess-build-other/bin/vtctlclient $PWD/bin/vtctlclient
+
vtgate --version
vttablet --version
+ vtctl --version
- # Running a test with vtgate at version n and vttablet at version n-1
- - name: Run query serving tests (vtgate=N, vttablet=N-1)
+ # Running a test with vtgate at version n and vttablet/vtctld at version n-1
+ - name: Run query serving tests (vtgate=N, vttablet=N-1, vtctld=N-1)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
rm -rf /tmp/vtdataroot
diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml
index a666e7a90fd..5851aa456e1 100644
--- a/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml
+++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -115,63 +115,49 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo apt-get install -y gnupg2
- sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release enable-only pxb-24
- sudo apt-get update
- sudo apt-get install -y percona-xtrabackup-24
-
- # Checkout to the next release of Vitess
- - name: Check out other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }})
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v4
- with:
- ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }}
-
- - name: Get dependencies for the next release
+ # Build current commit's binaries
+ - name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
go mod download
- - name: Building next release's binaries
+ - name: Building the binaries for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
timeout-minutes: 10
run: |
source build.env
NOVTADMINBUILD=1 make build
- mkdir -p /tmp/vitess-build-other/
- cp -R bin /tmp/vitess-build-other/
+ mkdir -p /tmp/vitess-build-current/
+ cp -R bin /tmp/vitess-build-current/
rm -Rf bin/*
- # Checkout to this build's commit
- - name: Check out commit's code
+ # Checkout to the next release of Vitess
+ - name: Check out other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/checkout@v4
+ with:
+ ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }}
- - name: Get dependencies for this commit
+ - name: Get dependencies for the next release
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
go mod download
- - name: Building the binaries for this commit
+ - name: Building next release's binaries
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
timeout-minutes: 10
run: |
source build.env
NOVTADMINBUILD=1 make build
- mkdir -p /tmp/vitess-build-current/
- cp -R bin /tmp/vitess-build-current/
+ mkdir -p /tmp/vitess-build-other/
+ cp -R bin /tmp/vitess-build-other/
+ rm -Rf bin/*
- # Running a test with vtgate and vttablet using version n
- - name: Run query serving tests (vtgate=N, vttablet=N)
+ - name: Convert ErrorContains checks to Error checks
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- rm -rf /tmp/vtdataroot
- mkdir -p /tmp/vtdataroot
-
- source build.env
- eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+ find ./go/test/endtoend -name '*.go' -exec sed -i 's/ErrorContains/Error/g' {} +
+ find ./go/test/endtoend -name '*.go' -exec sed -i 's/EqualError/Error/g' {} +
# Swap the binaries in the bin. Use vtgate version n+1 and keep vttablet at version n
- name: Use next release's VTGate
@@ -179,6 +165,7 @@ jobs:
run: |
source build.env
+ cp -r /tmp/vitess-build-current/bin/* $PWD/bin/
rm -f $PWD/bin/vtgate
cp /tmp/vitess-build-other/bin/vtgate $PWD/bin/vtgate
vtgate --version
@@ -193,28 +180,38 @@ jobs:
source build.env
eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+ - name: Check out commit's code
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
# Swap the binaries again. This time, vtgate will be at version n, and vttablet will be at version n+1
- - name: Use current version VTGate, and other version VTTablet
+ - name: Use current version VTGate, and other version VTTablet/VTctld
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
source build.env
- rm -f $PWD/bin/vtgate $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld
- cp /tmp/vitess-build-current/bin/vtgate $PWD/bin/vtgate
+ rm -Rf bin/*
+ cp -r /tmp/vitess-build-current/bin/* $PWD/bin/
- cp /tmp/vitess-build-other/bin/vtctld $PWD/bin
- cp /tmp/vitess-build-other/bin/vtctldclient $PWD/bin
- cp /tmp/vitess-build-other/bin/vtctl $PWD/bin
- cp /tmp/vitess-build-other/bin/vtctlclient $PWD/bin
+ rm -f $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld
cp /tmp/vitess-build-other/bin/vttablet $PWD/bin/vttablet
cp /tmp/vitess-build-other/bin/mysqlctl $PWD/bin/mysqlctl
cp /tmp/vitess-build-other/bin/mysqlctld $PWD/bin/mysqlctld
+
+ cp /tmp/vitess-build-other/bin/vtctld $PWD/bin/vtctld
+ cp /tmp/vitess-build-other/bin/vtctldclient $PWD/bin/vtctldclient
+ cp /tmp/vitess-build-other/bin/vtctl $PWD/bin/vtctl
+ cp /tmp/vitess-build-other/bin/vtctlclient $PWD/bin/vtctlclient
+
vtgate --version
vttablet --version
+ vtctl --version
# Running a test with vtgate at version n and vttablet at version n+1
- - name: Run query serving tests (vtgate=N, vttablet=N+1)
+ - name: Run query serving tests (vtgate=N, vttablet=N+1, vtctld=N+1)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
rm -rf /tmp/vtdataroot
diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml
index af0084315a9..17331b912fa 100644
--- a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml
+++ b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml
@@ -76,7 +76,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -121,13 +121,6 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo apt-get install -y gnupg2
- sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release enable-only pxb-24
- sudo apt-get update
- sudo apt-get install -y percona-xtrabackup-24
-
# Checkout to the last release of Vitess
- name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -169,6 +162,12 @@ jobs:
mkdir -p /tmp/vitess-build-current/
cp -R bin /tmp/vitess-build-current/
+ - name: Convert ErrorContains checks to Error checks
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: |
+ find ./go/test/endtoend -name '*.go' -exec sed -i 's/ErrorContains/Error/g' {} +
+ find ./go/test/endtoend -name '*.go' -exec sed -i 's/EqualError/Error/g' {} +
+
# Running a test with vtgate and vttablet using version n
- name: Run query serving tests (vtgate=N, vttablet=N)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml
index 6c30b4acb05..f03ee6a5ad5 100644
--- a/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml
+++ b/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -115,13 +115,6 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo apt-get install -y gnupg2
- sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release enable-only pxb-24
- sudo apt-get update
- sudo apt-get install -y percona-xtrabackup-24
-
# Checkout to the next release of Vitess
- name: Check out other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -163,6 +156,12 @@ jobs:
mkdir -p /tmp/vitess-build-current/
cp -R bin /tmp/vitess-build-current/
+ - name: Convert ErrorContains checks to Error checks
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: |
+ find ./go/test/endtoend -name '*.go' -exec sed -i 's/ErrorContains/Error/g' {} +
+ find ./go/test/endtoend -name '*.go' -exec sed -i 's/EqualError/Error/g' {} +
+
# Running a test with vtgate and vttablet using version n
- name: Run query serving tests (vtgate=N, vttablet=N)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml
index e9b33cdce90..76ff7fa94e6 100644
--- a/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml
+++ b/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -115,13 +115,6 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo apt-get install -y gnupg2
- sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release enable-only pxb-24
- sudo apt-get update
- sudo apt-get install -y percona-xtrabackup-24
-
# Checkout to the next release of Vitess
- name: Check out other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml
index f67a3214d24..9fdeeeb57dd 100644
--- a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml
+++ b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml
index b05631a4862..fcd8d186833 100644
--- a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml
+++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml
@@ -76,7 +76,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -121,13 +121,6 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo apt-get install -y gnupg2
- sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release enable-only pxb-24
- sudo apt-get update
- sudo apt-get install -y percona-xtrabackup-24
-
# Checkout to the last release of Vitess
- name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml
index 372f223f06a..ab39e4208c6 100644
--- a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml
+++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml
@@ -76,7 +76,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -121,13 +121,6 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo apt-get install -y gnupg2
- sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release enable-only pxb-24
- sudo apt-get update
- sudo apt-get install -y percona-xtrabackup-24
-
# Checkout to the last release of Vitess
- name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/vitess_tester_vtgate.yml b/.github/workflows/vitess_tester_vtgate.yml
index 63d7332fc39..56ec696844c 100644
--- a/.github/workflows/vitess_tester_vtgate.yml
+++ b/.github/workflows/vitess_tester_vtgate.yml
@@ -60,7 +60,7 @@ jobs:
end_to_end:
- 'go/**/*.go'
- 'go/vt/sidecardb/**/*.sql'
- - 'go/test/endtoend/onlineddl/vrepl_suite/**'
+ - 'go/test/endtoend/vtgate/vitess_tester/**'
- 'test.go'
- 'Makefile'
- 'build.env'
@@ -76,7 +76,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -119,7 +119,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
# install vitess tester
- go install github.com/vitessio/vitess-tester@eb953122baba163ed8ccaa6642458ee984f5d7e4
+ go install github.com/vitessio/vitess-tester@89dd933a9ea0e15f69ca58b9c8ea09a358762cca
- name: Setup launchable dependencies
if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
@@ -150,9 +150,9 @@ jobs:
# We go over all the directories in the given path.
# If there is a vschema file there, we use it, otherwise we let vitess-tester autogenerate it.
if [ -f $dir/vschema.json ]; then
- vitess-tester --sharded --xunit --test-dir $dir --vschema "$dir"vschema.json
+ vitess-tester --xunit --vschema "$dir"vschema.json $dir/*.test
else
- vitess-tester --sharded --xunit --test-dir $dir
+ vitess-tester --sharded --xunit $dir/*.test
fi
# Number the reports by changing their file names.
mv report.xml report"$i".xml
diff --git a/.github/workflows/vtadmin_web_lint.yml b/.github/workflows/vtadmin_web_lint.yml
index 035850a3c9b..37ca4bbcfcd 100644
--- a/.github/workflows/vtadmin_web_lint.yml
+++ b/.github/workflows/vtadmin_web_lint.yml
@@ -16,7 +16,7 @@ permissions: read-all
jobs:
lint:
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-latest
steps:
- name: Skip CI
run: |
diff --git a/Makefile b/Makefile
index 5b84184f5a9..becafd889b4 100644
--- a/Makefile
+++ b/Makefile
@@ -280,9 +280,9 @@ $(PROTO_GO_OUTS): minimaltools install_protoc-gen-go proto/*.proto
# Please read docker/README.md to understand the different available images.
# This rule builds the bootstrap images for all flavors.
-DOCKER_IMAGES_FOR_TEST = mysql57 mysql80 percona57 percona80
+DOCKER_IMAGES_FOR_TEST = mysql80 percona80
DOCKER_IMAGES = common $(DOCKER_IMAGES_FOR_TEST)
-BOOTSTRAP_VERSION=27.5
+BOOTSTRAP_VERSION=27.8
ensure_bootstrap_version:
find docker/ -type f -exec sed -i "s/^\(ARG bootstrap_version\)=.*/\1=${BOOTSTRAP_VERSION}/" {} \;
sed -i 's/\(^.*flag.String(\"bootstrap-version\",\) *\"[^\"]\+\"/\1 \"${BOOTSTRAP_VERSION}\"/' test.go
@@ -321,7 +321,7 @@ endef
docker_base:
${call build_docker_image,docker/base/Dockerfile,vitess/base}
-DOCKER_BASE_SUFFIX = mysql80 percona57 percona80
+DOCKER_BASE_SUFFIX = mysql80 percona80
DOCKER_BASE_TARGETS = $(addprefix docker_base_, $(DOCKER_BASE_SUFFIX))
$(DOCKER_BASE_TARGETS): docker_base_%:
${call build_docker_image,docker/base/Dockerfile.$*,vitess/base:$*}
@@ -350,7 +350,7 @@ docker_run_local:
docker_mini:
${call build_docker_image,docker/mini/Dockerfile,vitess/mini}
-DOCKER_VTTESTSERVER_SUFFIX = mysql57 mysql80
+DOCKER_VTTESTSERVER_SUFFIX = mysql80
DOCKER_VTTESTSERVER_TARGETS = $(addprefix docker_vttestserver_,$(DOCKER_VTTESTSERVER_SUFFIX))
$(DOCKER_VTTESTSERVER_TARGETS): docker_vttestserver_%:
${call build_docker_image,docker/vttestserver/Dockerfile.$*,vitess/vttestserver:$*}
diff --git a/build.env b/build.env
index 9f87f437391..3a1b39fc86c 100755
--- a/build.env
+++ b/build.env
@@ -17,7 +17,7 @@
source ./tools/shell_functions.inc
go version >/dev/null 2>&1 || fail "Go is not installed or is not in \$PATH. See https://vitess.io/contributing/build-from-source for install instructions."
-goversion_min 1.22.5 || echo "Go version reported: `go version`. Version 1.22.5+ recommended. See https://vitess.io/contributing/build-from-source for install instructions."
+goversion_min 1.22.8 || echo "Go version reported: `go version`. Version 1.22.8+ recommended. See https://vitess.io/contributing/build-from-source for install instructions."
mkdir -p dist
mkdir -p bin
diff --git a/changelog/19.0/19.0.6/changelog.md b/changelog/19.0/19.0.6/changelog.md
new file mode 100644
index 00000000000..3a3506ac1ff
--- /dev/null
+++ b/changelog/19.0/19.0.6/changelog.md
@@ -0,0 +1,48 @@
+# Changelog of Vitess v19.0.6
+
+### Bug fixes
+#### Query Serving
+ * [release-19.0] bugfix: don't treat join predicates as filter predicates (#16472) [#16474](https://github.com/vitessio/vitess/pull/16474)
+ * [release-19.0] fix: reference table join merge (#16488) [#16496](https://github.com/vitessio/vitess/pull/16496)
+ * [release-19.0] simplify merging logic (#16525) [#16532](https://github.com/vitessio/vitess/pull/16532)
+ * [release-19.0] Fix: Offset planning in hash joins (#16540) [#16551](https://github.com/vitessio/vitess/pull/16551)
+ * [release-19.0] Fix query plan cache misses metric (#16562) [#16627](https://github.com/vitessio/vitess/pull/16627)
+ * [release-19.0] JSON Encoding: Use Type_RAW for marshalling json (#16637) [#16681](https://github.com/vitessio/vitess/pull/16681)
+#### Throttler
+ * v19 backport: Throttler/vreplication: fix app name used by VPlayer (#16578) [#16580](https://github.com/vitessio/vitess/pull/16580)
+#### VReplication
+ * [release-19.0] VStream API: validate that last PK has fields defined (#16478) [#16486](https://github.com/vitessio/vitess/pull/16486)
+#### VTAdmin
+ * [release-19.0] VTAdmin: Upgrade websockets js package (#16504) [#16512](https://github.com/vitessio/vitess/pull/16512)
+#### VTGate
+ * [release-19.0] Fix `RemoveTablet` during `TabletExternallyReparented` causing connection issues (#16371) [#16567](https://github.com/vitessio/vitess/pull/16567)
+#### VTorc
+ * [release-19.0] FindErrantGTIDs: superset is not an errant GTID situation (#16725) [#16728](https://github.com/vitessio/vitess/pull/16728)
+### CI/Build
+#### General
+ * [release-19.0] Upgrade the Golang version to `go1.22.6` [#16543](https://github.com/vitessio/vitess/pull/16543)
+#### VTAdmin
+ * [release-19.0] Update micromatch to 4.0.8 (#16660) [#16666](https://github.com/vitessio/vitess/pull/16666)
+### Enhancement
+#### Build/CI
+ * [release-19.0] Improve the queries upgrade/downgrade CI workflow by using same test code version as binary (#16494) [#16501](https://github.com/vitessio/vitess/pull/16501)
+#### Online DDL
+ * [release-19.0] VReplication workflows: retry "wrong tablet type" errors (#16645) [#16652](https://github.com/vitessio/vitess/pull/16652)
+### Internal Cleanup
+#### Build/CI
+ * [release-19.0] Move from 4-cores larger runners to `ubuntu-latest` (#16714) [#16717](https://github.com/vitessio/vitess/pull/16717)
+#### Docker
+ * [release-19.0] Remove mysql57/percona57 bootstrap images (#16620) [#16622](https://github.com/vitessio/vitess/pull/16622)
+### Performance
+#### Online DDL
+ * v19 backport: Online DDL: avoid SQL's `CONVERT(...)`, convert programmatically if needed [#16603](https://github.com/vitessio/vitess/pull/16603)
+### Regression
+#### Query Serving
+ * [release-19.0] bugfix: Allow cross-keyspace joins (#16520) [#16523](https://github.com/vitessio/vitess/pull/16523)
+### Release
+#### General
+ * [release-19.0] Bump to `v19.0.6-SNAPSHOT` after the `v19.0.5` release [#16456](https://github.com/vitessio/vitess/pull/16456)
+### Testing
+#### Query Serving
+ * [release-19.0] Replace ErrorContains checks with Error checks before running upgrade downgrade [#16700](https://github.com/vitessio/vitess/pull/16700)
+
diff --git a/changelog/19.0/19.0.6/release_notes.md b/changelog/19.0/19.0.6/release_notes.md
new file mode 100644
index 00000000000..422bb50d1eb
--- /dev/null
+++ b/changelog/19.0/19.0.6/release_notes.md
@@ -0,0 +1,7 @@
+# Release of Vitess v19.0.6
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/19.0/19.0.6/changelog.md).
+
+The release includes 21 merged Pull Requests.
+
+Thanks to all our contributors: @GuptaManan100, @app/vitess-bot, @shlomi-noach, @systay, @vitess-bot
+
diff --git a/changelog/19.0/19.0.7/changelog.md b/changelog/19.0/19.0.7/changelog.md
new file mode 100644
index 00000000000..a5c9a693f9c
--- /dev/null
+++ b/changelog/19.0/19.0.7/changelog.md
@@ -0,0 +1,50 @@
+# Changelog of Vitess v19.0.7
+
+### Bug fixes
+#### Backup and Restore
+ * [release-19.0] Fail fast when builtinbackup fails to restore a single file (#16856) [#16867](https://github.com/vitessio/vitess/pull/16867)
+#### Query Serving
+ * Backport: Fix ACL checks for CTEs (#16642) [#16776](https://github.com/vitessio/vitess/pull/16776)
+ * [release-19.0] VTTablet: smartconnpool: notify all expired waiters (#16897) [#16901](https://github.com/vitessio/vitess/pull/16901)
+ * [release-19.0] fixes bugs around expression precedence and LIKE (#16934 & #16649) [#16945](https://github.com/vitessio/vitess/pull/16945)
+ * [release-19.0] bugfix: add HAVING columns inside derived tables (#16976) [#16978](https://github.com/vitessio/vitess/pull/16978)
+ * [release-19.0] bugfix: treat EXPLAIN like SELECT (#17054) [#17056](https://github.com/vitessio/vitess/pull/17056)
+ * [release-19.0] Delegate Column Availability Checks to MySQL for Single-Route Queries (#17077) [#17085](https://github.com/vitessio/vitess/pull/17085)
+ * Bugfix for Panic on Joined Queries with Non-Authoritative Tables in Vitess 19.0 [#17103](https://github.com/vitessio/vitess/pull/17103)
+#### VTAdmin
+ * [release-19.0] VTAdmin: Fix serve-handler's path-to-regexp dep and add default schema refresh (#16778) [#16783](https://github.com/vitessio/vitess/pull/16783)
+#### VTGate
+ * [release-19.0] Support passing filters to `discovery.NewHealthCheck(...)` (#16170) [#16871](https://github.com/vitessio/vitess/pull/16871)
+ * [release-19.0] Fix deadlock between health check and topology watcher (#16995) [#17008](https://github.com/vitessio/vitess/pull/17008)
+#### VTTablet
+ * [release-19.0] Fix race in `replicationLagModule` of `go/vt/throttle` (#16078) [#16899](https://github.com/vitessio/vitess/pull/16899)
+### CI/Build
+#### Docker
+ * [release-19.0] Remove mysql57 from docker images [#16763](https://github.com/vitessio/vitess/pull/16763)
+#### General
+ * [release-19.0] Upgrade Golang to 1.22.8 [#16895](https://github.com/vitessio/vitess/pull/16895)
+### Dependencies
+#### Java
+ * [release-19.0] Bump com.google.protobuf:protobuf-java from 3.24.3 to 3.25.5 in /java (#16809) [#16837](https://github.com/vitessio/vitess/pull/16837)
+ * [release-19.0] Bump commons-io:commons-io from 2.7 to 2.14.0 in /java (#16889) [#16930](https://github.com/vitessio/vitess/pull/16930)
+#### VTAdmin
+ * [release-19.0] VTAdmin: Address security vuln in path-to-regexp node pkg (#16770) [#16772](https://github.com/vitessio/vitess/pull/16772)
+### Enhancement
+#### Online DDL
+ * [release-19.0] Improve Schema Engine's TablesWithSize80 query (#17066) [#17089](https://github.com/vitessio/vitess/pull/17089)
+### Internal Cleanup
+#### VTAdmin
+ * [release-19.0] VTAdmin: Upgrade deps to address security vulns (#16843) [#16846](https://github.com/vitessio/vitess/pull/16846)
+### Regression
+#### Backup and Restore
+ * [release-19.0] Fix unreachable errors when taking a backup (#17062) [#17110](https://github.com/vitessio/vitess/pull/17110)
+#### Query Serving
+ * [release-19.0] fix: route engine to handle column truncation for execute after lookup (#16981) [#16984](https://github.com/vitessio/vitess/pull/16984)
+ * [release-19.0] Add support for `MultiEqual` opcode for lookup vindexes. (#16975) [#17039](https://github.com/vitessio/vitess/pull/17039)
+### Release
+#### General
+ * [release-19.0] Code Freeze for `v19.0.7` [#17148](https://github.com/vitessio/vitess/pull/17148)
+### Testing
+#### Cluster management
+ * [release-19.0] Flaky test fixes (#16940) [#16958](https://github.com/vitessio/vitess/pull/16958)
+
diff --git a/changelog/19.0/19.0.7/release_notes.md b/changelog/19.0/19.0.7/release_notes.md
new file mode 100644
index 00000000000..9cc9532d018
--- /dev/null
+++ b/changelog/19.0/19.0.7/release_notes.md
@@ -0,0 +1,7 @@
+# Release of Vitess v19.0.7
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/19.0/19.0.7/changelog.md).
+
+The release includes 24 merged Pull Requests.
+
+Thanks to all our contributors: @app/vitess-bot, @frouioui, @systay, @timvaillancourt, @vitess-bot
+
diff --git a/changelog/19.0/README.md b/changelog/19.0/README.md
index ae90ef2df1b..008c92c2aec 100644
--- a/changelog/19.0/README.md
+++ b/changelog/19.0/README.md
@@ -1,4 +1,12 @@
## v19.0
+* **[19.0.7](19.0.7)**
+ * [Changelog](19.0.7/changelog.md)
+ * [Release Notes](19.0.7/release_notes.md)
+
+* **[19.0.6](19.0.6)**
+ * [Changelog](19.0.6/changelog.md)
+ * [Release Notes](19.0.6/release_notes.md)
+
* **[19.0.5](19.0.5)**
* [Changelog](19.0.5/changelog.md)
* [Release Notes](19.0.5/release_notes.md)
diff --git a/docker/base/Dockerfile b/docker/base/Dockerfile
index 1db25591f71..06a3a97c8af 100644
--- a/docker/base/Dockerfile
+++ b/docker/base/Dockerfile
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.8
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}"
diff --git a/docker/base/Dockerfile.mysql57 b/docker/base/Dockerfile.mysql57
index 8d66a1e3604..8a95b1688aa 100644
--- a/docker/base/Dockerfile.mysql57
+++ b/docker/base/Dockerfile.mysql57
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.8
ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
FROM "${image}"
diff --git a/docker/base/Dockerfile.percona57 b/docker/base/Dockerfile.percona57
index d73eed5b917..3bfeeb758f2 100644
--- a/docker/base/Dockerfile.percona57
+++ b/docker/base/Dockerfile.percona57
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.8
ARG image="vitess/bootstrap:${bootstrap_version}-percona57"
FROM "${image}"
diff --git a/docker/base/Dockerfile.percona80 b/docker/base/Dockerfile.percona80
index 597979e05ea..de7f70901b4 100644
--- a/docker/base/Dockerfile.percona80
+++ b/docker/base/Dockerfile.percona80
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.8
ARG image="vitess/bootstrap:${bootstrap_version}-percona80"
FROM "${image}"
diff --git a/docker/bootstrap/CHANGELOG.md b/docker/bootstrap/CHANGELOG.md
index d739c55b80b..6bedb33bac7 100644
--- a/docker/bootstrap/CHANGELOG.md
+++ b/docker/bootstrap/CHANGELOG.md
@@ -129,3 +129,16 @@ List of changes between bootstrap image versions.
## [27.5] - 2024-07-02
### Changes
- Update build to golang 1.22.5
+
+## [27.6] - 2024-08-07
+### Changes
+- Update build to golang 1.22.6
+- MySQL57 and Percona57 tags will be removed thereafter
+
+## [27.7] - 2024-09-05
+### Changes
+- Update build to golang 1.22.7
+
+## [27.8] - 2024-10-04
+### Changes
+- Update build to golang 1.22.8
diff --git a/docker/bootstrap/Dockerfile.common b/docker/bootstrap/Dockerfile.common
index ac7859c0a1e..3c8a24b9540 100644
--- a/docker/bootstrap/Dockerfile.common
+++ b/docker/bootstrap/Dockerfile.common
@@ -1,4 +1,4 @@
-FROM --platform=linux/amd64 golang:1.22.5-bullseye
+FROM --platform=linux/amd64 golang:1.22.8-bullseye
# Install Vitess build dependencies
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
diff --git a/docker/bootstrap/Dockerfile.mysql57 b/docker/bootstrap/Dockerfile.mysql57
deleted file mode 100644
index d523241f499..00000000000
--- a/docker/bootstrap/Dockerfile.mysql57
+++ /dev/null
@@ -1,26 +0,0 @@
-ARG bootstrap_version
-ARG image="vitess/bootstrap:${bootstrap_version}-common"
-
-FROM --platform=linux/amd64 "${image}"
-
-# Install MySQL 5.7
-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends gnupg dirmngr ca-certificates && \
- for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com A8D3785C && break; done && \
- add-apt-repository 'deb http://repo.mysql.com/apt/debian/ buster mysql-5.7' && \
- for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \
- echo 'deb http://repo.percona.com/apt buster main' > /etc/apt/sources.list.d/percona.list && \
- { \
- echo debconf debconf/frontend select Noninteractive; \
- echo percona-server-server-5.7 percona-server-server/root_password password 'unused'; \
- echo percona-server-server-5.7 percona-server-server/root_password_again password 'unused'; \
- } | debconf-set-selections && \
- percona-release enable-only tools \
- apt-get update -y && \
- DEBIAN_FRONTEND=noninteractive apt-get install -y mysql-server libmysqlclient-dev libdbd-mysql-perl rsync libev4 percona-xtrabackup-24 && \
- rm -rf /var/lib/apt/lists/*
-
-# Bootstrap Vitess
-WORKDIR /vt/src/vitess.io/vitess
-
-USER vitess
-RUN ./bootstrap.sh
diff --git a/docker/bootstrap/Dockerfile.percona57 b/docker/bootstrap/Dockerfile.percona57
deleted file mode 100644
index f43c655b3d7..00000000000
--- a/docker/bootstrap/Dockerfile.percona57
+++ /dev/null
@@ -1,24 +0,0 @@
-ARG bootstrap_version
-ARG image="vitess/bootstrap:${bootstrap_version}-common"
-
-FROM --platform=linux/amd64 "${image}"
-
-# Install Percona 5.7
-RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \
- add-apt-repository 'deb http://repo.percona.com/apt bullseye main' && \
- { \
- echo debconf debconf/frontend select Noninteractive; \
- echo percona-server-server-5.7 percona-server-server/root_password password 'unused'; \
- echo percona-server-server-5.7 percona-server-server/root_password_again password 'unused'; \
- } | debconf-set-selections && \
- percona-release enable-only tools \
- apt-get update && \
- apt-get install -y --no-install-recommends percona-server-server-5.7 && \
- apt-get install -y --no-install-recommends libperconaserverclient20-dev percona-xtrabackup-24 && \
- rm -rf /var/lib/apt/lists/*
-
-# Bootstrap Vitess
-WORKDIR /vt/src/vitess.io/vitess
-
-USER vitess
-RUN ./bootstrap.sh
diff --git a/docker/bootstrap/README.md b/docker/bootstrap/README.md
index 717f4336442..b273305d6b9 100644
--- a/docker/bootstrap/README.md
+++ b/docker/bootstrap/README.md
@@ -6,9 +6,7 @@ after successfully running `bootstrap.sh` and `dev.env`.
The `vitess/bootstrap` image comes in different flavors:
* `vitess/bootstrap:common` - dependencies that are common to all flavors
-* `vitess/bootstrap:mysql57` - bootstrap image for MySQL 5.7
* `vitess/bootstrap:mysql80` - bootstrap image for MySQL 8.0
-* `vitess/bootstrap:percona57` - bootstrap image for Percona Server 5.7
* `vitess/bootstrap:percona80` - bootstrap image for Percona Server 8.0
**NOTE: Unlike the base image that builds Vitess itself, this bootstrap image
diff --git a/docker/lite/Dockerfile.mysql57 b/docker/lite/Dockerfile.mysql57
index d8f38f32496..90b6490ec00 100644
--- a/docker/lite/Dockerfile.mysql57
+++ b/docker/lite/Dockerfile.mysql57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.8
ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.mysql80 b/docker/lite/Dockerfile.mysql80
index bb73c0a8ff5..1d57b5fe07e 100644
--- a/docker/lite/Dockerfile.mysql80
+++ b/docker/lite/Dockerfile.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.8
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.percona57 b/docker/lite/Dockerfile.percona57
index da5798bf228..710629200ef 100644
--- a/docker/lite/Dockerfile.percona57
+++ b/docker/lite/Dockerfile.percona57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.8
ARG image="vitess/bootstrap:${bootstrap_version}-percona57"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.percona80 b/docker/lite/Dockerfile.percona80
index 66478659c20..e187f11e6c5 100644
--- a/docker/lite/Dockerfile.percona80
+++ b/docker/lite/Dockerfile.percona80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.8
ARG image="vitess/bootstrap:${bootstrap_version}-percona80"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.testing b/docker/lite/Dockerfile.testing
index 254502a08c1..9d43852a9d7 100644
--- a/docker/lite/Dockerfile.testing
+++ b/docker/lite/Dockerfile.testing
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.8
ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.ubi7.mysql57 b/docker/lite/Dockerfile.ubi7.mysql57
index 6625fe3cf53..9912a03e862 100644
--- a/docker/lite/Dockerfile.ubi7.mysql57
+++ b/docker/lite/Dockerfile.ubi7.mysql57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.8
ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.ubi7.mysql80 b/docker/lite/Dockerfile.ubi7.mysql80
index 3807e67c230..e870ec2052c 100644
--- a/docker/lite/Dockerfile.ubi7.mysql80
+++ b/docker/lite/Dockerfile.ubi7.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.8
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.ubi7.percona57 b/docker/lite/Dockerfile.ubi7.percona57
index 86fa1ca2038..39384460dbe 100644
--- a/docker/lite/Dockerfile.ubi7.percona57
+++ b/docker/lite/Dockerfile.ubi7.percona57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.8
ARG image="vitess/bootstrap:${bootstrap_version}-percona57"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.ubi7.percona80 b/docker/lite/Dockerfile.ubi7.percona80
index aff6af97cb2..e74bb08cd89 100644
--- a/docker/lite/Dockerfile.ubi7.percona80
+++ b/docker/lite/Dockerfile.ubi7.percona80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.8
ARG image="vitess/bootstrap:${bootstrap_version}-percona80"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.ubi8.arm64.mysql80 b/docker/lite/Dockerfile.ubi8.arm64.mysql80
index f4bde08d2b3..e8b05a6a4a4 100644
--- a/docker/lite/Dockerfile.ubi8.arm64.mysql80
+++ b/docker/lite/Dockerfile.ubi8.arm64.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.8
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.ubi8.mysql80 b/docker/lite/Dockerfile.ubi8.mysql80
index 2bc5d9ddd07..fde931e5c9f 100644
--- a/docker/lite/Dockerfile.ubi8.mysql80
+++ b/docker/lite/Dockerfile.ubi8.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.8
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
diff --git a/docker/local/Dockerfile b/docker/local/Dockerfile
index 8d74247bce7..0c7b7c8ed61 100644
--- a/docker/local/Dockerfile
+++ b/docker/local/Dockerfile
@@ -1,4 +1,4 @@
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.8
ARG image="vitess/bootstrap:${bootstrap_version}-common"
FROM "${image}"
diff --git a/docker/vttestserver/Dockerfile.mysql57 b/docker/vttestserver/Dockerfile.mysql57
index 444df680f12..48dcf4f21f4 100644
--- a/docker/vttestserver/Dockerfile.mysql57
+++ b/docker/vttestserver/Dockerfile.mysql57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.8
ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
FROM "${image}" AS builder
diff --git a/docker/vttestserver/Dockerfile.mysql80 b/docker/vttestserver/Dockerfile.mysql80
index f6ca717180a..a09125a3987 100644
--- a/docker/vttestserver/Dockerfile.mysql80
+++ b/docker/vttestserver/Dockerfile.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.8
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
diff --git a/examples/common/scripts/vtadmin-up.sh b/examples/common/scripts/vtadmin-up.sh
index 356f6ac3880..499c6ea6204 100755
--- a/examples/common/scripts/vtadmin-up.sh
+++ b/examples/common/scripts/vtadmin-up.sh
@@ -43,7 +43,7 @@ vtadmin \
--alsologtostderr \
--rbac \
--rbac-config="${script_dir}/../vtadmin/rbac.yaml" \
- --cluster "id=${cluster_name},name=${cluster_name},discovery=staticfile,discovery-staticfile-path=${script_dir}/../vtadmin/discovery.json,tablet-fqdn-tmpl=http://{{ .Tablet.Hostname }}:15{{ .Tablet.Alias.Uid }}" \
+ --cluster "id=${cluster_name},name=${cluster_name},discovery=staticfile,discovery-staticfile-path=${script_dir}/../vtadmin/discovery.json,tablet-fqdn-tmpl=http://{{ .Tablet.Hostname }}:15{{ .Tablet.Alias.Uid }},schema-cache-default-expiration=1m" \
> "${log_dir}/vtadmin-api.out" 2>&1 &
vtadmin_api_pid=$!
diff --git a/examples/compose/docker-compose.beginners.yml b/examples/compose/docker-compose.beginners.yml
index 2e816d6a1c1..3e5e450c1c0 100644
--- a/examples/compose/docker-compose.beginners.yml
+++ b/examples/compose/docker-compose.beginners.yml
@@ -58,7 +58,7 @@ services:
- "3306"
vtctld:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- "15000:$WEB_PORT"
- "$GRPC_PORT"
@@ -81,7 +81,7 @@ services:
condition: service_healthy
vtgate:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- "15099:$WEB_PORT"
- "$GRPC_PORT"
@@ -111,7 +111,7 @@ services:
condition: service_healthy
schemaload:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
command:
- sh
- -c
@@ -144,12 +144,12 @@ services:
environment:
- KEYSPACES=$KEYSPACE
- GRPC_PORT=15999
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
volumes:
- .:/script
vttablet100:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- "15100:$WEB_PORT"
- "$GRPC_PORT"
@@ -181,7 +181,7 @@ services:
retries: 15
vttablet101:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- "15101:$WEB_PORT"
- "$GRPC_PORT"
@@ -213,7 +213,7 @@ services:
retries: 15
vttablet102:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- "15102:$WEB_PORT"
- "$GRPC_PORT"
@@ -245,7 +245,7 @@ services:
retries: 15
vttablet103:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- "15103:$WEB_PORT"
- "$GRPC_PORT"
@@ -277,7 +277,7 @@ services:
retries: 15
vtorc:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
command: ["sh", "-c", "/script/vtorc-up.sh"]
depends_on:
- vtctld
@@ -307,7 +307,7 @@ services:
retries: 15
vreplication:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
volumes:
- ".:/script"
environment:
diff --git a/examples/compose/docker-compose.yml b/examples/compose/docker-compose.yml
index 8626e6f3c85..d7e463046ee 100644
--- a/examples/compose/docker-compose.yml
+++ b/examples/compose/docker-compose.yml
@@ -75,7 +75,7 @@ services:
- SCHEMA_FILES=lookup_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
volumes:
- .:/script
schemaload_test_keyspace:
@@ -101,7 +101,7 @@ services:
- SCHEMA_FILES=test_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
volumes:
- .:/script
set_keyspace_durability_policy:
@@ -115,7 +115,7 @@ services:
environment:
- KEYSPACES=test_keyspace lookup_keyspace
- GRPC_PORT=15999
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
volumes:
- .:/script
vreplication:
@@ -129,7 +129,7 @@ services:
- TOPOLOGY_FLAGS=--topo_implementation consul --topo_global_server_address consul1:8500
--topo_global_root vitess/global
- EXTERNAL_DB=0
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
volumes:
- .:/script
vtctld:
@@ -143,7 +143,7 @@ services:
depends_on:
external_db_host:
condition: service_healthy
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- 15000:8080
- "15999"
@@ -160,7 +160,7 @@ services:
--normalize_queries=true '
depends_on:
- vtctld
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- 15099:8080
- "15999"
@@ -182,7 +182,7 @@ services:
- EXTERNAL_DB=0
- DB_USER=
- DB_PASS=
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- 13000:8080
volumes:
@@ -217,7 +217,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- 15101:8080
- "15999"
@@ -254,7 +254,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- 15102:8080
- "15999"
@@ -291,7 +291,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- 15201:8080
- "15999"
@@ -328,7 +328,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- 15202:8080
- "15999"
@@ -365,7 +365,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- 15301:8080
- "15999"
@@ -402,7 +402,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- 15302:8080
- "15999"
diff --git a/examples/compose/vtcompose/docker-compose.test.yml b/examples/compose/vtcompose/docker-compose.test.yml
index f4abaad543c..72c58601a66 100644
--- a/examples/compose/vtcompose/docker-compose.test.yml
+++ b/examples/compose/vtcompose/docker-compose.test.yml
@@ -79,7 +79,7 @@ services:
- SCHEMA_FILES=test_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
volumes:
- .:/script
schemaload_unsharded_keyspace:
@@ -103,7 +103,7 @@ services:
- SCHEMA_FILES=unsharded_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
volumes:
- .:/script
set_keyspace_durability_policy_test_keyspace:
@@ -117,7 +117,7 @@ services:
environment:
- GRPC_PORT=15999
- KEYSPACES=test_keyspace
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
volumes:
- .:/script
set_keyspace_durability_policy_unsharded_keyspace:
@@ -130,7 +130,7 @@ services:
environment:
- GRPC_PORT=15999
- KEYSPACES=unsharded_keyspace
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
volumes:
- .:/script
vreplication:
@@ -144,7 +144,7 @@ services:
- TOPOLOGY_FLAGS=--topo_implementation consul --topo_global_server_address consul1:8500
--topo_global_root vitess/global
- EXTERNAL_DB=0
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
volumes:
- .:/script
vtctld:
@@ -159,7 +159,7 @@ services:
depends_on:
external_db_host:
condition: service_healthy
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- 15000:8080
- "15999"
@@ -176,7 +176,7 @@ services:
''grpc-vtgateservice'' --normalize_queries=true '
depends_on:
- vtctld
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- 15099:8080
- "15999"
@@ -199,7 +199,7 @@ services:
- EXTERNAL_DB=0
- DB_USER=
- DB_PASS=
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- 13000:8080
volumes:
@@ -234,7 +234,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- 15101:8080
- "15999"
@@ -271,7 +271,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- 15102:8080
- "15999"
@@ -308,7 +308,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- 15201:8080
- "15999"
@@ -345,7 +345,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- 15202:8080
- "15999"
@@ -382,7 +382,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- 15301:8080
- "15999"
diff --git a/examples/compose/vtcompose/vtcompose.go b/examples/compose/vtcompose/vtcompose.go
index 25a1a19bce5..de9857f5f02 100644
--- a/examples/compose/vtcompose/vtcompose.go
+++ b/examples/compose/vtcompose/vtcompose.go
@@ -533,7 +533,7 @@ func generateDefaultShard(tabAlias int, shard string, keyspaceData keyspaceInfo,
- op: add
path: /services/init_shard_primary%[2]d
value:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
command: ["sh", "-c", "/vt/bin/vtctldclient %[5]s InitShardPrimary --force %[4]s/%[3]s %[6]s-%[2]d "]
%[1]s
`, dependsOn, aliases[0], shard, keyspaceData.keyspace, opts.topologyFlags, opts.cell)
@@ -565,7 +565,7 @@ func generateExternalPrimary(
- op: add
path: /services/vttablet%[1]d
value:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- "15%[1]d:%[3]d"
- "%[4]d"
@@ -627,7 +627,7 @@ func generateDefaultTablet(tabAlias int, shard, role, keyspace string, dbInfo ex
- op: add
path: /services/vttablet%[1]d
value:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- "15%[1]d:%[4]d"
- "%[5]d"
@@ -665,7 +665,7 @@ func generateVtctld(opts vtOptions) string {
- op: add
path: /services/vtctld
value:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- "15000:%[1]d"
- "%[2]d"
@@ -696,7 +696,7 @@ func generateVtgate(opts vtOptions) string {
- op: add
path: /services/vtgate
value:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
ports:
- "15099:%[1]d"
- "%[2]d"
@@ -738,7 +738,7 @@ func generateVTOrc(dbInfo externalDbInfo, keyspaceInfoMap map[string]keyspaceInf
- op: add
path: /services/vtorc
value:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
volumes:
- ".:/script"
environment:
@@ -763,7 +763,7 @@ func generateVreplication(dbInfo externalDbInfo, opts vtOptions) string {
- op: add
path: /services/vreplication
value:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
volumes:
- ".:/script"
environment:
@@ -791,7 +791,7 @@ func generateSetKeyspaceDurabilityPolicy(
- op: add
path: /services/set_keyspace_durability_policy_%[3]s
value:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
volumes:
- ".:/script"
environment:
@@ -828,7 +828,7 @@ func generateSchemaload(
- op: add
path: /services/schemaload_%[7]s
value:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.7
volumes:
- ".:/script"
environment:
diff --git a/examples/operator/101_initial_cluster.yaml b/examples/operator/101_initial_cluster.yaml
index 4c4d92f1f1f..7df159a56f9 100644
--- a/examples/operator/101_initial_cluster.yaml
+++ b/examples/operator/101_initial_cluster.yaml
@@ -8,14 +8,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:v19.0.5
- vtadmin: vitess/vtadmin:v19.0.5
- vtgate: vitess/lite:v19.0.5
- vttablet: vitess/lite:v19.0.5
- vtbackup: vitess/lite:v19.0.5
- vtorc: vitess/lite:v19.0.5
+ vtctld: vitess/lite:v19.0.7
+ vtadmin: vitess/vtadmin:v19.0.7
+ vtgate: vitess/lite:v19.0.7
+ vttablet: vitess/lite:v19.0.7
+ vtbackup: vitess/lite:v19.0.7
+ vtorc: vitess/lite:v19.0.7
mysqld:
- mysql80Compatible: vitess/lite:v19.0.5
+ mysql80Compatible: vitess/lite:v19.0.7
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
diff --git a/examples/operator/201_customer_tablets.yaml b/examples/operator/201_customer_tablets.yaml
index d49cec49120..aa6b244b3fb 100644
--- a/examples/operator/201_customer_tablets.yaml
+++ b/examples/operator/201_customer_tablets.yaml
@@ -4,14 +4,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:v19.0.5
- vtadmin: vitess/vtadmin:v19.0.5
- vtgate: vitess/lite:v19.0.5
- vttablet: vitess/lite:v19.0.5
- vtbackup: vitess/lite:v19.0.5
- vtorc: vitess/lite:v19.0.5
+ vtctld: vitess/lite:v19.0.7
+ vtadmin: vitess/vtadmin:v19.0.7
+ vtgate: vitess/lite:v19.0.7
+ vttablet: vitess/lite:v19.0.7
+ vtbackup: vitess/lite:v19.0.7
+ vtorc: vitess/lite:v19.0.7
mysqld:
- mysql80Compatible: vitess/lite:v19.0.5
+ mysql80Compatible: vitess/lite:v19.0.7
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
diff --git a/examples/operator/302_new_shards.yaml b/examples/operator/302_new_shards.yaml
index 5a0e8e141d1..d7da70d3aa4 100644
--- a/examples/operator/302_new_shards.yaml
+++ b/examples/operator/302_new_shards.yaml
@@ -4,14 +4,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:v19.0.5
- vtadmin: vitess/vtadmin:v19.0.5
- vtgate: vitess/lite:v19.0.5
- vttablet: vitess/lite:v19.0.5
- vtbackup: vitess/lite:v19.0.5
- vtorc: vitess/lite:v19.0.5
+ vtctld: vitess/lite:v19.0.7
+ vtadmin: vitess/vtadmin:v19.0.7
+ vtgate: vitess/lite:v19.0.7
+ vttablet: vitess/lite:v19.0.7
+ vtbackup: vitess/lite:v19.0.7
+ vtorc: vitess/lite:v19.0.7
mysqld:
- mysql80Compatible: vitess/lite:v19.0.5
+ mysql80Compatible: vitess/lite:v19.0.7
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
diff --git a/examples/operator/306_down_shard_0.yaml b/examples/operator/306_down_shard_0.yaml
index 1b28fe76bc6..23465bfccb4 100644
--- a/examples/operator/306_down_shard_0.yaml
+++ b/examples/operator/306_down_shard_0.yaml
@@ -4,14 +4,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:v19.0.5
- vtadmin: vitess/vtadmin:v19.0.5
- vtgate: vitess/lite:v19.0.5
- vttablet: vitess/lite:v19.0.5
- vtbackup: vitess/lite:v19.0.5
- vtorc: vitess/lite:v19.0.5
+ vtctld: vitess/lite:v19.0.7
+ vtadmin: vitess/vtadmin:v19.0.7
+ vtgate: vitess/lite:v19.0.7
+ vttablet: vitess/lite:v19.0.7
+ vtbackup: vitess/lite:v19.0.7
+ vtorc: vitess/lite:v19.0.7
mysqld:
- mysql80Compatible: vitess/lite:v19.0.5
+ mysql80Compatible: vitess/lite:v19.0.7
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
diff --git a/go.mod b/go.mod
index 71ddf197aae..27f2a64847d 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
module vitess.io/vitess
-go 1.22.5
+go 1.22.8
require (
cloud.google.com/go/storage v1.39.0
diff --git a/go/mysql/flavor_mysql.go b/go/mysql/flavor_mysql.go
index f83f087582c..dd108a74225 100644
--- a/go/mysql/flavor_mysql.go
+++ b/go/mysql/flavor_mysql.go
@@ -337,15 +337,20 @@ WHERE t.table_schema = database()
GROUP BY t.table_name, t.table_type, t.create_time, t.table_comment`
// TablesWithSize80 is a query to select table along with size for mysql 8.0
-//
// Note the following:
-// - We use a single query to fetch both partitioned and non-partitioned tables. This is because
-// accessing `information_schema.innodb_tablespaces` is expensive on servers with many tablespaces,
-// and every query that loads the table needs to perform full table scans on it. Doing a single
-// table scan is more efficient than doing more than one.
-// - We utilize `INFORMATION_SCHEMA`.`TABLES`.`CREATE_OPTIONS` column to do early pruning before the JOIN.
// - `TABLES`.`TABLE_NAME` has `utf8mb4_0900_ai_ci` collation. `INNODB_TABLESPACES`.`NAME` has `utf8mb3_general_ci`.
// We normalize the collation to get better query performance (we force the casting at the time of our choosing)
+// - InnoDB has different table names than MySQL does, in particular for partitioned tables. As far as InnoDB
+// is concerned, each partition is its own table.
+// - We use a `UNION ALL` approach to handle two distinct scenarios: tables that are partitioned and those that are not.
+// Since we `LEFT JOIN` from `TABLES` to `INNODB_TABLESPACES`, we know we already do full table scan on `TABLES`. We therefore
+// don't mind spending some extra computation time (as in `CONCAT(t.table_schema, '/', t.table_name, '#p#%') COLLATE utf8mb3_general_ci`)
+// to make things easier for the JOIN.
+// - We utilize `INFORMATION_SCHEMA`.`TABLES`.`CREATE_OPTIONS` column to tell if the table is partitioned or not. The column
+// may be `NULL` or may have multiple attributes, one of which is "partitioned", which we are looking for.
+// - In a partitioned table, InnoDB will return multiple rows for the same table name, one for each partition, which we successively SUM.
+// We also `SUM` the sizes in the non-partitioned case. This is not because we need to, but because it makes the query
+// symmetric and less prone to future edit errors.
const TablesWithSize80 = `SELECT t.table_name,
t.table_type,
UNIX_TIMESTAMP(t.create_time),
@@ -353,10 +358,24 @@ const TablesWithSize80 = `SELECT t.table_name,
SUM(i.file_size),
SUM(i.allocated_size)
FROM information_schema.tables t
- LEFT JOIN information_schema.innodb_tablespaces i
- ON i.name LIKE CONCAT(t.table_schema, '/', t.table_name, IF(t.create_options <=> 'partitioned', '#p#%', '')) COLLATE utf8mb3_general_ci
+ LEFT JOIN (SELECT name, file_size, allocated_size FROM information_schema.innodb_tablespaces WHERE name LIKE CONCAT(database(), '/%')) i
+ ON i.name = CONCAT(t.table_schema, '/', t.table_name) COLLATE utf8mb3_general_ci
WHERE
- t.table_schema = database()
+ t.table_schema = database() AND IFNULL(t.create_options, '') NOT LIKE '%partitioned%'
+ GROUP BY
+ t.table_schema, t.table_name, t.table_type, t.create_time, t.table_comment
+UNION ALL
+SELECT t.table_name,
+ t.table_type,
+ UNIX_TIMESTAMP(t.create_time),
+ t.table_comment,
+ SUM(i.file_size),
+ SUM(i.allocated_size)
+ FROM information_schema.tables t
+ LEFT JOIN (SELECT name, file_size, allocated_size FROM information_schema.innodb_tablespaces WHERE name LIKE CONCAT(database(), '/%')) i
+ ON i.name LIKE (CONCAT(t.table_schema, '/', t.table_name, '#p#%') COLLATE utf8mb3_general_ci)
+ WHERE
+ t.table_schema = database() AND t.create_options LIKE '%partitioned%'
GROUP BY
t.table_schema, t.table_name, t.table_type, t.create_time, t.table_comment
`
diff --git a/go/mysql/json/marshal.go b/go/mysql/json/marshal.go
index d1a0072ccbb..97d14a336c8 100644
--- a/go/mysql/json/marshal.go
+++ b/go/mysql/json/marshal.go
@@ -175,6 +175,6 @@ func MarshalSQLValue(buf []byte) (*sqltypes.Value, error) {
return nil, err
}
- newVal := sqltypes.MakeTrusted(querypb.Type_JSON, jsonVal.MarshalSQLTo(nil))
+ newVal := sqltypes.MakeTrusted(querypb.Type_RAW, jsonVal.MarshalSQLTo(nil))
return &newVal, nil
}
diff --git a/go/mysql/replication/replication_status.go b/go/mysql/replication/replication_status.go
index 6b3d1bf2214..0b8ba0f785f 100644
--- a/go/mysql/replication/replication_status.go
+++ b/go/mysql/replication/replication_status.go
@@ -201,6 +201,14 @@ func (s *ReplicationStatus) FindErrantGTIDs(otherReplicaStatuses []*ReplicationS
otherSets = append(otherSets, otherSet)
}
+ if len(otherSets) == 1 {
+ // If there is only one replica to compare against, and one is a subset of the other, then we consider them not to be errant.
+ // It simply means that one replica might be behind on replication.
+ if relayLogSet.Contains(otherSets[0]) || otherSets[0].Contains(relayLogSet) {
+ return nil, nil
+ }
+ }
+
// Copy set for final diffSet so we don't mutate receiver.
diffSet := make(Mysql56GTIDSet, len(relayLogSet))
for sid, intervals := range relayLogSet {
diff --git a/go/mysql/replication/replication_status_test.go b/go/mysql/replication/replication_status_test.go
index c1f5991f253..a88cb1570f7 100644
--- a/go/mysql/replication/replication_status_test.go
+++ b/go/mysql/replication/replication_status_test.go
@@ -105,6 +105,16 @@ func TestFindErrantGTIDs(t *testing.T) {
otherRepStatuses: []*ReplicationStatus{{SourceUUID: sid1, RelayLogPosition: Position{GTIDSet: set1}}},
// servers with the same GTID sets should not be diagnosed with errant GTIDs
want: nil,
+ }, {
+ mainRepStatus: &ReplicationStatus{SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set2}},
+ otherRepStatuses: []*ReplicationStatus{{SourceUUID: sid1, RelayLogPosition: Position{GTIDSet: set3}}},
+ // set2 is a strict subset of set3
+ want: nil,
+ }, {
+ mainRepStatus: &ReplicationStatus{SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set3}},
+ otherRepStatuses: []*ReplicationStatus{{SourceUUID: sid1, RelayLogPosition: Position{GTIDSet: set2}}},
+ // set3 is a strict superset of set2
+ want: nil,
}}
for _, testcase := range testcases {
diff --git a/go/mysql/server_test.go b/go/mysql/server_test.go
index 082a176e3af..72b6f25d0c8 100644
--- a/go/mysql/server_test.go
+++ b/go/mysql/server_test.go
@@ -1424,7 +1424,7 @@ func TestListenerShutdown(t *testing.T) {
l.Shutdown()
- assert.EqualValues(t, 1, connRefuse.Get(), "connRefuse")
+ waitForConnRefuse(t, 1)
err = conn.Ping()
require.EqualError(t, err, "Server shutdown in progress (errno 1053) (sqlstate 08S01)")
@@ -1436,6 +1436,24 @@ func TestListenerShutdown(t *testing.T) {
require.Equal(t, "Server shutdown in progress", sqlErr.Message)
}
+func waitForConnRefuse(t *testing.T, valWanted int64) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ tick := time.NewTicker(100 * time.Millisecond)
+ defer tick.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ require.FailNow(t, "connRefuse did not reach %v", valWanted)
+ case <-tick.C:
+ if connRefuse.Get() == valWanted {
+ return
+ }
+ }
+ }
+}
+
func TestParseConnAttrs(t *testing.T) {
expected := map[string]string{
"_client_version": "8.0.11",
diff --git a/go/pools/smartconnpool/waitlist.go b/go/pools/smartconnpool/waitlist.go
index d4abeade0ac..f16215f4b14 100644
--- a/go/pools/smartconnpool/waitlist.go
+++ b/go/pools/smartconnpool/waitlist.go
@@ -88,11 +88,14 @@ func (wl *waitlist[C]) expire(force bool) {
// or remove everything if force is true
for e := wl.list.Front(); e != nil; e = e.Next() {
if force || e.Value.ctx.Err() != nil {
- wl.list.Remove(e)
expired = append(expired, e)
continue
}
}
+ // remove the expired waiters from the waitlist after traversing it
+ for _, e := range expired {
+ wl.list.Remove(e)
+ }
wl.mu.Unlock()
// once all the expired waiters have been removed from the waitlist, wake them up one by one
diff --git a/go/pools/smartconnpool/waitlist_test.go b/go/pools/smartconnpool/waitlist_test.go
new file mode 100644
index 00000000000..1486aa989b6
--- /dev/null
+++ b/go/pools/smartconnpool/waitlist_test.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package smartconnpool
+
+import (
+ "context"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestWaitlistExpireWithMultipleWaiters(t *testing.T) {
+ wait := waitlist[*TestConn]{}
+ wait.init()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
+ defer cancel()
+
+ waiterCount := 2
+ expireCount := atomic.Int32{}
+
+ for i := 0; i < waiterCount; i++ {
+ go func() {
+ _, err := wait.waitForConn(ctx, nil)
+ if err != nil {
+ expireCount.Add(1)
+ }
+ }()
+ }
+
+ // Wait for the context to expire
+ <-ctx.Done()
+
+ // Expire the waiters
+ wait.expire(false)
+
+ // Wait for the notified goroutines to finish
+ timeout := time.After(1 * time.Second)
+ ticker := time.NewTicker(10 * time.Millisecond)
+ defer ticker.Stop()
+ for expireCount.Load() != int32(waiterCount) {
+ select {
+ case <-timeout:
+ require.Failf(t, "Timed out waiting for all waiters to expire", "Wanted %d, got %d", waiterCount, expireCount.Load())
+ case <-ticker.C:
+ // try again
+ }
+ }
+
+ assert.Equal(t, int32(waiterCount), expireCount.Load())
+}
diff --git a/go/test/endtoend/backup/vtbackup/backup_only_test.go b/go/test/endtoend/backup/vtbackup/backup_only_test.go
index 33474ec5ff8..ebb0767a6a7 100644
--- a/go/test/endtoend/backup/vtbackup/backup_only_test.go
+++ b/go/test/endtoend/backup/vtbackup/backup_only_test.go
@@ -69,15 +69,10 @@ func TestTabletInitialBackup(t *testing.T) {
// Initialize the tablets
initTablets(t, false, false)
- vtTabletVersion, err := cluster.GetMajorVersion("vttablet")
- require.NoError(t, err)
- // For all version at or above v17.0.0, each replica will start in super_read_only mode. Let's verify that is working correctly.
- if vtTabletVersion >= 17 {
- err := primary.VttabletProcess.CreateDB("testDB")
- require.ErrorContains(t, err, "The MySQL server is running with the --super-read-only option so it cannot execute this statement")
- err = replica1.VttabletProcess.CreateDB("testDB")
- require.ErrorContains(t, err, "The MySQL server is running with the --super-read-only option so it cannot execute this statement")
- }
+ err := primary.VttabletProcess.CreateDB("testDB")
+ require.ErrorContains(t, err, "The MySQL server is running with the --super-read-only option so it cannot execute this statement")
+ err = replica1.VttabletProcess.CreateDB("testDB")
+ require.ErrorContains(t, err, "The MySQL server is running with the --super-read-only option so it cannot execute this statement")
// Restore the Tablet
restore(t, primary, "replica", "NOT_SERVING")
@@ -172,7 +167,7 @@ func firstBackupTest(t *testing.T, tabletType string) {
restore(t, replica2, "replica", "SERVING")
// Replica2 takes time to serve. Sleeping for 5 sec.
time.Sleep(5 * time.Second)
- //check the new replica has the data
+ // check the new replica has the data
cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2)
removeBackups(t)
diff --git a/go/test/endtoend/cluster/vtctld_process.go b/go/test/endtoend/cluster/vtctld_process.go
index d87427af9b9..6ac6ed5d2b0 100644
--- a/go/test/endtoend/cluster/vtctld_process.go
+++ b/go/test/endtoend/cluster/vtctld_process.go
@@ -65,15 +65,10 @@ func (vtctld *VtctldProcess) Setup(cell string, extraArgs ...string) (err error)
"--log_dir", vtctld.LogDir,
"--port", fmt.Sprintf("%d", vtctld.Port),
"--grpc_port", fmt.Sprintf("%d", vtctld.GrpcPort),
+ "--bind-address", "127.0.0.1",
+ "--grpc_bind_address", "127.0.0.1",
)
- if v, err := GetMajorVersion("vtctld"); err != nil {
- return err
- } else if v >= 18 {
- vtctld.proc.Args = append(vtctld.proc.Args, "--bind-address", "127.0.0.1")
- vtctld.proc.Args = append(vtctld.proc.Args, "--grpc_bind_address", "127.0.0.1")
- }
-
if *isCoverage {
vtctld.proc.Args = append(vtctld.proc.Args, "--test.coverprofile="+getCoveragePath("vtctld.out"))
}
diff --git a/go/test/endtoend/cluster/vtgate_process.go b/go/test/endtoend/cluster/vtgate_process.go
index d1877fb89bb..cec137bfabe 100644
--- a/go/test/endtoend/cluster/vtgate_process.go
+++ b/go/test/endtoend/cluster/vtgate_process.go
@@ -85,12 +85,8 @@ func (vtgate *VtgateProcess) Setup() (err error) {
"--tablet_types_to_wait", vtgate.TabletTypesToWait,
"--service_map", vtgate.ServiceMap,
"--mysql_auth_server_impl", vtgate.MySQLAuthServerImpl,
- }
- if v, err := GetMajorVersion("vtgate"); err != nil {
- return err
- } else if v >= 18 {
- args = append(args, "--bind-address", "127.0.0.1")
- args = append(args, "--grpc_bind_address", "127.0.0.1")
+ "--bind-address", "127.0.0.1",
+ "--grpc_bind_address", "127.0.0.1",
}
// If no explicit mysql_server_version has been specified then we autodetect
// the MySQL version that will be used for the test and base the vtgate's
diff --git a/go/test/endtoend/cluster/vtorc_process.go b/go/test/endtoend/cluster/vtorc_process.go
index 25bbb74c36c..c6ab9c5471a 100644
--- a/go/test/endtoend/cluster/vtorc_process.go
+++ b/go/test/endtoend/cluster/vtorc_process.go
@@ -126,14 +126,9 @@ func (orc *VTOrcProcess) Setup() (err error) {
"--instance-poll-time", "1s",
// Faster topo information refresh speeds up the tests. This doesn't add any significant load either
"--topo-information-refresh-duration", "3s",
+ "--bind-address", "127.0.0.1",
)
- if v, err := GetMajorVersion("vtorc"); err != nil {
- return err
- } else if v >= 18 {
- orc.proc.Args = append(orc.proc.Args, "--bind-address", "127.0.0.1")
- }
-
if *isCoverage {
orc.proc.Args = append(orc.proc.Args, "--test.coverprofile="+getCoveragePath("orc.out"))
}
diff --git a/go/test/endtoend/cluster/vttablet_process.go b/go/test/endtoend/cluster/vttablet_process.go
index 69f1cd4bb88..f5b19094195 100644
--- a/go/test/endtoend/cluster/vttablet_process.go
+++ b/go/test/endtoend/cluster/vttablet_process.go
@@ -110,13 +110,9 @@ func (vttablet *VttabletProcess) Setup() (err error) {
"--file_backup_storage_root", vttablet.FileBackupStorageRoot,
"--service_map", vttablet.ServiceMap,
"--db_charset", vttablet.Charset,
+ "--bind-address", "127.0.0.1",
+ "--grpc_bind_address", "127.0.0.1",
)
- if v, err := GetMajorVersion("vttablet"); err != nil {
- return err
- } else if v >= 18 {
- vttablet.proc.Args = append(vttablet.proc.Args, "--bind-address", "127.0.0.1")
- vttablet.proc.Args = append(vttablet.proc.Args, "--grpc_bind_address", "127.0.0.1")
- }
if *isCoverage {
vttablet.proc.Args = append(vttablet.proc.Args, "--test.coverprofile="+getCoveragePath("vttablet.out"))
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80-vcopier/allow_schemadiff_normalization b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80-vcopier/allow_schemadiff_normalization
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80-vcopier/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80-vcopier/alter
new file mode 100644
index 00000000000..b5ec82b1a8b
--- /dev/null
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80-vcopier/alter
@@ -0,0 +1 @@
+MODIFY `t1` varchar(128) CHARACTER SET utf8mb4 NOT NULL, MODIFY `t2` varchar(128) CHARACTER SET latin2 NOT NULL, MODIFY `tutf8` varchar(128) CHARACTER SET latin1 NOT NULL
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80-vcopier/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80-vcopier/create.sql
new file mode 100644
index 00000000000..79e8fda23ee
--- /dev/null
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80-vcopier/create.sql
@@ -0,0 +1,19 @@
+drop table if exists onlineddl_test;
+create table onlineddl_test (
+ id int auto_increment,
+ t1 varchar(128) charset latin1 collate latin1_swedish_ci,
+ t2 varchar(128) charset latin1 collate latin1_swedish_ci,
+ tutf8 varchar(128) charset utf8,
+ tutf8mb4 varchar(128) charset utf8mb4,
+ tlatin1 varchar(128) charset latin1 collate latin1_swedish_ci,
+ primary key(id)
+) auto_increment=1;
+
+insert into onlineddl_test values (null, md5(rand()), md5(rand()), md5(rand()), md5(rand()), md5(rand()));
+insert into onlineddl_test values (null, 'átesting', 'átesting', 'átesting', 'átesting', 'átesting');
+insert into onlineddl_test values (null, 'testátest', 'testátest', 'testátest', '🍻😀', 'átesting');
+insert into onlineddl_test values (null, 'átesting-binlog', 'átesting-binlog', 'átesting-binlog', 'átesting-binlog', 'átesting-binlog');
+insert into onlineddl_test values (null, 'testátest-binlog', 'testátest-binlog', 'testátest-binlog', '🍻😀', 'átesting-binlog');
+insert into onlineddl_test values (null, 'átesting-bnull', 'átesting-bnull', 'átesting-bnull', null, null);
+
+drop event if exists onlineddl_test;
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80-vcopier/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80-vcopier/ignore_versions
new file mode 100644
index 00000000000..0790a1e68fd
--- /dev/null
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80-vcopier/ignore_versions
@@ -0,0 +1 @@
+(5.5|5.6|5.7)
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/non-utf8-charset-pk/allow_schemadiff_normalization b/go/test/endtoend/onlineddl/vrepl_suite/testdata/non-utf8-charset-pk/allow_schemadiff_normalization
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/non-utf8-charset-pk/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/non-utf8-charset-pk/create.sql
new file mode 100644
index 00000000000..c0313e62c8d
--- /dev/null
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/non-utf8-charset-pk/create.sql
@@ -0,0 +1,30 @@
+drop table if exists onlineddl_test;
+create table onlineddl_test (
+ id varchar(128) charset latin1 collate latin1_swedish_ci,
+ t1 varchar(128) charset latin1 collate latin1_swedish_ci,
+ t2 varchar(128) charset latin1 collate latin1_swedish_ci,
+ tutf8 varchar(128) charset utf8,
+ tutf8mb4 varchar(128) charset utf8mb4,
+ tlatin1 varchar(128) charset latin1 collate latin1_swedish_ci,
+ primary key(id)
+) auto_increment=1;
+
+insert into onlineddl_test values (concat('átesting-', md5(rand())), md5(rand()), md5(rand()), md5(rand()), md5(rand()), md5(rand()));
+insert into onlineddl_test values (concat('átesting-', md5(rand())), 'átesting', 'átesting', 'átesting', 'átesting', 'átesting');
+insert into onlineddl_test values (concat('átesting-', md5(rand())), 'testátest', 'testátest', 'testátest', '🍻😀', 'átesting');
+
+drop event if exists onlineddl_test;
+delimiter ;;
+create event onlineddl_test
+ on schedule every 1 second
+ starts current_timestamp
+ ends current_timestamp + interval 60 second
+ on completion not preserve
+ enable
+ do
+begin
+ insert into onlineddl_test values (concat('átesting-', md5(rand())), md5(rand()), md5(rand()), md5(rand()), md5(rand()), md5(rand()));
+ insert into onlineddl_test values (concat('átesting-', md5(rand())), 'átesting-binlog', 'átesting-binlog', 'átesting-binlog', 'átesting-binlog', 'átesting-binlog');
+ insert into onlineddl_test values (concat('átesting-', md5(rand())), 'testátest-binlog', 'testátest-binlog', 'testátest-binlog', '🍻😀', 'átesting-binlog');
+ insert into onlineddl_test values (concat('átesting-', md5(rand())), 'átesting-bnull', 'átesting-bnull', 'átesting-bnull', null, null);
+end ;;
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/non-utf8-charset-pk/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/testdata/non-utf8-charset-pk/ignore_versions
new file mode 100644
index 00000000000..0790a1e68fd
--- /dev/null
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/non-utf8-charset-pk/ignore_versions
@@ -0,0 +1 @@
+(5.5|5.6|5.7)
diff --git a/go/test/endtoend/reparent/plannedreparent/reparent_test.go b/go/test/endtoend/reparent/plannedreparent/reparent_test.go
index 1f6f4da0a78..38e872f0f2b 100644
--- a/go/test/endtoend/reparent/plannedreparent/reparent_test.go
+++ b/go/test/endtoend/reparent/plannedreparent/reparent_test.go
@@ -199,13 +199,13 @@ func TestReparentFromOutsideWithNoPrimary(t *testing.T) {
}
func reparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProcessCluster, downPrimary bool) {
- //This test will start a primary and 3 replicas.
- //Then:
- //- one replica will be the new primary
- //- one replica will be reparented to that new primary
- //- one replica will be busted and dead in the water and we'll call TabletExternallyReparented.
- //Args:
- //downPrimary: kills the old primary first
+ // This test will start a primary and 3 replicas.
+ // Then:
+ // - one replica will be the new primary
+ // - one replica will be reparented to that new primary
+ // - one replica will be busted and dead in the water and we'll call TabletExternallyReparented.
+ // Args:
+ // downPrimary: kills the old primary first
ctx := context.Background()
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -218,7 +218,7 @@ func reparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProcessClus
demoteCommands := "SET GLOBAL read_only = ON; FLUSH TABLES WITH READ LOCK; UNLOCK TABLES"
utils.RunSQL(ctx, t, demoteCommands, tablets[0])
- //Get the position of the old primary and wait for the new one to catch up.
+ // Get the position of the old primary and wait for the new one to catch up.
err := utils.WaitForReplicationPosition(t, tablets[0], tablets[1])
require.NoError(t, err)
}
@@ -453,14 +453,7 @@ func TestFullStatus(t *testing.T) {
assert.Contains(t, primaryStatus.PrimaryStatus.String(), "vt-0000000101-bin")
assert.Equal(t, primaryStatus.GtidPurged, "MySQL56/")
assert.False(t, primaryStatus.ReadOnly)
- vtTabletVersion, err := cluster.GetMajorVersion("vttablet")
- require.NoError(t, err)
- vtcltlVersion, err := cluster.GetMajorVersion("vtctl")
- require.NoError(t, err)
- // For all version at or above v17.0.0, each replica will start in super_read_only mode.
- if vtTabletVersion >= 17 && vtcltlVersion >= 17 {
- assert.False(t, primaryStatus.SuperReadOnly)
- }
+ assert.False(t, primaryStatus.SuperReadOnly)
assert.True(t, primaryStatus.SemiSyncPrimaryEnabled)
assert.True(t, primaryStatus.SemiSyncReplicaEnabled)
assert.True(t, primaryStatus.SemiSyncPrimaryStatus)
@@ -514,10 +507,7 @@ func TestFullStatus(t *testing.T) {
assert.Contains(t, replicaStatus.PrimaryStatus.String(), "vt-0000000102-bin")
assert.Equal(t, replicaStatus.GtidPurged, "MySQL56/")
assert.True(t, replicaStatus.ReadOnly)
- // For all version at or above v17.0.0, each replica will start in super_read_only mode.
- if vtTabletVersion >= 17 && vtcltlVersion >= 17 {
- assert.True(t, replicaStatus.SuperReadOnly)
- }
+ assert.True(t, replicaStatus.SuperReadOnly)
assert.False(t, replicaStatus.SemiSyncPrimaryEnabled)
assert.True(t, replicaStatus.SemiSyncReplicaEnabled)
assert.False(t, replicaStatus.SemiSyncPrimaryStatus)
diff --git a/go/test/endtoend/vtgate/lookup_test.go b/go/test/endtoend/vtgate/lookup_test.go
index b4b53295d8d..6c653070338 100644
--- a/go/test/endtoend/vtgate/lookup_test.go
+++ b/go/test/endtoend/vtgate/lookup_test.go
@@ -552,6 +552,22 @@ func TestConsistentLookupUpdate(t *testing.T) {
require.Empty(t, qr.Rows)
}
+func TestSelectMultiEqualLookup(t *testing.T) {
+ conn, closer := start(t)
+ defer closer()
+
+ utils.Exec(t, conn, "insert into t10 (id, sharding_key, col1) values (1, 1, 'bar'), (2, 1, 'bar'), (3, 1, 'bar'), (4, 2, 'bar'), (5, 2, 'bar')")
+
+ for _, workload := range []string{"oltp", "olap"} {
+ t.Run(workload, func(t *testing.T) {
+ utils.Exec(t, conn, "set workload = "+workload)
+
+ utils.AssertMatches(t, conn, "select id from t10 WHERE (col1, id) IN (('bar', 1), ('baz', 2), ('qux', 3), ('barbar', 4))", "[[INT64(1)]]")
+ utils.AssertMatches(t, conn, "select id from t10 WHERE (col1 = 'bar' AND id = 1) OR (col1 = 'baz' AND id = 2) OR (col1 = 'qux' AND id = 3) OR (col1 = 'barbar' AND id = 4)", "[[INT64(1)]]")
+ })
+ }
+}
+
func TestSelectNullLookup(t *testing.T) {
conn, closer := start(t)
defer closer()
diff --git a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go
index 83840a78516..ffe0650c35a 100644
--- a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go
+++ b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go
@@ -105,7 +105,6 @@ func TestAggregateTypes(t *testing.T) {
mcmp.AssertMatches("select val1 as a, count(*) from aggr_test group by a order by 2, a", `[[VARCHAR("b") INT64(1)] [VARCHAR("d") INT64(1)] [VARCHAR("a") INT64(2)] [VARCHAR("c") INT64(2)] [VARCHAR("e") INT64(2)]]`)
mcmp.AssertMatches("select sum(val1) from aggr_test", `[[FLOAT64(0)]]`)
mcmp.Run("Average for sharded keyspaces", func(mcmp *utils.MySQLCompare) {
- mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate")
mcmp.AssertMatches("select avg(val1) from aggr_test", `[[FLOAT64(0)]]`)
})
}
@@ -209,7 +208,6 @@ func TestAggrOnJoin(t *testing.T) {
`[[VARCHAR("a")]]`)
mcmp.Run("Average in join for sharded", func(mcmp *utils.MySQLCompare) {
- mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate")
mcmp.AssertMatches(`select avg(a1.val2), avg(a2.val2) from aggr_test a1 join aggr_test a2 on a1.val2 = a2.id join t3 t on a2.val2 = t.id7`,
"[[DECIMAL(1.5000) DECIMAL(1.0000)]]")
@@ -367,7 +365,6 @@ func TestAggOnTopOfLimit(t *testing.T) {
mcmp.AssertMatches("select val1, count(*) from (select id, val1 from aggr_test where val2 < 4 order by val1 limit 2) as x group by val1", `[[NULL INT64(1)] [VARCHAR("a") INT64(1)]]`)
mcmp.AssertMatchesNoOrder("select val1, count(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1", `[[NULL INT64(1)] [VARCHAR("a") INT64(2)] [VARCHAR("b") INT64(1)] [VARCHAR("c") INT64(2)]]`)
mcmp.Run("Average in sharded query", func(mcmp *utils.MySQLCompare) {
- mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate")
mcmp.AssertMatches("select avg(val2) from (select id, val2 from aggr_test where val2 is null limit 2) as x", "[[NULL]]")
mcmp.AssertMatchesNoOrder("select val1, avg(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1", `[[NULL DECIMAL(2.0000)] [VARCHAR("a") DECIMAL(3.5000)] [VARCHAR("b") DECIMAL(1.0000)] [VARCHAR("c") DECIMAL(3.5000)]]`)
})
@@ -379,7 +376,6 @@ func TestAggOnTopOfLimit(t *testing.T) {
mcmp.AssertMatches("select count(val2), sum(val2) from (select id, val2 from aggr_test where val2 is null limit 2) as x", "[[INT64(0) NULL]]")
mcmp.AssertMatches("select val1, count(*), sum(id) from (select id, val1 from aggr_test where val2 < 4 order by val1 limit 2) as x group by val1", `[[NULL INT64(1) DECIMAL(7)] [VARCHAR("a") INT64(1) DECIMAL(2)]]`)
mcmp.Run("Average in sharded query", func(mcmp *utils.MySQLCompare) {
- mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate")
mcmp.AssertMatches("select count(*), sum(val1), avg(val1) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2) FLOAT64(0) FLOAT64(0)]]")
mcmp.AssertMatches("select count(val1), sum(id), avg(id) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2) DECIMAL(7) DECIMAL(3.5000)]]")
mcmp.AssertMatchesNoOrder("select val1, count(val2), sum(val2), avg(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1",
@@ -401,7 +397,6 @@ func TestEmptyTableAggr(t *testing.T) {
mcmp.AssertMatches(" select t1.`name`, count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]")
mcmp.AssertMatches(" select t1.`name`, count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]")
mcmp.Run("Average in sharded query", func(mcmp *utils.MySQLCompare) {
- mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate")
mcmp.AssertMatches(" select count(t1.value) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]")
mcmp.AssertMatches(" select avg(t1.value) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[NULL]]")
})
@@ -417,7 +412,6 @@ func TestEmptyTableAggr(t *testing.T) {
mcmp.AssertMatches(" select count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]")
mcmp.AssertMatches(" select t1.`name`, count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]")
mcmp.Run("Average in sharded query", func(mcmp *utils.MySQLCompare) {
- mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate")
mcmp.AssertMatches(" select count(t1.value) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]")
mcmp.AssertMatches(" select avg(t1.value) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[NULL]]")
mcmp.AssertMatches(" select t1.`name`, count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]")
@@ -473,7 +467,6 @@ func TestAggregateLeftJoin(t *testing.T) {
mcmp.AssertMatches("SELECT count(*) FROM t2 LEFT JOIN t1 ON t1.t1_id = t2.id WHERE IFNULL(t1.name, 'NOTSET') = 'r'", `[[INT64(1)]]`)
mcmp.Run("Average in sharded query", func(mcmp *utils.MySQLCompare) {
- mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate")
mcmp.AssertMatches("SELECT avg(t1.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[DECIMAL(0.5000)]]`)
mcmp.AssertMatches("SELECT avg(t2.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[DECIMAL(1.0000)]]`)
aggregations := []string{
@@ -530,7 +523,6 @@ func TestScalarAggregate(t *testing.T) {
mcmp.Exec("insert into aggr_test(id, val1, val2) values(1,'a',1), (2,'A',1), (3,'b',1), (4,'c',3), (5,'c',4)")
mcmp.AssertMatches("select count(distinct val1) from aggr_test", `[[INT64(3)]]`)
mcmp.Run("Average in sharded query", func(mcmp *utils.MySQLCompare) {
- mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate")
mcmp.AssertMatches("select avg(val1) from aggr_test", `[[FLOAT64(0)]]`)
})
}
@@ -590,7 +582,6 @@ func TestComplexAggregation(t *testing.T) {
mcmp.Exec(`SELECT name+COUNT(t1_id)+1 FROM t1 GROUP BY name`)
mcmp.Exec(`SELECT COUNT(*)+shardkey+MIN(t1_id)+1+MAX(t1_id)*SUM(t1_id)+1+name FROM t1 GROUP BY shardkey, name`)
mcmp.Run("Average in sharded query", func(mcmp *utils.MySQLCompare) {
- mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate")
mcmp.Exec(`SELECT COUNT(t1_id)+MAX(shardkey)+AVG(t1_id) FROM t1`)
})
}
@@ -713,7 +704,6 @@ func TestDistinctAggregation(t *testing.T) {
}
func TestHavingQueries(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
mcmp, closer := start(t)
defer closer()
diff --git a/go/test/endtoend/vtgate/queries/derived/cte_test.go b/go/test/endtoend/vtgate/queries/derived/cte_test.go
index 61ddf5d6661..677a5dba653 100644
--- a/go/test/endtoend/vtgate/queries/derived/cte_test.go
+++ b/go/test/endtoend/vtgate/queries/derived/cte_test.go
@@ -18,12 +18,9 @@ package misc
import (
"testing"
-
- "vitess.io/vitess/go/test/endtoend/utils"
)
func TestCTEWithOrderByLimit(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
mcmp, closer := start(t)
defer closer()
@@ -31,7 +28,6 @@ func TestCTEWithOrderByLimit(t *testing.T) {
}
func TestCTEAggregationOnRHS(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
mcmp, closer := start(t)
defer closer()
@@ -40,7 +36,6 @@ func TestCTEAggregationOnRHS(t *testing.T) {
}
func TestCTERemoveInnerOrderBy(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
mcmp, closer := start(t)
defer closer()
@@ -48,7 +43,6 @@ func TestCTERemoveInnerOrderBy(t *testing.T) {
}
func TestCTEWithHaving(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
mcmp, closer := start(t)
defer closer()
@@ -59,7 +53,6 @@ func TestCTEWithHaving(t *testing.T) {
}
func TestCTEColumns(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
mcmp, closer := start(t)
defer closer()
diff --git a/go/test/endtoend/vtgate/queries/derived/derived_test.go b/go/test/endtoend/vtgate/queries/derived/derived_test.go
index 6eb7ee914cd..6d9551b5d99 100644
--- a/go/test/endtoend/vtgate/queries/derived/derived_test.go
+++ b/go/test/endtoend/vtgate/queries/derived/derived_test.go
@@ -92,7 +92,6 @@ func TestDerivedTableColumns(t *testing.T) {
// We do this by not using the apply join we usually use, and instead use the hash join engine primitive
// These tests exercise these situations
func TestDerivedTablesWithLimit(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
// We need full type info before planning this, so we wait for the schema tracker
require.NoError(t,
utils.WaitForAuthoritative(t, keyspaceName, "user", clusterInstance.VtgateProcess.ReadVSchema))
@@ -117,6 +116,7 @@ func TestDerivedTablesWithLimit(t *testing.T) {
// TestDerivedTableColumnAliasWithJoin tests the derived table having alias column and using it in the join condition
func TestDerivedTableColumnAliasWithJoin(t *testing.T) {
utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
+
mcmp, closer := start(t)
defer closer()
diff --git a/go/test/endtoend/vtgate/queries/dml/dml_test.go b/go/test/endtoend/vtgate/queries/dml/dml_test.go
index 9d060e99881..c3d1acdec4d 100644
--- a/go/test/endtoend/vtgate/queries/dml/dml_test.go
+++ b/go/test/endtoend/vtgate/queries/dml/dml_test.go
@@ -21,8 +21,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
-
- "vitess.io/vitess/go/test/endtoend/utils"
)
func TestMultiEqual(t *testing.T) {
@@ -45,8 +43,6 @@ func TestMultiEqual(t *testing.T) {
// TestMultiTableDelete executed multi-table delete queries
func TestMultiTableDelete(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
-
mcmp, closer := start(t)
defer closer()
@@ -82,8 +78,6 @@ func TestMultiTableDelete(t *testing.T) {
// TestDeleteWithLimit executed delete queries with limit
func TestDeleteWithLimit(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
-
mcmp, closer := start(t)
defer closer()
diff --git a/go/test/endtoend/vtgate/queries/dml/insert_test.go b/go/test/endtoend/vtgate/queries/dml/insert_test.go
index dfb5961d887..a3749c4f0fc 100644
--- a/go/test/endtoend/vtgate/queries/dml/insert_test.go
+++ b/go/test/endtoend/vtgate/queries/dml/insert_test.go
@@ -21,9 +21,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- "vitess.io/vitess/go/test/endtoend/cluster"
"vitess.io/vitess/go/test/endtoend/utils"
)
@@ -92,19 +90,10 @@ func TestFailureInsertSelect(t *testing.T) {
// primary key same
mcmp.AssertContainsError("insert into s_tbl(id, num) select id, num*20 from s_tbl where id = 1", `AlreadyExists desc = Duplicate entry '1' for key`)
// lookup key same (does not fail on MySQL as there is no lookup, and we have not put unique constraint on num column)
- vtgateVersion, err := cluster.GetMajorVersion("vtgate")
- require.NoError(t, err)
- if vtgateVersion >= 19 {
- utils.AssertContainsError(t, mcmp.VtConn, "insert into s_tbl(id, num) select id*20, num from s_tbl where id = 1", `(errno 1062) (sqlstate 23000)`)
- // mismatch column count
- mcmp.AssertContainsError("insert into s_tbl(id, num) select 100,200,300", `column count does not match value count with the row`)
- mcmp.AssertContainsError("insert into s_tbl(id, num) select 100", `column count does not match value count with the row`)
- } else {
- utils.AssertContainsError(t, mcmp.VtConn, "insert into s_tbl(id, num) select id*20, num from s_tbl where id = 1", `lookup.Create: Code: ALREADY_EXISTS`)
- // mismatch column count
- mcmp.AssertContainsError("insert into s_tbl(id, num) select 100,200,300", `column count does not match value count at row 1`)
- mcmp.AssertContainsError("insert into s_tbl(id, num) select 100", `column count does not match value count at row 1`)
- }
+ utils.AssertContainsError(t, mcmp.VtConn, "insert into s_tbl(id, num) select id*20, num from s_tbl where id = 1", `(errno 1062) (sqlstate 23000)`)
+ // mismatch column count
+ mcmp.AssertContainsError("insert into s_tbl(id, num) select 100,200,300", `column count does not match value count with the row`)
+ mcmp.AssertContainsError("insert into s_tbl(id, num) select 100", `column count does not match value count with the row`)
})
}
}
@@ -483,3 +472,29 @@ func TestMixedCases(t *testing.T) {
// final check count on the lookup vindex table.
utils.AssertMatches(t, mcmp.VtConn, "select count(*) from lkp_mixed_idx", "[[INT64(12)]]")
}
+
+// TestInsertJson tests that selected json values are encoded correctly.
+func TestInsertJson(t *testing.T) {
+ utils.SkipIfBinaryIsBelowVersion(t, 21, "vtgate")
+ utils.SkipIfBinaryIsBelowVersion(t, 21, "vttablet")
+
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec(`insert into j_tbl(id, jdoc) values (1, '{}'), (2, '{"a": 1, "b": 2}')`)
+ mcmp.Exec(`select * from j_tbl order by id`)
+
+ mcmp.Exec(`insert into j_tbl(id, jdoc) select 3, json_object("k", "a")`)
+ mcmp.Exec(`select * from j_tbl order by id`)
+
+ mcmp.Exec(`insert into j_tbl(id, jdoc) select 4,JSON_OBJECT(
+ 'date', CAST(1629849600 AS UNSIGNED),
+ 'keywordSourceId', CAST(930701976723823 AS UNSIGNED),
+ 'keywordSourceVersionId', CAST(210825230433 AS UNSIGNED)
+ )`)
+ mcmp.Exec(`select * from j_tbl order by id`)
+
+ utils.Exec(t, mcmp.VtConn, `insert into uks.j_utbl(id, jdoc) select * from sks.j_tbl`)
+ utils.AssertMatches(t, mcmp.VtConn, `select * from uks.j_utbl order by id`,
+ `[[INT64(1) JSON("{}")] [INT64(2) JSON("{\"a\": 1, \"b\": 2}")] [INT64(3) JSON("{\"k\": \"a\"}")] [INT64(4) JSON("{\"date\": 1629849600, \"keywordSourceId\": 930701976723823, \"keywordSourceVersionId\": 210825230433}")]]`)
+}
diff --git a/go/test/endtoend/vtgate/queries/dml/main_test.go b/go/test/endtoend/vtgate/queries/dml/main_test.go
index c00e27fe3a0..0c4d58aa614 100644
--- a/go/test/endtoend/vtgate/queries/dml/main_test.go
+++ b/go/test/endtoend/vtgate/queries/dml/main_test.go
@@ -133,7 +133,7 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
tables := []string{
"s_tbl", "num_vdx_tbl", "user_tbl", "order_tbl", "oevent_tbl", "oextra_tbl",
- "auto_tbl", "oid_vdx_tbl", "unq_idx", "nonunq_idx", "u_tbl", "mixed_tbl", "lkp_map_idx",
+ "auto_tbl", "oid_vdx_tbl", "unq_idx", "nonunq_idx", "u_tbl", "mixed_tbl", "lkp_map_idx", "j_tbl", "j_utbl",
}
for _, table := range tables {
// TODO (@frouioui): following assertions produce different results between MySQL and Vitess
diff --git a/go/test/endtoend/vtgate/queries/dml/sharded_schema.sql b/go/test/endtoend/vtgate/queries/dml/sharded_schema.sql
index 3310724d420..cc24737a0fa 100644
--- a/go/test/endtoend/vtgate/queries/dml/sharded_schema.sql
+++ b/go/test/endtoend/vtgate/queries/dml/sharded_schema.sql
@@ -86,3 +86,10 @@ create table lkp_mixed_idx
keyspace_id varbinary(20),
primary key (lkp_key)
) Engine = InnoDB;
+
+create table j_tbl
+(
+ id bigint,
+ jdoc json,
+ primary key (id)
+) Engine = InnoDB;
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/queries/dml/unsharded_schema.sql b/go/test/endtoend/vtgate/queries/dml/unsharded_schema.sql
index 4d2ad06618a..cd64605ad20 100644
--- a/go/test/endtoend/vtgate/queries/dml/unsharded_schema.sql
+++ b/go/test/endtoend/vtgate/queries/dml/unsharded_schema.sql
@@ -34,4 +34,11 @@ values (0, 1, 1000);
insert into auto_seq(id, next_id, cache)
values (0, 666, 1000);
insert into mixed_seq(id, next_id, cache)
-values (0, 1, 1000);
\ No newline at end of file
+values (0, 1, 1000);
+
+create table j_utbl
+(
+ id bigint,
+ jdoc json,
+ primary key (id)
+) Engine = InnoDB;
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/queries/dml/vschema.json b/go/test/endtoend/vtgate/queries/dml/vschema.json
index a42a93d7403..72a949a49e4 100644
--- a/go/test/endtoend/vtgate/queries/dml/vschema.json
+++ b/go/test/endtoend/vtgate/queries/dml/vschema.json
@@ -188,6 +188,14 @@
"name": "hash"
}
]
+ },
+ "j_tbl": {
+ "column_vindexes": [
+ {
+ "column": "id",
+ "name": "hash"
+ }
+ ]
}
}
}
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go
index 5ba9877bf5f..a1ef2711499 100644
--- a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go
+++ b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go
@@ -221,9 +221,7 @@ func TestInfrSchemaAndUnionAll(t *testing.T) {
}
func TestTypeORMQuery(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
// This test checks that we can run queries similar to the ones that the TypeORM framework uses
-
require.NoError(t,
utils.WaitForAuthoritative(t, "ks", "t1", clusterInstance.VtgateProcess.ReadVSchema))
@@ -270,7 +268,6 @@ WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 't2';
}
func TestJoinWithSingleShardQueryOnRHS(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
// This test checks that we can run queries like this, where the RHS is a single shard query
mcmp, closer := start(t)
defer closer()
diff --git a/go/test/endtoend/vtgate/queries/misc/misc_test.go b/go/test/endtoend/vtgate/queries/misc/misc_test.go
index 2d861b1a625..7dd3bd5dfbb 100644
--- a/go/test/endtoend/vtgate/queries/misc/misc_test.go
+++ b/go/test/endtoend/vtgate/queries/misc/misc_test.go
@@ -37,7 +37,7 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
require.NoError(t, err)
deleteAll := func() {
- tables := []string{"t1", "uks.unsharded"}
+ tables := []string{"t1", "uks.unsharded", "tbl"}
for _, table := range tables {
_, _ = mcmp.ExecAndIgnore("delete from " + table)
}
@@ -60,15 +60,8 @@ func TestBitVals(t *testing.T) {
mcmp.AssertMatches(`select b'1001', 0x9, B'010011011010'`, `[[VARBINARY("\t") VARBINARY("\t") VARBINARY("\x04\xda")]]`)
mcmp.AssertMatches(`select b'1001', 0x9, B'010011011010' from t1`, `[[VARBINARY("\t") VARBINARY("\t") VARBINARY("\x04\xda")]]`)
- vtgateVersion, err := cluster.GetMajorVersion("vtgate")
- require.NoError(t, err)
- if vtgateVersion >= 19 {
- mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + B'010011011010'`, `[[INT64(10) UINT64(11) INT64(1245)]]`, `[[INT64(10) UINT64(11) INT64(1245)]]`)
- mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + B'010011011010' from t1`, `[[INT64(10) UINT64(11) INT64(1245)]]`, `[[INT64(10) UINT64(11) INT64(1245)]]`)
- } else {
- mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + B'010011011010'`, `[[INT64(10) UINT64(11) INT64(1245)]]`, `[[UINT64(10) UINT64(11) UINT64(1245)]]`)
- mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + B'010011011010' from t1`, `[[INT64(10) UINT64(11) INT64(1245)]]`, `[[UINT64(10) UINT64(11) UINT64(1245)]]`)
- }
+ mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + B'010011011010'`, `[[INT64(10) UINT64(11) INT64(1245)]]`, `[[INT64(10) UINT64(11) INT64(1245)]]`)
+ mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + B'010011011010' from t1`, `[[INT64(10) UINT64(11) INT64(1245)]]`, `[[INT64(10) UINT64(11) INT64(1245)]]`)
}
// TestTimeFunctionWithPrecision tests that inserting data with NOW(1) works as intended.
@@ -303,8 +296,6 @@ func TestAnalyze(t *testing.T) {
// TestTransactionModeVar executes SELECT on `transaction_mode` variable
func TestTransactionModeVar(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
-
mcmp, closer := start(t)
defer closer()
@@ -334,8 +325,18 @@ func TestTransactionModeVar(t *testing.T) {
}
}
+// TestAliasesInOuterJoinQueries tests that aliases work in queries that have outer join clauses.
+func TestAliasesInOuterJoinQueries(t *testing.T) {
+ mcmp, closer := start(t)
+ defer closer()
+
+ // Insert data into the 2 tables
+ mcmp.Exec("insert into t1(id1, id2) values (1,2), (42,5), (5, 42)")
+ mcmp.Exec("insert into tbl(id, unq_col, nonunq_col) values (1,2,3), (2,5,3), (3, 42, 2)")
+ mcmp.ExecWithColumnCompare("select * from t1 t left join tbl on t.id1 = 666 and t.id2 = tbl.id")
+}
+
func TestAlterTableWithView(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
mcmp, closer := start(t)
defer closer()
@@ -389,6 +390,7 @@ func TestAlterTableWithView(t *testing.T) {
func TestHandleNullableColumn(t *testing.T) {
utils.SkipIfBinaryIsBelowVersion(t, 21, "vtgate")
+
require.NoError(t,
utils.WaitForAuthoritative(t, keyspaceName, "tbl", clusterInstance.VtgateProcess.ReadVSchema))
mcmp, closer := start(t)
diff --git a/go/test/endtoend/vtgate/queries/misc/schema.sql b/go/test/endtoend/vtgate/queries/misc/schema.sql
index f87d7c19078..c1d9a7ed8b3 100644
--- a/go/test/endtoend/vtgate/queries/misc/schema.sql
+++ b/go/test/endtoend/vtgate/queries/misc/schema.sql
@@ -9,7 +9,7 @@ create table tbl
(
id bigint,
unq_col bigint,
- nonunq_col bigint,
+ nonunq_col bigint not null,
primary key (id),
unique (unq_col)
) Engine = InnoDB;
diff --git a/go/test/endtoend/vtgate/queries/normalize/normalize_test.go b/go/test/endtoend/vtgate/queries/normalize/normalize_test.go
index a3637ef5230..b6495443a8e 100644
--- a/go/test/endtoend/vtgate/queries/normalize/normalize_test.go
+++ b/go/test/endtoend/vtgate/queries/normalize/normalize_test.go
@@ -28,7 +28,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "vitess.io/vitess/go/test/endtoend/cluster"
"vitess.io/vitess/go/test/endtoend/utils"
"vitess.io/vitess/go/mysql"
@@ -40,16 +39,7 @@ func TestNormalizeAllFields(t *testing.T) {
defer conn.Close()
insertQuery := `insert into t1 values (1, "chars", "variable chars", x'73757265', 0x676F, 0.33, 9.99, 1, "1976-06-08", "small", "b", "{\"key\":\"value\"}", point(1,5), b'011', 0b0101)`
-
- normalizedInsertQuery := `insert into t1 values (:vtg1 /* INT64 */, :vtg2 /* VARCHAR */, :vtg3 /* VARCHAR */, :vtg4 /* HEXVAL */, :vtg5 /* HEXNUM */, :vtg6 /* DECIMAL(3,2) */, :vtg7 /* DECIMAL(3,2) */, :vtg8 /* INT64 */, :vtg9 /* VARCHAR */, :vtg10 /* VARCHAR */, :vtg11 /* VARCHAR */, :vtg12 /* VARCHAR */, point(:vtg13 /* INT64 */, :vtg14 /* INT64 */), :vtg15 /* BITNUM */, :vtg16 /* BITNUM */)`
- vtgateVersion, err := cluster.GetMajorVersion("vtgate")
- require.NoError(t, err)
- if vtgateVersion < 20 {
- normalizedInsertQuery = `insert into t1 values (:vtg1 /* INT64 */, :vtg2 /* VARCHAR */, :vtg3 /* VARCHAR */, :vtg4 /* HEXVAL */, :vtg5 /* HEXNUM */, :vtg6 /* DECIMAL */, :vtg7 /* DECIMAL */, :vtg8 /* INT64 */, :vtg9 /* VARCHAR */, :vtg10 /* VARCHAR */, :vtg11 /* VARCHAR */, :vtg12 /* VARCHAR */, point(:vtg13 /* INT64 */, :vtg14 /* INT64 */), :vtg15 /* BITNUM */, :vtg16 /* BITNUM */)`
- }
- if vtgateVersion < 19 {
- normalizedInsertQuery = `insert into t1 values (:vtg1 /* INT64 */, :vtg2 /* VARCHAR */, :vtg3 /* VARCHAR */, :vtg4 /* HEXVAL */, :vtg5 /* HEXNUM */, :vtg6 /* DECIMAL */, :vtg7 /* DECIMAL */, :vtg8 /* INT64 */, :vtg9 /* VARCHAR */, :vtg10 /* VARCHAR */, :vtg11 /* VARCHAR */, :vtg12 /* VARCHAR */, point(:vtg13 /* INT64 */, :vtg14 /* INT64 */), :vtg15 /* HEXNUM */, :vtg16 /* HEXNUM */)`
- }
+ normalizedInsertQuery := `insert into t1 values (:vtg1 /* INT64 */, :vtg2 /* VARCHAR */, :vtg3 /* VARCHAR */, :vtg4 /* HEXVAL */, :vtg5 /* HEXNUM */, :vtg6 /* DECIMAL */, :vtg7 /* DECIMAL */, :vtg8 /* INT64 */, :vtg9 /* VARCHAR */, :vtg10 /* VARCHAR */, :vtg11 /* VARCHAR */, :vtg12 /* VARCHAR */, point(:vtg13 /* INT64 */, :vtg14 /* INT64 */), :vtg15 /* BITNUM */, :vtg16 /* BITNUM */)`
selectQuery := "select * from t1"
utils.Exec(t, conn, insertQuery)
qr := utils.Exec(t, conn, selectQuery)
diff --git a/go/test/endtoend/vtgate/queries/orderby/orderby_test.go b/go/test/endtoend/vtgate/queries/orderby/orderby_test.go
index 1d2ee7db795..f6c52cab2ac 100644
--- a/go/test/endtoend/vtgate/queries/orderby/orderby_test.go
+++ b/go/test/endtoend/vtgate/queries/orderby/orderby_test.go
@@ -85,9 +85,6 @@ func TestOrderBy(t *testing.T) {
}
func TestOrderByComplex(t *testing.T) {
- // tests written to try to trick the ORDER BY engine and planner
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
-
mcmp, closer := start(t)
defer closer()
diff --git a/go/test/endtoend/vtgate/queries/reference/main_test.go b/go/test/endtoend/vtgate/queries/reference/main_test.go
index 4c9440ca4ff..c350038bf6e 100644
--- a/go/test/endtoend/vtgate/queries/reference/main_test.go
+++ b/go/test/endtoend/vtgate/queries/reference/main_test.go
@@ -18,6 +18,7 @@ package reference
import (
"context"
+ _ "embed"
"flag"
"fmt"
"os"
@@ -39,68 +40,16 @@ var (
vtParams mysql.ConnParams
unshardedKeyspaceName = "uks"
- unshardedSQLSchema = `
- CREATE TABLE IF NOT EXISTS zip(
- id BIGINT NOT NULL AUTO_INCREMENT,
- code5 INT(5) NOT NULL,
- PRIMARY KEY(id)
- ) ENGINE=InnoDB;
+ //go:embed uschema.sql
+ unshardedSQLSchema string
+ //go:embed uvschema.json
+ unshardedVSchema string
- INSERT INTO zip(id, code5)
- VALUES (1, 47107),
- (2, 82845),
- (3, 11237);
-
- CREATE TABLE IF NOT EXISTS zip_detail(
- id BIGINT NOT NULL AUTO_INCREMENT,
- zip_id BIGINT NOT NULL,
- discontinued_at DATE,
- PRIMARY KEY(id)
- ) ENGINE=InnoDB;
-
- `
- unshardedVSchema = `
- {
- "sharded":false,
- "tables": {
- "zip": {},
- "zip_detail": {}
- }
- }
- `
shardedKeyspaceName = "sks"
- shardedSQLSchema = `
- CREATE TABLE IF NOT EXISTS delivery_failure (
- id BIGINT NOT NULL,
- zip_detail_id BIGINT NOT NULL,
- reason VARCHAR(255),
- PRIMARY KEY(id)
- ) ENGINE=InnoDB;
- `
- shardedVSchema = `
- {
- "sharded": true,
- "vindexes": {
- "hash": {
- "type": "hash"
- }
- },
- "tables": {
- "delivery_failure": {
- "columnVindexes": [
- {
- "column": "id",
- "name": "hash"
- }
- ]
- },
- "zip_detail": {
- "type": "reference",
- "source": "` + unshardedKeyspaceName + `.zip_detail"
- }
- }
- }
- `
+ //go:embed sschema.sql
+ shardedSQLSchema string
+ //go:embed svschema.json
+ shardedVSchema string
)
func TestMain(m *testing.M) {
diff --git a/go/test/endtoend/vtgate/queries/reference/reference_test.go b/go/test/endtoend/vtgate/queries/reference/reference_test.go
index ae7319a52e3..2a1df47a352 100644
--- a/go/test/endtoend/vtgate/queries/reference/reference_test.go
+++ b/go/test/endtoend/vtgate/queries/reference/reference_test.go
@@ -85,19 +85,20 @@ func TestReferenceRouting(t *testing.T) {
t.Run("Complex reference query", func(t *testing.T) {
utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
+
// Verify a complex query using reference tables with a left join having a derived table with an order by clause works as intended.
utils.AssertMatches(
t,
conn,
`SELECT t.id FROM (
- SELECT zd.id, zd.zip_id
- FROM `+shardedKeyspaceName+`.zip_detail AS zd
- WHERE zd.id IN (2)
- ORDER BY zd.discontinued_at
- LIMIT 1
- ) AS t
- LEFT JOIN `+shardedKeyspaceName+`.zip_detail AS t0 ON t.zip_id = t0.zip_id
- ORDER BY t.id`,
+ SELECT zd.id, zd.zip_id
+ FROM `+shardedKeyspaceName+`.zip_detail AS zd
+ WHERE zd.id IN (2)
+ ORDER BY zd.discontinued_at
+ LIMIT 1
+ ) AS t
+ LEFT JOIN `+shardedKeyspaceName+`.zip_detail AS t0 ON t.zip_id = t0.zip_id
+ ORDER BY t.id`,
`[[INT64(2)]]`,
)
})
@@ -156,3 +157,19 @@ func TestReferenceRouting(t *testing.T) {
`[[INT64(2)]]`,
)
}
+
+// TestMultiReferenceQuery tests that a query with multiple references with unsharded keyspace and sharded keyspace works with join.
+func TestMultiReferenceQuery(t *testing.T) {
+ utils.SkipIfBinaryIsBelowVersion(t, 21, "vtgate")
+ conn, closer := start(t)
+ defer closer()
+
+ query :=
+ `select 1
+ from delivery_failure df1
+ join delivery_failure df2 on df1.id = df2.id
+ join uks.zip_detail zd1 on df1.zip_detail_id = zd1.zip_id
+ join uks.zip_detail zd2 on zd1.zip_id = zd2.zip_id`
+
+ utils.Exec(t, conn, query)
+}
diff --git a/go/test/endtoend/vtgate/queries/reference/sschema.sql b/go/test/endtoend/vtgate/queries/reference/sschema.sql
new file mode 100644
index 00000000000..0fcaf63a422
--- /dev/null
+++ b/go/test/endtoend/vtgate/queries/reference/sschema.sql
@@ -0,0 +1,6 @@
+CREATE TABLE IF NOT EXISTS delivery_failure (
+ id BIGINT NOT NULL,
+ zip_detail_id BIGINT NOT NULL,
+ reason VARCHAR(255),
+ PRIMARY KEY(id)
+) ENGINE=InnoDB;
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/queries/reference/svschema.json b/go/test/endtoend/vtgate/queries/reference/svschema.json
new file mode 100644
index 00000000000..815e0e8d21c
--- /dev/null
+++ b/go/test/endtoend/vtgate/queries/reference/svschema.json
@@ -0,0 +1,22 @@
+{
+ "sharded": true,
+ "vindexes": {
+ "hash": {
+ "type": "hash"
+ }
+ },
+ "tables": {
+ "delivery_failure": {
+ "columnVindexes": [
+ {
+ "column": "id",
+ "name": "hash"
+ }
+ ]
+ },
+ "zip_detail": {
+ "type": "reference",
+ "source": "uks.zip_detail"
+ }
+ }
+}
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/queries/reference/uschema.sql b/go/test/endtoend/vtgate/queries/reference/uschema.sql
new file mode 100644
index 00000000000..52737928469
--- /dev/null
+++ b/go/test/endtoend/vtgate/queries/reference/uschema.sql
@@ -0,0 +1,17 @@
+CREATE TABLE IF NOT EXISTS zip(
+ id BIGINT NOT NULL AUTO_INCREMENT,
+ code5 INT(5) NOT NULL,
+ PRIMARY KEY(id)
+) ENGINE=InnoDB;
+
+INSERT INTO zip(id, code5)
+VALUES (1, 47107),
+ (2, 82845),
+ (3, 11237);
+
+CREATE TABLE IF NOT EXISTS zip_detail(
+ id BIGINT NOT NULL AUTO_INCREMENT,
+ zip_id BIGINT NOT NULL,
+ discontinued_at DATE,
+ PRIMARY KEY(id)
+) ENGINE=InnoDB;
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/queries/reference/uvschema.json b/go/test/endtoend/vtgate/queries/reference/uvschema.json
new file mode 100644
index 00000000000..fdcfca0d7a9
--- /dev/null
+++ b/go/test/endtoend/vtgate/queries/reference/uvschema.json
@@ -0,0 +1,6 @@
+{
+ "tables": {
+ "zip": {},
+ "zip_detail": {}
+ }
+}
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/queries/subquery/subquery_test.go b/go/test/endtoend/vtgate/queries/subquery/subquery_test.go
index e3f3cc52a5b..173a4a51b45 100644
--- a/go/test/endtoend/vtgate/queries/subquery/subquery_test.go
+++ b/go/test/endtoend/vtgate/queries/subquery/subquery_test.go
@@ -80,7 +80,6 @@ func TestNotINQueries(t *testing.T) {
// Test only supported in >= v16.0.0
func TestSubqueriesExists(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 16, "vtgate")
mcmp, closer := start(t)
defer closer()
@@ -162,7 +161,6 @@ func TestSubqueryInReference(t *testing.T) {
// TestSubqueryInAggregation validates that subquery work inside aggregation functions.
func TestSubqueryInAggregation(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
mcmp, closer := start(t)
defer closer()
@@ -181,6 +179,7 @@ func TestSubqueryInAggregation(t *testing.T) {
// are handled correctly when there are joins inside the derived table
func TestSubqueryInDerivedTable(t *testing.T) {
utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
+
mcmp, closer := start(t)
defer closer()
@@ -195,6 +194,7 @@ func TestSubqueries(t *testing.T) {
// The commented out queries are failing because of wrong types being returned.
// The tests are commented out until the issue is fixed.
utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
+
mcmp, closer := start(t)
defer closer()
queries := []string{
diff --git a/go/test/endtoend/vtgate/queries/timeout/timeout_test.go b/go/test/endtoend/vtgate/queries/timeout/timeout_test.go
index 9c81a6c5822..25a7f57b3bc 100644
--- a/go/test/endtoend/vtgate/queries/timeout/timeout_test.go
+++ b/go/test/endtoend/vtgate/queries/timeout/timeout_test.go
@@ -96,5 +96,9 @@ func TestQueryTimeoutWithTables(t *testing.T) {
_, err = utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ QUERY_TIMEOUT_MS=20 */ sleep(0.1) from t1 where id1 = 1")
require.Error(t, err)
assert.Contains(t, err.Error(), "context deadline exceeded")
- assert.Contains(t, err.Error(), "(errno 1317) (sqlstate 70100)")
+ vttabletVersion, err2 := cluster.GetMajorVersion("vttablet")
+ require.NoError(t, err2)
+ if vttabletVersion <= 19 {
+ require.Contains(t, err.Error(), "(errno 1317) (sqlstate 70100)")
+ }
}
diff --git a/go/test/endtoend/vtgate/queries/tpch/tpch_test.go b/go/test/endtoend/vtgate/queries/tpch/tpch_test.go
index c0d8c798273..451eb9b6c67 100644
--- a/go/test/endtoend/vtgate/queries/tpch/tpch_test.go
+++ b/go/test/endtoend/vtgate/queries/tpch/tpch_test.go
@@ -49,6 +49,7 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
func TestTPCHQueries(t *testing.T) {
utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
+
mcmp, closer := start(t)
defer closer()
err := utils.WaitForColumn(t, clusterInstance.VtgateProcess, keyspaceName, "region", `R_COMMENT`)
diff --git a/go/test/endtoend/vtgate/schema.sql b/go/test/endtoend/vtgate/schema.sql
index 4c9ed46fe9a..8e7eed0b594 100644
--- a/go/test/endtoend/vtgate/schema.sql
+++ b/go/test/endtoend/vtgate/schema.sql
@@ -164,4 +164,4 @@ create table t11
col2 int,
col3 int,
primary key (id)
-) Engine = InnoDB;
\ No newline at end of file
+) Engine = InnoDB;
diff --git a/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go b/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go
index 7f560071d8b..aab8bbd2a04 100644
--- a/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go
+++ b/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go
@@ -184,13 +184,7 @@ func TestInitAndUpdate(t *testing.T) {
require.NoError(t, err)
defer conn.Close()
- vtgateVersion, err := cluster.GetMajorVersion("vtgate")
- require.NoError(t, err)
-
- expected := `[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
- if vtgateVersion >= 17 {
- expected = `[[VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
- }
+ expected := `[[VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
utils.AssertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
expected,
@@ -198,6 +192,8 @@ func TestInitAndUpdate(t *testing.T) {
30*time.Second,
"initial table list not complete")
+ vtgateVersion, err := cluster.GetMajorVersion("vtgate")
+ require.NoError(t, err)
if vtgateVersion >= 19 {
utils.AssertMatches(t, conn,
"SHOW VSCHEMA KEYSPACES",
@@ -206,10 +202,7 @@ func TestInitAndUpdate(t *testing.T) {
// Init
_ = utils.Exec(t, conn, "create table test_sc (id bigint primary key)")
- expected = `[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")] [VARCHAR("test_sc")]]`
- if vtgateVersion >= 17 {
- expected = `[[VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")] [VARCHAR("test_sc")]]`
- }
+ expected = `[[VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")] [VARCHAR("test_sc")]]`
utils.AssertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
expected,
@@ -219,10 +212,7 @@ func TestInitAndUpdate(t *testing.T) {
// Tables Update via health check.
_ = utils.Exec(t, conn, "create table test_sc1 (id bigint primary key)")
- expected = `[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")] [VARCHAR("test_sc")] [VARCHAR("test_sc1")]]`
- if vtgateVersion >= 17 {
- expected = `[[VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")] [VARCHAR("test_sc")] [VARCHAR("test_sc1")]]`
- }
+ expected = `[[VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")] [VARCHAR("test_sc")] [VARCHAR("test_sc1")]]`
utils.AssertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
expected,
@@ -231,10 +221,7 @@ func TestInitAndUpdate(t *testing.T) {
"test_sc1 not in vschema tables")
_ = utils.Exec(t, conn, "drop table test_sc, test_sc1")
- expected = `[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
- if vtgateVersion >= 17 {
- expected = `[[VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
- }
+ expected = `[[VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
utils.AssertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
expected,
@@ -253,12 +240,7 @@ func TestDMLOnNewTable(t *testing.T) {
// create a new table which is not part of the VSchema
utils.Exec(t, conn, `create table new_table_tracked(id bigint, name varchar(100), primary key(id)) Engine=InnoDB`)
- vtgateVersion, err := cluster.GetMajorVersion("vtgate")
- require.NoError(t, err)
- expected := `[[VARCHAR("dual")] [VARCHAR("new_table_tracked")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
- if vtgateVersion >= 17 {
- expected = `[[VARCHAR("new_table_tracked")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
- }
+ expected := `[[VARCHAR("new_table_tracked")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
// wait for vttablet's schema reload interval to pass
utils.AssertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
@@ -302,9 +284,6 @@ func TestDMLOnNewTable(t *testing.T) {
// TestNewView validates that view tracking works as expected.
func TestNewView(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 16, "vtgate")
- utils.SkipIfBinaryIsBelowVersion(t, 16, "vttablet")
-
ctx := context.Background()
conn, err := mysql.Connect(ctx, &vtParams)
require.NoError(t, err)
@@ -327,9 +306,6 @@ func TestNewView(t *testing.T) {
// TestViewAndTable validates that new column added in table is present in the view definition
func TestViewAndTable(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 16, "vtgate")
- utils.SkipIfBinaryIsBelowVersion(t, 16, "vttablet")
-
ctx := context.Background()
conn, err := mysql.Connect(ctx, &vtParams)
require.NoError(t, err)
diff --git a/go/test/endtoend/vtgate/vitess_tester/expressions/expressions.test b/go/test/endtoend/vtgate/vitess_tester/expressions/expressions.test
new file mode 100644
index 00000000000..60c1e641463
--- /dev/null
+++ b/go/test/endtoend/vtgate/vitess_tester/expressions/expressions.test
@@ -0,0 +1,30 @@
+# This file contains queries that test expressions in Vitess.
+# We've found a number of bugs around precedences that we want to test.
+CREATE TABLE t0
+(
+ c1 BIT,
+ INDEX idx_c1 (c1)
+);
+
+INSERT INTO t0(c1)
+VALUES ('');
+
+
+SELECT *
+FROM t0;
+
+SELECT ((t0.c1 = 'a'))
+FROM t0;
+
+SELECT *
+FROM t0
+WHERE ((t0.c1 = 'a'));
+
+
+SELECT (1 LIKE ('a' IS NULL));
+SELECT (NOT (1 LIKE ('a' IS NULL)));
+
+SELECT (~ (1 || 0)) IS NULL;
+
+SELECT 1
+WHERE (~ (1 || 0)) IS NULL;
diff --git a/go/test/endtoend/vtgate/vitess_tester/join/join.test b/go/test/endtoend/vtgate/vitess_tester/join/join.test
new file mode 100644
index 00000000000..e550145f8d5
--- /dev/null
+++ b/go/test/endtoend/vtgate/vitess_tester/join/join.test
@@ -0,0 +1,78 @@
+CREATE TABLE `t1`
+(
+ `id` int unsigned NOT NULL AUTO_INCREMENT,
+ `name` varchar(191) NOT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE InnoDB,
+ CHARSET utf8mb4,
+ COLLATE utf8mb4_unicode_ci;
+
+CREATE TABLE `t2`
+(
+ `id` bigint unsigned NOT NULL AUTO_INCREMENT,
+ `t1_id` int unsigned NOT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE InnoDB,
+ CHARSET utf8mb4,
+ COLLATE utf8mb4_unicode_ci;
+
+CREATE TABLE `t3`
+(
+ `id` bigint unsigned NOT NULL AUTO_INCREMENT,
+ `name` varchar(191) NOT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE InnoDB,
+ CHARSET utf8mb4,
+ COLLATE utf8mb4_unicode_ci;
+
+CREATE TABLE `t4`
+(
+ `id` bigint unsigned NOT NULL AUTO_INCREMENT,
+ `col` int unsigned NOT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE InnoDB,
+ CHARSET utf8mb4,
+ COLLATE utf8mb4_unicode_ci;
+
+insert into t1 (id, name)
+values (1, 'A'),
+ (2, 'B'),
+ (3, 'C'),
+ (4, 'D');
+
+insert into t2 (id, t1_id)
+values (1, 1),
+ (2, 2),
+ (3, 3);
+
+insert into t3 (id, name)
+values (1, 'A'),
+ (2, 'B'),
+ (3, 'B'),
+ (4, 'B'),
+ (5, 'B');
+
+insert into t4 (id, col)
+values (1, 1),
+ (2, 2),
+ (3, 3);
+
+-- wait_authoritative t1
+-- wait_authoritative t2
+-- wait_authoritative t3
+select 42
+from t1
+ join t2 on t1.id = t2.t1_id
+ join t3 on t1.id = t3.id
+where t1.name
+ or t2.id
+ or t3.name;
+
+# Complex query that requires hash join underneath a memory sort and ordered aggregate
+select 1
+from t1
+ join t2 on t1.id = t2.t1_id
+ join t4 on t4.col = t2.id
+ left join (select t4.col, count(*) as count from t4 group by t4.col) t3 on t3.col = t2.id
+where t1.id IN (1, 2)
+group by t2.id, t4.col;
diff --git a/go/test/endtoend/vtgate/vitess_tester/join/vschema.json b/go/test/endtoend/vtgate/vitess_tester/join/vschema.json
new file mode 100644
index 00000000000..1105b951e61
--- /dev/null
+++ b/go/test/endtoend/vtgate/vitess_tester/join/vschema.json
@@ -0,0 +1,46 @@
+{
+ "keyspaces": {
+ "joinks": {
+ "sharded": true,
+ "vindexes": {
+ "hash": {
+ "type": "hash"
+ }
+ },
+ "tables": {
+ "t1": {
+ "column_vindexes": [
+ {
+ "column": "id",
+ "name": "hash"
+ }
+ ]
+ },
+ "t2": {
+ "column_vindexes": [
+ {
+ "column": "t1_id",
+ "name": "hash"
+ }
+ ]
+ },
+ "t3": {
+ "column_vindexes": [
+ {
+ "column": "id",
+ "name": "hash"
+ }
+ ]
+ },
+ "t4": {
+ "column_vindexes": [
+ {
+ "column": "id",
+ "name": "hash"
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/vitess_tester/two_sharded_keyspaces/queries.test b/go/test/endtoend/vtgate/vitess_tester/two_sharded_keyspaces/queries.test
new file mode 100644
index 00000000000..28c55e559c9
--- /dev/null
+++ b/go/test/endtoend/vtgate/vitess_tester/two_sharded_keyspaces/queries.test
@@ -0,0 +1,39 @@
+use customer;
+create table if not exists customer
+(
+ customer_id bigint not null,
+ email varbinary(128),
+ primary key (customer_id)
+) ENGINE = InnoDB;
+
+insert into customer.customer(customer_id, email)
+values (1, 'alice@domain.com'),
+ (2, 'bob@domain.com'),
+ (3, 'charlie@domain.com'),
+ (4, 'dan@domain.com'),
+ (5, 'eve@domain.com');
+use corder;
+create table if not exists corder
+(
+ order_id bigint not null,
+ customer_id bigint,
+ sku varbinary(128),
+ price bigint,
+ primary key (order_id)
+) ENGINE = InnoDB;
+insert into corder.corder(order_id, customer_id, sku, price)
+values (1, 1, 'SKU-1001', 100),
+ (2, 2, 'SKU-1002', 30),
+ (3, 3, 'SKU-1002', 30),
+ (4, 4, 'SKU-1002', 30),
+ (5, 5, 'SKU-1002', 30);
+
+select co.order_id, co.customer_id, co.price
+from corder.corder co
+ left join customer.customer cu on co.customer_id = cu.customer_id
+where cu.customer_id = 1;
+
+# This query was accidentally disallowed by https://github.com/vitessio/vitess/pull/16520
+select 1
+from customer.customer
+where customer_id in (select customer_id from corder.corder where price > 50);
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/vitess_tester/two_sharded_keyspaces/vschema.json b/go/test/endtoend/vtgate/vitess_tester/two_sharded_keyspaces/vschema.json
new file mode 100644
index 00000000000..5672042bace
--- /dev/null
+++ b/go/test/endtoend/vtgate/vitess_tester/two_sharded_keyspaces/vschema.json
@@ -0,0 +1,72 @@
+{
+ "keyspaces": {
+ "customer": {
+ "sharded": true,
+ "vindexes": {
+ "hash": {
+ "type": "hash",
+ "params": {},
+ "owner": ""
+ }
+ },
+ "tables": {
+ "customer": {
+ "type": "",
+ "column_vindexes": [
+ {
+ "column": "customer_id",
+ "name": "hash",
+ "columns": []
+ }
+ ],
+ "columns": [],
+ "pinned": "",
+ "column_list_authoritative": false,
+ "source": ""
+ }
+ },
+ "require_explicit_routing": false,
+ "foreign_key_mode": 0,
+ "multi_tenant_spec": null
+ },
+ "corder": {
+ "sharded": true,
+ "vindexes": {
+ "hash": {
+ "type": "hash",
+ "params": {},
+ "owner": ""
+ }
+ },
+ "tables": {
+ "corder": {
+ "type": "",
+ "column_vindexes": [
+ {
+ "column": "customer_id",
+ "name": "hash",
+ "columns": []
+ }
+ ],
+ "columns": [],
+ "pinned": "",
+ "column_list_authoritative": false,
+ "source": ""
+ }
+ },
+ "require_explicit_routing": false,
+ "foreign_key_mode": 0,
+ "multi_tenant_spec": null
+ }
+ },
+ "routing_rules": {
+ "rules": []
+ },
+ "shard_routing_rules": {
+ "rules": []
+ },
+ "keyspace_routing_rules": null,
+ "mirror_rules": {
+ "rules": []
+ }
+}
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/vschema/vschema_test.go b/go/test/endtoend/vtgate/vschema/vschema_test.go
index 92863ff7dc8..eec54f8f47f 100644
--- a/go/test/endtoend/vtgate/vschema/vschema_test.go
+++ b/go/test/endtoend/vtgate/vschema/vschema_test.go
@@ -110,16 +110,7 @@ func TestVSchema(t *testing.T) {
`[[INT64(1) VARCHAR("test1")] [INT64(2) VARCHAR("test2")] [INT64(3) VARCHAR("test3")] [INT64(4) VARCHAR("test4")]]`)
utils.AssertMatches(t, conn, "delete from vt_user", `[]`)
-
- vtgateVersion, err := cluster.GetMajorVersion("vtgate")
- require.NoError(t, err)
-
- // Test empty vschema
- if vtgateVersion >= 17 {
- utils.AssertMatches(t, conn, "SHOW VSCHEMA TABLES", `[]`)
- } else {
- utils.AssertMatches(t, conn, "SHOW VSCHEMA TABLES", `[[VARCHAR("dual")]]`)
- }
+ utils.AssertMatches(t, conn, "SHOW VSCHEMA TABLES", `[]`)
// Use the DDL to create an unsharded vschema and test again
@@ -135,11 +126,7 @@ func TestVSchema(t *testing.T) {
utils.Exec(t, conn, "commit")
// Test Showing Tables
- if vtgateVersion >= 17 {
- utils.AssertMatches(t, conn, "SHOW VSCHEMA TABLES", `[[VARCHAR("main")] [VARCHAR("vt_user")]]`)
- } else {
- utils.AssertMatches(t, conn, "SHOW VSCHEMA TABLES", `[[VARCHAR("dual")] [VARCHAR("main")] [VARCHAR("vt_user")]]`)
- }
+ utils.AssertMatches(t, conn, "SHOW VSCHEMA TABLES", `[[VARCHAR("main")] [VARCHAR("vt_user")]]`)
// Test Showing Vindexes
utils.AssertMatches(t, conn, "SHOW VSCHEMA VINDEXES", `[]`)
diff --git a/go/vt/discovery/healthcheck.go b/go/vt/discovery/healthcheck.go
index d37b7419fd6..47146988de2 100644
--- a/go/vt/discovery/healthcheck.go
+++ b/go/vt/discovery/healthcheck.go
@@ -368,7 +368,7 @@ func NewHealthCheck(ctx context.Context, retryDelay, healthCheckTimeout time.Dur
healthy: make(map[KeyspaceShardTabletType][]*TabletHealth),
subscribers: make(map[chan *TabletHealth]struct{}),
cellAliases: make(map[string]string),
- loadTabletsTrigger: make(chan struct{}),
+ loadTabletsTrigger: make(chan struct{}, 1),
}
var topoWatchers []*TopologyWatcher
cells := strings.Split(cellsToWatch, ",")
@@ -483,7 +483,20 @@ func (hc *HealthCheckImpl) deleteTablet(tablet *topodata.Tablet) {
// delete from healthy list
healthy, ok := hc.healthy[key]
if ok && len(healthy) > 0 {
- hc.recomputeHealthy(key)
+ if tabletType == topodata.TabletType_PRIMARY {
+ // If the deleted tablet was a primary,
+ // and it matches what we think is the current active primary,
+ // clear the healthy list for the primary.
+ //
+ // See the logic in `updateHealth` for more details.
+ alias := tabletAliasString(topoproto.TabletAliasString(healthy[0].Tablet.Alias))
+ if alias == tabletAlias {
+ hc.healthy[key] = []*TabletHealth{}
+ }
+ } else {
+ // Simply recompute the list of healthy tablets for all other tablet types.
+ hc.recomputeHealthy(key)
+ }
}
}
}()
@@ -535,7 +548,13 @@ func (hc *HealthCheckImpl) updateHealth(th *TabletHealth, prevTarget *query.Targ
if prevTarget.TabletType == topodata.TabletType_PRIMARY {
if primaries := hc.healthData[oldTargetKey]; len(primaries) == 0 {
log.Infof("We will have no health data for the next new primary tablet after demoting the tablet: %v, so start loading tablets now", topotools.TabletIdent(th.Tablet))
- hc.loadTabletsTrigger <- struct{}{}
+ // We want to trigger a loadTablets call, but if the channel is not empty
+ // then a trigger is already scheduled, we don't need to trigger another one.
+ // This also prevents the code from deadlocking as described in https://github.com/vitessio/vitess/issues/16994.
+ select {
+ case hc.loadTabletsTrigger <- struct{}{}:
+ default:
+ }
}
}
}
@@ -599,6 +618,13 @@ func (hc *HealthCheckImpl) updateHealth(th *TabletHealth, prevTarget *query.Targ
hc.broadcast(th)
}
+// recomputeHealthy recomputes the healthy tablets for the given key.
+//
+// This filters out tablets that might be healthy, but are not part of the current
+// cell or cell alias. It also performs filtering of tablets based on replication lag,
+// if configured to do so.
+//
+// This should not be called for primary tablets.
func (hc *HealthCheckImpl) recomputeHealthy(key KeyspaceShardTabletType) {
all := hc.healthData[key]
allArray := make([]*TabletHealth, 0, len(all))
diff --git a/go/vt/discovery/healthcheck_test.go b/go/vt/discovery/healthcheck_test.go
index fa7c497c146..1e7bbd41033 100644
--- a/go/vt/discovery/healthcheck_test.go
+++ b/go/vt/discovery/healthcheck_test.go
@@ -855,6 +855,127 @@ func TestRemoveTablet(t *testing.T) {
assert.Empty(t, a, "wrong result, expected empty list")
}
+// When an external primary failover is performed,
+// the demoted primary will advertise itself as a `PRIMARY`
+// tablet until it recognizes that it was demoted,
+// and until all in-flight operations have either finished
+// (successfully or unsuccessfully, see `--shutdown_grace_period` flag).
+//
+// During this time, operations like `RemoveTablet` should not lead
+// to multiple tablets becoming valid targets for `PRIMARY`.
+func TestRemoveTabletDuringExternalReparenting(t *testing.T) {
+ ctx := utils.LeakCheckContext(t)
+
+ // reset error counters
+ hcErrorCounters.ResetAll()
+ ts := memorytopo.NewServer(ctx, "cell")
+ defer ts.Close()
+ hc := createTestHc(ctx, ts)
+ // close healthcheck
+ defer hc.Close()
+
+ firstTablet := createTestTablet(0, "cell", "a")
+ firstTablet.Type = topodatapb.TabletType_PRIMARY
+
+ secondTablet := createTestTablet(1, "cell", "b")
+ secondTablet.Type = topodatapb.TabletType_REPLICA
+
+ thirdTablet := createTestTablet(2, "cell", "c")
+ thirdTablet.Type = topodatapb.TabletType_REPLICA
+
+ firstTabletHealthStream := make(chan *querypb.StreamHealthResponse)
+ firstTabletConn := createFakeConn(firstTablet, firstTabletHealthStream)
+ firstTabletConn.errCh = make(chan error)
+
+ secondTabletHealthStream := make(chan *querypb.StreamHealthResponse)
+ secondTabletConn := createFakeConn(secondTablet, secondTabletHealthStream)
+ secondTabletConn.errCh = make(chan error)
+
+ thirdTabletHealthStream := make(chan *querypb.StreamHealthResponse)
+ thirdTabletConn := createFakeConn(thirdTablet, thirdTabletHealthStream)
+ thirdTabletConn.errCh = make(chan error)
+
+ resultChan := hc.Subscribe()
+
+ hc.AddTablet(firstTablet)
+ <-resultChan
+
+ hc.AddTablet(secondTablet)
+ <-resultChan
+
+ hc.AddTablet(thirdTablet)
+ <-resultChan
+
+ firstTabletPrimaryTermStartTimestamp := time.Now().Unix() - 10
+
+ firstTabletHealthStream <- &querypb.StreamHealthResponse{
+ TabletAlias: firstTablet.Alias,
+ Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY},
+ Serving: true,
+
+ PrimaryTermStartTimestamp: firstTabletPrimaryTermStartTimestamp,
+ RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.5},
+ }
+ <-resultChan
+
+ secondTabletHealthStream <- &querypb.StreamHealthResponse{
+ TabletAlias: secondTablet.Alias,
+ Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA},
+ Serving: true,
+
+ PrimaryTermStartTimestamp: 0,
+ RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.5},
+ }
+ <-resultChan
+
+ thirdTabletHealthStream <- &querypb.StreamHealthResponse{
+ TabletAlias: thirdTablet.Alias,
+ Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA},
+ Serving: true,
+
+ PrimaryTermStartTimestamp: 0,
+ RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.5},
+ }
+ <-resultChan
+
+ secondTabletPrimaryTermStartTimestamp := time.Now().Unix()
+
+ // Simulate a failover
+ firstTabletHealthStream <- &querypb.StreamHealthResponse{
+ TabletAlias: firstTablet.Alias,
+ Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY},
+ Serving: true,
+
+ PrimaryTermStartTimestamp: firstTabletPrimaryTermStartTimestamp,
+ RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.5},
+ }
+ <-resultChan
+
+ secondTabletHealthStream <- &querypb.StreamHealthResponse{
+ TabletAlias: secondTablet.Alias,
+ Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY},
+ Serving: true,
+
+ PrimaryTermStartTimestamp: secondTabletPrimaryTermStartTimestamp,
+ RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.5},
+ }
+ <-resultChan
+
+ hc.RemoveTablet(thirdTablet)
+
+ // `secondTablet` should be the primary now
+ expectedTabletStats := []*TabletHealth{{
+ Tablet: secondTablet,
+ Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY},
+ Serving: true,
+ Stats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.5},
+ PrimaryTermStartTime: secondTabletPrimaryTermStartTimestamp,
+ }}
+
+ actualTabletStats := hc.GetHealthyTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY})
+ mustMatch(t, expectedTabletStats, actualTabletStats, "unexpected result")
+}
+
// TestGetHealthyTablets tests the functionality of GetHealthyTabletStats.
func TestGetHealthyTablets(t *testing.T) {
ctx := utils.LeakCheckContext(t)
diff --git a/go/vt/discovery/topology_watcher_test.go b/go/vt/discovery/topology_watcher_test.go
index 775267af65c..834fdcb1afe 100644
--- a/go/vt/discovery/topology_watcher_test.go
+++ b/go/vt/discovery/topology_watcher_test.go
@@ -28,6 +28,7 @@ import (
"google.golang.org/protobuf/proto"
"vitess.io/vitess/go/test/utils"
+ querypb "vitess.io/vitess/go/vt/proto/query"
"vitess.io/vitess/go/vt/logutil"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
@@ -709,3 +710,67 @@ func TestGetTabletErrorDoesNotRemoveFromHealthcheck(t *testing.T) {
assert.True(t, proto.Equal(tablet1, allTablets[key1]))
assert.True(t, proto.Equal(tablet2, allTablets[key2]))
}
+
+// TestDeadlockBetweenTopologyWatcherAndHealthCheck tests the possibility of a deadlock
+// between the topology watcher and the health check.
+// The issue https://github.com/vitessio/vitess/issues/16994 has more details on the deadlock.
+func TestDeadlockBetweenTopologyWatcherAndHealthCheck(t *testing.T) {
+ ctx := utils.LeakCheckContext(t)
+
+ // create a new memory topo server and an health check instance.
+ ts, _ := memorytopo.NewServerAndFactory(ctx, "zone-1")
+ hc := NewHealthCheck(ctx, time.Hour, time.Hour, ts, "zone-1", "", nil)
+ defer hc.Close()
+ defer hc.topoWatchers[0].Stop()
+
+ // Add a tablet to the topology.
+ tablet1 := &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone-1",
+ Uid: 100,
+ },
+ Type: topodatapb.TabletType_REPLICA,
+ Hostname: "host1",
+ PortMap: map[string]int32{
+ "grpc": 123,
+ },
+ Keyspace: "keyspace",
+ Shard: "shard",
+ }
+ err := ts.CreateTablet(ctx, tablet1)
+ // Run the first loadTablets call to ensure the tablet is present in the topology watcher.
+ hc.topoWatchers[0].loadTablets()
+ require.NoError(t, err)
+
+ // We want to run updateHealth with arguments that always
+ // make it trigger load Tablets.
+ th := &TabletHealth{
+ Tablet: tablet1,
+ Target: &querypb.Target{
+ Keyspace: "keyspace",
+ Shard: "shard",
+ TabletType: topodatapb.TabletType_REPLICA,
+ },
+ }
+ prevTarget := &querypb.Target{
+ Keyspace: "keyspace",
+ Shard: "shard",
+ TabletType: topodatapb.TabletType_PRIMARY,
+ }
+
+ // If we run the updateHealth function often enough, then we
+ // will see the deadlock where the topology watcher is trying to replace
+ // the tablet in the health check, but health check has the mutex acquired
+ // already because it is calling updateHealth.
+ // updateHealth itself will be stuck trying to send on the shared channel.
+ for i := 0; i < 10; i++ {
+ // Update the port of the tablet so that when update Health asks topo watcher to
+ // refresh the tablets, it finds an update and tries to replace it.
+ _, err = ts.UpdateTabletFields(ctx, tablet1.Alias, func(t *topodatapb.Tablet) error {
+ t.PortMap["testing_port"] = int32(i + 1)
+ return nil
+ })
+ require.NoError(t, err)
+ hc.updateHealth(th, prevTarget, false, false)
+ }
+}
diff --git a/go/vt/mysqlctl/backup_blackbox_race_test.go b/go/vt/mysqlctl/backup_blackbox_race_test.go
new file mode 100644
index 00000000000..1cbae4bd398
--- /dev/null
+++ b/go/vt/mysqlctl/backup_blackbox_race_test.go
@@ -0,0 +1,152 @@
+//go:build !race
+
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package mysqlctl_test is the blackbox tests for package mysqlctl.
+package mysqlctl_test
+
+import (
+ "fmt"
+ "os"
+ "path"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/mysql"
+ "vitess.io/vitess/go/mysql/fakesqldb"
+ "vitess.io/vitess/go/test/utils"
+ "vitess.io/vitess/go/vt/logutil"
+ "vitess.io/vitess/go/vt/mysqlctl"
+ "vitess.io/vitess/go/vt/mysqlctl/backupstats"
+ "vitess.io/vitess/go/vt/mysqlctl/filebackupstorage"
+ "vitess.io/vitess/go/vt/proto/topodata"
+ "vitess.io/vitess/go/vt/proto/vttime"
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/topo/memorytopo"
+)
+
+// This test triggers a certain code path that only happens when a backup file fails to be backed up,
+// if and only if all the other backup files have either started or finished. When we reach
+// this scenario, files no longer try to acquire the semaphore and thus the backup cannot fail
+// because of context deadline when acquiring it. At this point, the only place where the backup
+// can fail, is if the return of be.backupFiles fails, and we record the error correctly.
+// This test specifically tests this scenario; it arose because of issue https://github.com/vitessio/vitess/issues/17063
+// The test does:
+// 1. Create the backup and data directory
+// 2. Create a keyspace and shard
+// 3. Already create the last backup file that would be created
+// 4. Remove all permissions on this file
+// 5. Execute the backup
+// 6. The backup must fail due to an error on file number 3 ("cannot add file: 3")
+//
+// This test is extracted into its own file that won't be run if we do 'go test -race' as this test
+// exposes an old race condition that will be fixed after https://github.com/vitessio/vitess/pull/17062
+// Link to the race condition issue: https://github.com/vitessio/vitess/issues/17065
+func TestExecuteBackupWithFailureOnLastFile(t *testing.T) {
+ ctx := utils.LeakCheckContext(t)
+
+ // Set up local backup directory
+ id := fmt.Sprintf("%d", time.Now().UnixNano())
+ backupRoot := fmt.Sprintf("testdata/builtinbackup_test_%s", id)
+ filebackupstorage.FileBackupStorageRoot = backupRoot
+ require.NoError(t, createBackupDir(backupRoot, "innodb", "log", "datadir"))
+ dataDir := path.Join(backupRoot, "datadir")
+ // Add some files under data directory to force backup to execute semaphore acquire inside
+ // backupFiles() method (https://github.com/vitessio/vitess/blob/main/go/vt/mysqlctl/builtinbackupengine.go#L483).
+ require.NoError(t, createBackupDir(dataDir, "test1"))
+ require.NoError(t, createBackupDir(dataDir, "test2"))
+ require.NoError(t, createBackupFiles(path.Join(dataDir, "test1"), 2, "ibd"))
+ require.NoError(t, createBackupFiles(path.Join(dataDir, "test2"), 2, "ibd"))
+ defer os.RemoveAll(backupRoot)
+
+ needIt, err := needInnoDBRedoLogSubdir()
+ require.NoError(t, err)
+ if needIt {
+ fpath := path.Join("log", mysql.DynamicRedoLogSubdir)
+ if err := createBackupDir(backupRoot, fpath); err != nil {
+ require.Failf(t, err.Error(), "failed to create directory: %s", fpath)
+ }
+ }
+
+ // Set up topo
+ keyspace, shard := "mykeyspace", "-"
+ ts := memorytopo.NewServer(ctx, "cell1")
+ defer ts.Close()
+
+ require.NoError(t, ts.CreateKeyspace(ctx, keyspace, &topodata.Keyspace{}))
+ require.NoError(t, ts.CreateShard(ctx, keyspace, shard))
+
+ tablet := topo.NewTablet(100, "cell1", "mykeyspace-00-80-0100")
+ tablet.Keyspace = keyspace
+ tablet.Shard = shard
+
+ require.NoError(t, ts.CreateTablet(ctx, tablet))
+
+ _, err = ts.UpdateShardFields(ctx, keyspace, shard, func(si *topo.ShardInfo) error {
+ si.PrimaryAlias = &topodata.TabletAlias{Uid: 100, Cell: "cell1"}
+
+ now := time.Now()
+ si.PrimaryTermStartTime = &vttime.Time{Seconds: int64(now.Second()), Nanoseconds: int32(now.Nanosecond())}
+
+ return nil
+ })
+
+ require.NoError(t, err)
+
+ be := &mysqlctl.BuiltinBackupEngine{}
+ bh := filebackupstorage.NewBackupHandle(nil, "", "", false)
+ // Spin up a fake daemon to be used in backups. It needs to be allowed to receive:
+ // "STOP SLAVE", "START SLAVE", in that order.
+ fakedb := fakesqldb.New(t)
+ defer fakedb.Close()
+ mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb)
+ defer mysqld.Close()
+ mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"}
+
+ // With this setup, 4 backup files will be created (0, 1, 2, 3). For the last file (3), we create
+ // it in advance and remove all permissions on the file so that be.ExecuteBackup will not
+ // be able to overwrite the file and will thus fail, triggering the error mechanism after calling be.backupFile.
+ lastBackupFile := path.Join(backupRoot, "3")
+ f, err := os.Create(lastBackupFile)
+ require.NoError(t, err)
+ _, err = f.Write(make([]byte, 1024))
+ require.NoError(t, err)
+ require.NoError(t, f.Chmod(0444))
+ require.NoError(t, f.Close())
+
+ backupResult, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{
+ Logger: logutil.NewConsoleLogger(),
+ Mysqld: mysqld,
+ Cnf: &mysqlctl.Mycnf{
+ InnodbDataHomeDir: path.Join(backupRoot, "innodb"),
+ InnodbLogGroupHomeDir: path.Join(backupRoot, "log"),
+ DataDir: path.Join(backupRoot, "datadir"),
+ },
+ Stats: backupstats.NewFakeStats(),
+ Concurrency: 4,
+ HookExtraEnv: map[string]string{},
+ TopoServer: ts,
+ Keyspace: keyspace,
+ Shard: shard,
+ MysqlShutdownTimeout: mysqlShutdownTimeout,
+ }, bh)
+
+ require.ErrorContains(t, err, "cannot add file: 3")
+ require.Equal(t, mysqlctl.BackupUnusable, backupResult)
+}
diff --git a/go/vt/mysqlctl/backup_blackbox_test.go b/go/vt/mysqlctl/backup_blackbox_test.go
index 4508c4e4306..eafe34f7f07 100644
--- a/go/vt/mysqlctl/backup_blackbox_test.go
+++ b/go/vt/mysqlctl/backup_blackbox_test.go
@@ -402,7 +402,7 @@ func TestExecuteBackupWithCanceledContext(t *testing.T) {
require.Error(t, err)
// all four files will fail
- require.ErrorContains(t, err, "context canceled;context canceled;context canceled;context canceled")
+ require.ErrorContains(t, err, "context canceled")
assert.Equal(t, mysqlctl.BackupUnusable, backupResult)
}
diff --git a/go/vt/mysqlctl/builtinbackupengine.go b/go/vt/mysqlctl/builtinbackupengine.go
index 94ed7bdee6a..af6b8fdbd85 100644
--- a/go/vt/mysqlctl/builtinbackupengine.go
+++ b/go/vt/mysqlctl/builtinbackupengine.go
@@ -611,39 +611,51 @@ func (be *BuiltinBackupEngine) backupFiles(
// Backup with the provided concurrency.
sema := semaphore.NewWeighted(int64(params.Concurrency))
wg := sync.WaitGroup{}
+
+ ctxCancel, cancel := context.WithCancel(ctx)
+ defer cancel()
+
for i := range fes {
wg.Add(1)
go func(i int) {
defer wg.Done()
fe := &fes[i]
// Wait until we are ready to go, return if we encounter an error
- acqErr := sema.Acquire(ctx, 1)
+ acqErr := sema.Acquire(ctxCancel, 1)
if acqErr != nil {
log.Errorf("Unable to acquire semaphore needed to backup file: %s, err: %s", fe.Name, acqErr.Error())
bh.RecordError(acqErr)
+ cancel()
return
}
defer sema.Release(1)
+
+ // First check if we have any error; if we do, there is no point in trying to back up this file.
+ // We check for errors before checking if the context is canceled on purpose, if there was an
+ // error, the context would have been canceled already.
+ if bh.HasErrors() {
+ params.Logger.Errorf("Failed to restore files due to error: %v", bh.Error())
+ return
+ }
+
// Check for context cancellation explicitly because, the way semaphore code is written, theoretically we might
// end up not throwing an error even after cancellation. Please see https://cs.opensource.google/go/x/sync/+/refs/tags/v0.1.0:semaphore/semaphore.go;l=66,
// which suggests that if the context is already done, `Acquire()` may still succeed without blocking. This introduces
// unpredictability in my test cases, so in order to avoid that, I am adding this cancellation check.
select {
- case <-ctx.Done():
+ case <-ctxCancel.Done():
log.Errorf("Context canceled or timed out during %q backup", fe.Name)
bh.RecordError(vterrors.Errorf(vtrpc.Code_CANCELED, "context canceled"))
return
default:
}
- if bh.HasErrors() {
- params.Logger.Infof("failed to backup files due to error.")
- return
- }
-
// Backup the individual file.
name := fmt.Sprintf("%v", i)
- bh.RecordError(be.backupFile(ctx, params, bh, fe, name))
+ if err := be.backupFile(ctxCancel, params, bh, fe, name); err != nil {
+ bh.RecordError(err)
+ cancel()
+ }
}(i)
}
@@ -775,11 +787,14 @@ func (bp *backupPipe) HashString() string {
return hex.EncodeToString(bp.crc32.Sum(nil))
}
-func (bp *backupPipe) ReportProgress(period time.Duration, logger logutil.Logger) {
+func (bp *backupPipe) ReportProgress(ctx context.Context, period time.Duration, logger logutil.Logger) {
tick := time.NewTicker(period)
defer tick.Stop()
for {
select {
+ case <-ctx.Done():
+ logger.Infof("Canceled %q file", bp.filename)
+ return
case <-bp.done:
logger.Infof("Done taking Backup %q", bp.filename)
return
@@ -822,7 +837,7 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupPara
}
br := newBackupReader(fe.Name, fi.Size(), timedSource)
- go br.ReportProgress(builtinBackupProgress, params.Logger)
+ go br.ReportProgress(ctx, builtinBackupProgress, params.Logger)
// Open the destination file for writing, and a buffer.
params.Logger.Infof("Backing up file: %v", fe.Name)
@@ -1021,43 +1036,53 @@ func (be *BuiltinBackupEngine) restoreFiles(ctx context.Context, params RestoreP
sema := semaphore.NewWeighted(int64(params.Concurrency))
rec := concurrency.AllErrorRecorder{}
wg := sync.WaitGroup{}
+
+ ctxCancel, cancel := context.WithCancel(ctx)
+ defer cancel()
+
for i := range fes {
wg.Add(1)
go func(i int) {
defer wg.Done()
fe := &fes[i]
// Wait until we are ready to go, return if we encounter an error
- acqErr := sema.Acquire(ctx, 1)
+ acqErr := sema.Acquire(ctxCancel, 1)
if acqErr != nil {
log.Errorf("Unable to acquire semaphore needed to restore file: %s, err: %s", fe.Name, acqErr.Error())
rec.RecordError(acqErr)
+ cancel()
return
}
defer sema.Release(1)
+
+ // First check if we have any error, if we have, there is no point trying to restore this file.
+ // We check for errors before checking if the context is canceled on purpose, if there was an
+ // error, the context would have been canceled already.
+ if rec.HasErrors() {
+ params.Logger.Errorf("Failed to restore files due to error: %v", bh.Error())
+ return
+ }
+
// Check for context cancellation explicitly because, the way semaphore code is written, theoretically we might
// end up not throwing an error even after cancellation. Please see https://cs.opensource.google/go/x/sync/+/refs/tags/v0.1.0:semaphore/semaphore.go;l=66,
// which suggests that if the context is already done, `Acquire()` may still succeed without blocking. This introduces
// unpredictability in my test cases, so in order to avoid that, I am adding this cancellation check.
select {
- case <-ctx.Done():
+ case <-ctxCancel.Done():
log.Errorf("Context canceled or timed out during %q restore", fe.Name)
rec.RecordError(vterrors.Errorf(vtrpc.Code_CANCELED, "context canceled"))
return
default:
}
- if rec.HasErrors() {
- params.Logger.Infof("Failed to restore files due to error.")
- return
- }
-
fe.ParentPath = createdDir
// And restore the file.
name := fmt.Sprintf("%v", i)
params.Logger.Infof("Copying file %v: %v", name, fe.Name)
- err := be.restoreFile(ctx, params, bh, fe, bm, name)
+ err := be.restoreFile(ctxCancel, params, bh, fe, bm, name)
if err != nil {
rec.RecordError(vterrors.Wrapf(err, "can't restore file %v to %v", name, fe.Name))
+ cancel()
}
}(i)
}
@@ -1087,7 +1112,7 @@ func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, params RestorePa
}()
br := newBackupReader(name, 0, timedSource)
- go br.ReportProgress(builtinBackupProgress, params.Logger)
+ go br.ReportProgress(ctx, builtinBackupProgress, params.Logger)
var reader io.Reader = br
// Open the destination file for writing.
diff --git a/go/vt/mysqlctl/fakemysqldaemon.go b/go/vt/mysqlctl/fakemysqldaemon.go
index 33a553a25e9..3f2eb741d92 100644
--- a/go/vt/mysqlctl/fakemysqldaemon.go
+++ b/go/vt/mysqlctl/fakemysqldaemon.go
@@ -286,13 +286,6 @@ func (fmd *FakeMysqlDaemon) GetServerUUID(ctx context.Context) (string, error) {
return "000000", nil
}
-// CurrentPrimaryPositionLocked is thread-safe.
-func (fmd *FakeMysqlDaemon) CurrentPrimaryPositionLocked(pos replication.Position) {
- fmd.mu.Lock()
- defer fmd.mu.Unlock()
- fmd.CurrentPrimaryPosition = pos
-}
-
// ReplicationStatus is part of the MysqlDaemon interface.
func (fmd *FakeMysqlDaemon) ReplicationStatus() (replication.ReplicationStatus, error) {
if fmd.ReplicationStatusError != nil {
@@ -316,6 +309,8 @@ func (fmd *FakeMysqlDaemon) ReplicationStatus() (replication.ReplicationStatus,
// PrimaryStatus is part of the MysqlDaemon interface.
func (fmd *FakeMysqlDaemon) PrimaryStatus(ctx context.Context) (replication.PrimaryStatus, error) {
+ fmd.mu.Lock()
+ defer fmd.mu.Unlock()
if fmd.PrimaryStatusError != nil {
return replication.PrimaryStatus{}, fmd.PrimaryStatusError
}
@@ -381,7 +376,21 @@ func (fmd *FakeMysqlDaemon) GetPreviousGTIDs(ctx context.Context, binlog string)
// PrimaryPosition is part of the MysqlDaemon interface.
func (fmd *FakeMysqlDaemon) PrimaryPosition() (replication.Position, error) {
- return fmd.CurrentPrimaryPosition, nil
+ return fmd.GetPrimaryPositionLocked(), nil
+}
+
+// GetPrimaryPositionLocked gets the primary position while holding the lock.
+func (fmd *FakeMysqlDaemon) GetPrimaryPositionLocked() replication.Position {
+ fmd.mu.Lock()
+ defer fmd.mu.Unlock()
+ return fmd.CurrentPrimaryPosition
+}
+
+// SetPrimaryPositionLocked is thread-safe.
+func (fmd *FakeMysqlDaemon) SetPrimaryPositionLocked(pos replication.Position) {
+ fmd.mu.Lock()
+ defer fmd.mu.Unlock()
+ fmd.CurrentPrimaryPosition = pos
}
// IsReadOnly is part of the MysqlDaemon interface.
diff --git a/go/vt/proto/query/query.pb.go b/go/vt/proto/query/query.pb.go
index 098ccad1032..b5e72b6a1f3 100644
--- a/go/vt/proto/query/query.pb.go
+++ b/go/vt/proto/query/query.pb.go
@@ -315,6 +315,8 @@ const (
// BITNUM specifies a base 2 binary type (unquoted varbinary).
// Properties: 34, IsText.
Type_BITNUM Type = 4130
+ // RAW specifies a type which won't be quoted but the value used as-is while encoding.
+ Type_RAW Type = 2084
)
// Enum value maps for Type.
@@ -355,6 +357,7 @@ var (
4128: "HEXNUM",
4129: "HEXVAL",
4130: "BITNUM",
+ 2084: "RAW",
}
Type_value = map[string]int32{
"NULL_TYPE": 0,
@@ -392,6 +395,7 @@ var (
"HEXNUM": 4128,
"HEXVAL": 4129,
"BITNUM": 4130,
+ "RAW": 2084,
}
)
@@ -6510,7 +6514,7 @@ var file_query_proto_rawDesc = []byte{
0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x80, 0x08, 0x12, 0x0d, 0x0a, 0x08, 0x49, 0x53, 0x51, 0x55,
0x4f, 0x54, 0x45, 0x44, 0x10, 0x80, 0x10, 0x12, 0x0b, 0x0a, 0x06, 0x49, 0x53, 0x54, 0x45, 0x58,
0x54, 0x10, 0x80, 0x20, 0x12, 0x0d, 0x0a, 0x08, 0x49, 0x53, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59,
- 0x10, 0x80, 0x40, 0x2a, 0xc0, 0x03, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09,
+ 0x10, 0x80, 0x40, 0x2a, 0xca, 0x03, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09,
0x4e, 0x55, 0x4c, 0x4c, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x04, 0x49,
0x4e, 0x54, 0x38, 0x10, 0x81, 0x02, 0x12, 0x0a, 0x0a, 0x05, 0x55, 0x49, 0x4e, 0x54, 0x38, 0x10,
0x82, 0x06, 0x12, 0x0a, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x31, 0x36, 0x10, 0x83, 0x02, 0x12, 0x0b,
@@ -6538,18 +6542,19 @@ var file_query_proto_rawDesc = []byte{
0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x52, 0x45, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x1f, 0x12,
0x0b, 0x0a, 0x06, 0x48, 0x45, 0x58, 0x4e, 0x55, 0x4d, 0x10, 0xa0, 0x20, 0x12, 0x0b, 0x0a, 0x06,
0x48, 0x45, 0x58, 0x56, 0x41, 0x4c, 0x10, 0xa1, 0x20, 0x12, 0x0b, 0x0a, 0x06, 0x42, 0x49, 0x54,
- 0x4e, 0x55, 0x4d, 0x10, 0xa2, 0x20, 0x2a, 0x46, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e,
- 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x45, 0x50, 0x41,
- 0x52, 0x45, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x02,
- 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x03, 0x2a, 0x31,
- 0x0a, 0x0f, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x79, 0x70,
- 0x65, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x49, 0x45, 0x57, 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06,
- 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10,
- 0x02, 0x42, 0x35, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x22, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f,
- 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x4e, 0x55, 0x4d, 0x10, 0xa2, 0x20, 0x12, 0x08, 0x0a, 0x03, 0x52, 0x41, 0x57, 0x10, 0xa4, 0x10,
+ 0x2a, 0x46, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53,
+ 0x74, 0x61, 0x74, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
+ 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x45, 0x50, 0x41, 0x52, 0x45, 0x10, 0x01, 0x12, 0x0a,
+ 0x0a, 0x06, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f,
+ 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x03, 0x2a, 0x31, 0x0a, 0x0f, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x56,
+ 0x49, 0x45, 0x57, 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53,
+ 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x02, 0x42, 0x35, 0x0a, 0x0f, 0x69,
+ 0x6f, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x22,
+ 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73,
+ 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x71, 0x75, 0x65,
+ 0x72, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/go/vt/servenv/version.go b/go/vt/servenv/version.go
index 61c606c65ea..4ff3046ea28 100644
--- a/go/vt/servenv/version.go
+++ b/go/vt/servenv/version.go
@@ -19,4 +19,4 @@ package servenv
// DO NOT EDIT
// THIS FILE IS AUTO-GENERATED DURING NEW RELEASES BY THE VITESS-RELEASER
-const versionName = "19.0.5"
+const versionName = "19.0.7"
diff --git a/go/vt/sqlparser/analyzer.go b/go/vt/sqlparser/analyzer.go
index ea0773d99cc..98b7677a1f3 100644
--- a/go/vt/sqlparser/analyzer.go
+++ b/go/vt/sqlparser/analyzer.go
@@ -137,7 +137,7 @@ func ASTToStatementType(stmt Statement) StatementType {
// CanNormalize takes Statement and returns if the statement can be normalized.
func CanNormalize(stmt Statement) bool {
switch stmt.(type) {
- case *Select, *Union, *Insert, *Update, *Delete, *Set, *CallProc, *Stream: // TODO: we could merge this logic into ASTrewriter
+ case *Select, *Union, *Insert, *Update, *Delete, *Set, *CallProc, *Stream, *VExplainStmt: // TODO: we could merge this logic into ASTrewriter
return true
}
return false
diff --git a/go/vt/sqlparser/ast_funcs.go b/go/vt/sqlparser/ast_funcs.go
index b3798bbd28f..0579552751d 100644
--- a/go/vt/sqlparser/ast_funcs.go
+++ b/go/vt/sqlparser/ast_funcs.go
@@ -1468,10 +1468,6 @@ func (op BinaryExprOperator) ToString() string {
return ShiftLeftStr
case ShiftRightOp:
return ShiftRightStr
- case JSONExtractOp:
- return JSONExtractOpStr
- case JSONUnquoteExtractOp:
- return JSONUnquoteExtractOpStr
default:
return "Unknown BinaryExprOperator"
}
diff --git a/go/vt/sqlparser/constants.go b/go/vt/sqlparser/constants.go
index 44b88384551..806d14f8588 100644
--- a/go/vt/sqlparser/constants.go
+++ b/go/vt/sqlparser/constants.go
@@ -160,19 +160,17 @@ const (
IsNotFalseStr = "is not false"
// BinaryExpr.Operator
- BitAndStr = "&"
- BitOrStr = "|"
- BitXorStr = "^"
- PlusStr = "+"
- MinusStr = "-"
- MultStr = "*"
- DivStr = "/"
- IntDivStr = "div"
- ModStr = "%"
- ShiftLeftStr = "<<"
- ShiftRightStr = ">>"
- JSONExtractOpStr = "->"
- JSONUnquoteExtractOpStr = "->>"
+ BitAndStr = "&"
+ BitOrStr = "|"
+ BitXorStr = "^"
+ PlusStr = "+"
+ MinusStr = "-"
+ MultStr = "*"
+ DivStr = "/"
+ IntDivStr = "div"
+ ModStr = "%"
+ ShiftLeftStr = "<<"
+ ShiftRightStr = ">>"
// UnaryExpr.Operator
UPlusStr = "+"
@@ -727,8 +725,6 @@ const (
ModOp
ShiftLeftOp
ShiftRightOp
- JSONExtractOp
- JSONUnquoteExtractOp
)
// Constant for Enum Type - UnaryExprOperator
diff --git a/go/vt/sqlparser/normalizer_test.go b/go/vt/sqlparser/normalizer_test.go
index de1fdc868ad..394968f2893 100644
--- a/go/vt/sqlparser/normalizer_test.go
+++ b/go/vt/sqlparser/normalizer_test.go
@@ -209,6 +209,23 @@ func TestNormalize(t *testing.T) {
outbv: map[string]*querypb.BindVariable{
"v1": sqltypes.BitNumBindVariable([]byte("0b11")),
},
+ }, {
+ // json value in insert
+ in: "insert into t values ('{\"k\", \"v\"}')",
+ outstmt: "insert into t values (:bv1 /* VARCHAR */)",
+ outbv: map[string]*querypb.BindVariable{
+ "bv1": sqltypes.StringBindVariable("{\"k\", \"v\"}"),
+ },
+ }, {
+ // json function in insert
+ in: "insert into t values (JSON_OBJECT('_id', 27, 'name', 'carrot'))",
+ outstmt: "insert into t values (json_object(:bv1 /* VARCHAR */, :bv2 /* INT64 */, :bv3 /* VARCHAR */, :bv4 /* VARCHAR */))",
+ outbv: map[string]*querypb.BindVariable{
+ "bv1": sqltypes.StringBindVariable("_id"),
+ "bv2": sqltypes.Int64BindVariable(27),
+ "bv3": sqltypes.StringBindVariable("name"),
+ "bv4": sqltypes.StringBindVariable("carrot"),
+ },
}, {
// ORDER BY column_position
in: "select a, b from t order by 1 asc",
@@ -397,6 +414,13 @@ func TestNormalize(t *testing.T) {
"bv2": sqltypes.Int64BindVariable(2),
"bv3": sqltypes.TestBindVariable([]any{1, 2}),
},
+ }, {
+ in: "SELECT 1 WHERE (~ (1||0)) IS NULL",
+ outstmt: "select :bv1 /* INT64 */ from dual where ~(:bv1 /* INT64 */ or :bv2 /* INT64 */) is null",
+ outbv: map[string]*querypb.BindVariable{
+ "bv1": sqltypes.Int64BindVariable(1),
+ "bv2": sqltypes.Int64BindVariable(0),
+ },
}}
parser := NewTestParser()
for _, tc := range testcases {
@@ -487,6 +511,7 @@ func TestNormalizeOneCasae(t *testing.T) {
err = Normalize(tree, NewReservedVars("vtg", known), bv)
require.NoError(t, err)
normalizerOutput := String(tree)
+ require.EqualValues(t, testOne.output, normalizerOutput)
if normalizerOutput == "otheradmin" || normalizerOutput == "otherread" {
return
}
diff --git a/go/vt/sqlparser/parse_test.go b/go/vt/sqlparser/parse_test.go
index c6e2e37a1ef..1d5a00aa892 100644
--- a/go/vt/sqlparser/parse_test.go
+++ b/go/vt/sqlparser/parse_test.go
@@ -1005,9 +1005,11 @@ var (
}, {
input: "select /* u~ */ 1 from t where a = ~b",
}, {
- input: "select /* -> */ a.b -> 'ab' from t",
+ input: "select /* -> */ a.b -> 'ab' from t",
+ output: "select /* -> */ json_extract(a.b, 'ab') from t",
}, {
- input: "select /* -> */ a.b ->> 'ab' from t",
+ input: "select /* -> */ a.b ->> 'ab' from t",
+ output: "select /* -> */ json_unquote(json_extract(a.b, 'ab')) from t",
}, {
input: "select /* empty function */ 1 from t where a = b()",
}, {
@@ -5772,7 +5774,7 @@ partition by range (YEAR(purchased)) subpartition by hash (TO_DAYS(purchased))
},
{
input: "create table t (id int, info JSON, INDEX zips((CAST(info->'$.field' AS unsigned ARRAY))))",
- output: "create table t (\n\tid int,\n\tinfo JSON,\n\tkey zips ((cast(info -> '$.field' as unsigned array)))\n)",
+ output: "create table t (\n\tid int,\n\tinfo JSON,\n\tkey zips ((cast(json_extract(info, '$.field') as unsigned array)))\n)",
},
}
parser := NewTestParser()
diff --git a/go/vt/sqlparser/parsed_query.go b/go/vt/sqlparser/parsed_query.go
index a612e555ee8..491e7400988 100644
--- a/go/vt/sqlparser/parsed_query.go
+++ b/go/vt/sqlparser/parsed_query.go
@@ -101,7 +101,7 @@ func EncodeValue(buf *strings.Builder, value *querypb.BindVariable) {
sqltypes.ProtoToValue(bv).EncodeSQLStringBuilder(buf)
}
buf.WriteByte(')')
- case querypb.Type_JSON:
+ case querypb.Type_RAW:
v, _ := sqltypes.BindVariableToValue(value)
buf.Write(v.Raw())
default:
diff --git a/go/vt/sqlparser/parsed_query_test.go b/go/vt/sqlparser/parsed_query_test.go
index ef59676883f..2e01655c644 100644
--- a/go/vt/sqlparser/parsed_query_test.go
+++ b/go/vt/sqlparser/parsed_query_test.go
@@ -20,6 +20,8 @@ import (
"reflect"
"testing"
+ "github.com/stretchr/testify/require"
+
"vitess.io/vitess/go/sqltypes"
querypb "vitess.io/vitess/go/vt/proto/query"
@@ -80,6 +82,14 @@ func TestGenerateQuery(t *testing.T) {
"vals": sqltypes.TestBindVariable([]any{1, "aa"}),
},
output: "select * from a where id in (1, 'aa')",
+ }, {
+ desc: "json bindvar and raw bindvar",
+ query: "insert into t values (:v1, :v2)",
+ bindVars: map[string]*querypb.BindVariable{
+ "v1": sqltypes.ValueBindVariable(sqltypes.MakeTrusted(querypb.Type_JSON, []byte(`{"key": "value"}`))),
+ "v2": sqltypes.ValueBindVariable(sqltypes.MakeTrusted(querypb.Type_RAW, []byte(`json_object("k", "v")`))),
+ },
+ output: `insert into t values ('{\"key\": \"value\"}', json_object("k", "v"))`,
}, {
desc: "list bind vars 0 arguments",
query: "select * from a where id in ::vals",
@@ -138,20 +148,19 @@ func TestGenerateQuery(t *testing.T) {
parser := NewTestParser()
for _, tcase := range tcases {
- tree, err := parser.Parse(tcase.query)
- if err != nil {
- t.Errorf("parse failed for %s: %v", tcase.desc, err)
- continue
- }
- buf := NewTrackedBuffer(nil)
- buf.Myprintf("%v", tree)
- pq := buf.ParsedQuery()
- bytes, err := pq.GenerateQuery(tcase.bindVars, tcase.extras)
- if err != nil {
- assert.Equal(t, tcase.output, err.Error())
- } else {
- assert.Equal(t, tcase.output, string(bytes))
- }
+ t.Run(tcase.query, func(t *testing.T) {
+ tree, err := parser.Parse(tcase.query)
+ require.NoError(t, err)
+ buf := NewTrackedBuffer(nil)
+ buf.Myprintf("%v", tree)
+ pq := buf.ParsedQuery()
+ bytes, err := pq.GenerateQuery(tcase.bindVars, tcase.extras)
+ if err != nil {
+ assert.Equal(t, tcase.output, err.Error())
+ } else {
+ assert.Equal(t, tcase.output, bytes)
+ }
+ })
}
}
diff --git a/go/vt/sqlparser/precedence.go b/go/vt/sqlparser/precedence.go
index ec590b23f95..1b5576f65b1 100644
--- a/go/vt/sqlparser/precedence.go
+++ b/go/vt/sqlparser/precedence.go
@@ -38,7 +38,6 @@ const (
P14
P15
P16
- P17
)
// precedenceFor returns the precedence of an expression.
@@ -58,10 +57,7 @@ func precedenceFor(in Expr) Precendence {
case *BetweenExpr:
return P12
case *ComparisonExpr:
- switch node.Operator {
- case EqualOp, NotEqualOp, GreaterThanOp, GreaterEqualOp, LessThanOp, LessEqualOp, LikeOp, InOp, RegexpOp, NullSafeEqualOp:
- return P11
- }
+ return P11
case *IsExpr:
return P11
case *BinaryExpr:
@@ -83,7 +79,7 @@ func precedenceFor(in Expr) Precendence {
switch node.Operator {
case UPlusOp, UMinusOp:
return P4
- case BangOp:
+ default:
return P3
}
}
diff --git a/go/vt/sqlparser/precedence_test.go b/go/vt/sqlparser/precedence_test.go
index 774ada31dbd..d06014369be 100644
--- a/go/vt/sqlparser/precedence_test.go
+++ b/go/vt/sqlparser/precedence_test.go
@@ -159,6 +159,9 @@ func TestParens(t *testing.T) {
{in: "(10 - 2) - 1", expected: "10 - 2 - 1"},
{in: "10 - (2 - 1)", expected: "10 - (2 - 1)"},
{in: "0 <=> (1 and 0)", expected: "0 <=> (1 and 0)"},
+ {in: "(~ (1||0)) IS NULL", expected: "~(1 or 0) is null"},
+ {in: "1 not like ('a' is null)", expected: "1 not like ('a' is null)"},
+ {in: ":vtg1 not like (:vtg2 is null)", expected: ":vtg1 not like (:vtg2 is null)"},
}
parser := NewTestParser()
diff --git a/go/vt/sqlparser/sql.go b/go/vt/sqlparser/sql.go
index 2e4c30eb682..41e942d9406 100644
--- a/go/vt/sqlparser/sql.go
+++ b/go/vt/sqlparser/sql.go
@@ -17949,7 +17949,7 @@ yydefault:
var yyLOCAL Expr
//line sql.y:5515
{
- yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: JSONExtractOp, Right: yyDollar[3].exprUnion()}
+ yyLOCAL = &JSONExtractExpr{JSONDoc: yyDollar[1].exprUnion(), PathList: []Expr{yyDollar[3].exprUnion()}}
}
yyVAL.union = yyLOCAL
case 1066:
@@ -17957,7 +17957,7 @@ yydefault:
var yyLOCAL Expr
//line sql.y:5519
{
- yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: JSONUnquoteExtractOp, Right: yyDollar[3].exprUnion()}
+ yyLOCAL = &JSONUnquoteExpr{JSONValue: &JSONExtractExpr{JSONDoc: yyDollar[1].exprUnion(), PathList: []Expr{yyDollar[3].exprUnion()}}}
}
yyVAL.union = yyLOCAL
case 1067:
diff --git a/go/vt/sqlparser/sql.y b/go/vt/sqlparser/sql.y
index 420155020f8..4bf0e4834f6 100644
--- a/go/vt/sqlparser/sql.y
+++ b/go/vt/sqlparser/sql.y
@@ -5513,11 +5513,11 @@ function_call_keyword
}
| column_name_or_offset JSON_EXTRACT_OP text_literal_or_arg
{
- $$ = &BinaryExpr{Left: $1, Operator: JSONExtractOp, Right: $3}
+ $$ = &JSONExtractExpr{JSONDoc: $1, PathList: []Expr{$3}}
}
| column_name_or_offset JSON_UNQUOTE_EXTRACT_OP text_literal_or_arg
{
- $$ = &BinaryExpr{Left: $1, Operator: JSONUnquoteExtractOp, Right: $3}
+ $$ = &JSONUnquoteExpr{JSONValue: &JSONExtractExpr{JSONDoc: $1, PathList: []Expr{$3}}}
}
column_names_opt_paren:
diff --git a/go/vt/sqlparser/tracked_buffer_test.go b/go/vt/sqlparser/tracked_buffer_test.go
index 4dff65634e8..1ff5ab3bcb9 100644
--- a/go/vt/sqlparser/tracked_buffer_test.go
+++ b/go/vt/sqlparser/tracked_buffer_test.go
@@ -270,7 +270,7 @@ func TestCanonicalOutput(t *testing.T) {
},
{
"create table t (id int, info JSON, INDEX zips((CAST(info->'$.field' AS unsigned array))))",
- "CREATE TABLE `t` (\n\t`id` int,\n\t`info` JSON,\n\tKEY `zips` ((CAST(`info` -> '$.field' AS unsigned array)))\n)",
+ "CREATE TABLE `t` (\n\t`id` int,\n\t`info` JSON,\n\tKEY `zips` ((CAST(JSON_EXTRACT(`info`, '$.field') AS unsigned array)))\n)",
},
{
"select 1 from t1 into outfile 'test/t1.txt'",
diff --git a/go/vt/vtctl/grpcvtctldserver/server_test.go b/go/vt/vtctl/grpcvtctldserver/server_test.go
index 952f4e37e8d..fd6827537c4 100644
--- a/go/vt/vtctl/grpcvtctldserver/server_test.go
+++ b/go/vt/vtctl/grpcvtctldserver/server_test.go
@@ -12620,7 +12620,7 @@ func TestTabletExternallyReparented(t *testing.T) {
defer func() {
topofactory.SetError(nil)
- ctx, cancel := context.WithTimeout(ctx, time.Millisecond*10)
+ ctx, cancel := context.WithTimeout(ctx, time.Second*10)
defer cancel()
resp, err := vtctld.GetTablets(ctx, &vtctldatapb.GetTabletsRequest{})
diff --git a/go/vt/vtctl/reparentutil/replication.go b/go/vt/vtctl/reparentutil/replication.go
index 9b33a5b0536..7c39befded1 100644
--- a/go/vt/vtctl/reparentutil/replication.go
+++ b/go/vt/vtctl/reparentutil/replication.go
@@ -123,7 +123,7 @@ func FindValidEmergencyReparentCandidates(
case len(errantGTIDs) != 0:
// This tablet has errant GTIDs. It's not a valid candidate for
// reparent, so don't insert it into the final mapping.
- log.Errorf("skipping %v because we detected errant GTIDs - %v", alias, errantGTIDs)
+ log.Errorf("skipping %v with GTIDSet:%v because we detected errant GTIDs - %v", alias, relayLogGTIDSet, errantGTIDs)
continue
}
diff --git a/go/vt/vtctl/reparentutil/replication_test.go b/go/vt/vtctl/reparentutil/replication_test.go
index b7a2bcb07e7..eb4602f7095 100644
--- a/go/vt/vtctl/reparentutil/replication_test.go
+++ b/go/vt/vtctl/reparentutil/replication_test.go
@@ -161,7 +161,7 @@ func TestFindValidEmergencyReparentCandidates(t *testing.T) {
shouldErr: false,
},
{
- name: "tablet with errant GTIDs is excluded",
+ name: "tablet with superset GTIDs is included",
statusMap: map[string]*replicationdatapb.StopReplicationStatus{
"r1": {
After: &replicationdatapb.Status{
@@ -169,19 +169,33 @@ func TestFindValidEmergencyReparentCandidates(t *testing.T) {
RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5",
},
},
- "errant": {
+ "r2": {
After: &replicationdatapb.Status{
SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562",
RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5,AAAAAAAA-71CA-11E1-9E33-C80AA9429562:1",
},
},
},
- primaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{
- "p1": {
- Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5",
+ expected: []string{"r1", "r2"},
+ shouldErr: false,
+ },
+ {
+ name: "tablets with errant GTIDs are excluded",
+ statusMap: map[string]*replicationdatapb.StopReplicationStatus{
+ "r1": {
+ After: &replicationdatapb.Status{
+ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562",
+ RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5,AAAAAAAA-71CA-11E1-9E33-C80AA9429562:1",
+ },
+ },
+ "r2": {
+ After: &replicationdatapb.Status{
+ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562",
+ RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5,AAAAAAAA-71CA-11E1-9E33-C80AA9429562:2-3",
+ },
},
},
- expected: []string{"r1", "p1"},
+ expected: []string{},
shouldErr: false,
},
{
diff --git a/go/vt/vtgate/engine/route.go b/go/vt/vtgate/engine/route.go
index 1a0c17d6be5..7185f8db36c 100644
--- a/go/vt/vtgate/engine/route.go
+++ b/go/vt/vtgate/engine/route.go
@@ -137,11 +137,12 @@ func (route *Route) SetTruncateColumnCount(count int) {
func (route *Route) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) {
ctx, cancelFunc := addQueryTimeout(ctx, vcursor, route.QueryTimeout)
defer cancelFunc()
- qr, err := route.executeInternal(ctx, vcursor, bindVars, wantfields)
+ rss, bvs, err := route.findRoute(ctx, vcursor, bindVars)
if err != nil {
return nil, err
}
- return qr.Truncate(route.TruncateColumnCount), nil
+
+ return route.executeShards(ctx, vcursor, bindVars, wantfields, rss, bvs)
}
// addQueryTimeout adds a query timeout to the context it receives and returns the modified context along with the cancel function.
@@ -159,20 +160,6 @@ const (
IgnoreReserveTxn cxtKey = iota
)
-func (route *Route) executeInternal(
- ctx context.Context,
- vcursor VCursor,
- bindVars map[string]*querypb.BindVariable,
- wantfields bool,
-) (*sqltypes.Result, error) {
- rss, bvs, err := route.findRoute(ctx, vcursor, bindVars)
- if err != nil {
- return nil, err
- }
-
- return route.executeShards(ctx, vcursor, bindVars, wantfields, rss, bvs)
-}
-
func (route *Route) executeShards(
ctx context.Context,
vcursor VCursor,
@@ -228,11 +215,15 @@ func (route *Route) executeShards(
}
}
- if len(route.OrderBy) == 0 {
- return result, nil
+ if len(route.OrderBy) != 0 {
+ var err error
+ result, err = route.sort(result)
+ if err != nil {
+ return nil, err
+ }
}
- return route.sort(result)
+ return result.Truncate(route.TruncateColumnCount), nil
}
func filterOutNilErrors(errs []error) []error {
@@ -389,10 +380,8 @@ func (route *Route) sort(in *sqltypes.Result) (*sqltypes.Result, error) {
// the contents of any row.
out := in.ShallowCopy()
- if err := route.OrderBy.SortResult(out); err != nil {
- return nil, err
- }
- return out.Truncate(route.TruncateColumnCount), nil
+ err := route.OrderBy.SortResult(out)
+ return out, err
}
func (route *Route) description() PrimitiveDescription {
diff --git a/go/vt/vtgate/engine/vindex_lookup.go b/go/vt/vtgate/engine/vindex_lookup.go
index 8bf8755c40e..2e0e2857498 100644
--- a/go/vt/vtgate/engine/vindex_lookup.go
+++ b/go/vt/vtgate/engine/vindex_lookup.go
@@ -252,7 +252,7 @@ func (vr *VindexLookup) generateIds(ctx context.Context, vcursor VCursor, bindVa
switch vr.Opcode {
case Equal, EqualUnique:
return []sqltypes.Value{value.Value(vcursor.ConnCollation())}, nil
- case IN:
+ case IN, MultiEqual:
return value.TupleValues(), nil
}
return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "opcode %s not supported for VindexLookup", vr.Opcode.String())
diff --git a/go/vt/vtgate/engine/vindex_lookup_test.go b/go/vt/vtgate/engine/vindex_lookup_test.go
new file mode 100644
index 00000000000..d734bf12080
--- /dev/null
+++ b/go/vt/vtgate/engine/vindex_lookup_test.go
@@ -0,0 +1,135 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package engine
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/sqltypes"
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ "vitess.io/vitess/go/vt/vtgate/evalengine"
+ "vitess.io/vitess/go/vt/vtgate/vindexes"
+)
+
+var (
+ vindex, _ = vindexes.CreateVindex("lookup_unique", "", map[string]string{
+ "table": "lkp",
+ "from": "from",
+ "to": "toc",
+ "write_only": "true",
+ })
+ ks = &vindexes.Keyspace{Name: "ks", Sharded: true}
+)
+
+func TestVindexLookup(t *testing.T) {
+ planableVindex, ok := vindex.(vindexes.LookupPlanable)
+ require.True(t, ok, "not a lookup vindex")
+ _, args := planableVindex.Query()
+
+ fp := &fakePrimitive{
+ results: []*sqltypes.Result{
+ sqltypes.MakeTestResult(
+ sqltypes.MakeTestFields("id|keyspace_id", "int64|varbinary"),
+ "1|\x10"),
+ },
+ }
+ route := NewRoute(ByDestination, ks, "dummy_select", "dummy_select_field")
+ vdxLookup := &VindexLookup{
+ Opcode: EqualUnique,
+ Keyspace: ks,
+ Vindex: planableVindex,
+ Arguments: args,
+ Values: []evalengine.Expr{evalengine.NewLiteralInt(1)},
+ Lookup: fp,
+ SendTo: route,
+ }
+
+ vc := &loggingVCursor{results: []*sqltypes.Result{defaultSelectResult}}
+
+ result, err := vdxLookup.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false)
+ require.NoError(t, err)
+ fp.ExpectLog(t, []string{`Execute from: type:TUPLE values:{type:INT64 value:"1"} false`})
+ vc.ExpectLog(t, []string{
+ `ResolveDestinations ks [type:INT64 value:"1"] Destinations:DestinationKeyspaceID(10)`,
+ `ExecuteMultiShard ks.-20: dummy_select {} false false`,
+ })
+ expectResult(t, result, defaultSelectResult)
+
+ fp.rewind()
+ vc.Rewind()
+ result, err = wrapStreamExecute(vdxLookup, vc, map[string]*querypb.BindVariable{}, false)
+ require.NoError(t, err)
+ vc.ExpectLog(t, []string{
+ `ResolveDestinations ks [type:INT64 value:"1"] Destinations:DestinationKeyspaceID(10)`,
+ `StreamExecuteMulti dummy_select ks.-20: {} `,
+ })
+ expectResult(t, result, defaultSelectResult)
+}
+
+func TestVindexLookupTruncate(t *testing.T) {
+ planableVindex, ok := vindex.(vindexes.LookupPlanable)
+ require.True(t, ok, "not a lookup vindex")
+ _, args := planableVindex.Query()
+
+ fp := &fakePrimitive{
+ results: []*sqltypes.Result{
+ sqltypes.MakeTestResult(
+ sqltypes.MakeTestFields("id|keyspace_id", "int64|varbinary"),
+ "1|\x10"),
+ },
+ }
+ route := NewRoute(ByDestination, ks, "dummy_select", "dummy_select_field")
+ route.TruncateColumnCount = 1
+ vdxLookup := &VindexLookup{
+ Opcode: EqualUnique,
+ Keyspace: ks,
+ Vindex: planableVindex,
+ Arguments: args,
+ Values: []evalengine.Expr{evalengine.NewLiteralInt(1)},
+ Lookup: fp,
+ SendTo: route,
+ }
+
+ vc := &loggingVCursor{results: []*sqltypes.Result{
+ sqltypes.MakeTestResult(sqltypes.MakeTestFields("name|morecol", "varchar|int64"),
+ "foo|1", "bar|2", "baz|3"),
+ }}
+
+ wantRes := sqltypes.MakeTestResult(sqltypes.MakeTestFields("name", "varchar"),
+ "foo", "bar", "baz")
+ result, err := vdxLookup.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false)
+ require.NoError(t, err)
+ fp.ExpectLog(t, []string{`Execute from: type:TUPLE values:{type:INT64 value:"1"} false`})
+ vc.ExpectLog(t, []string{
+ `ResolveDestinations ks [type:INT64 value:"1"] Destinations:DestinationKeyspaceID(10)`,
+ `ExecuteMultiShard ks.-20: dummy_select {} false false`,
+ })
+ expectResult(t, result, wantRes)
+
+ fp.rewind()
+ vc.Rewind()
+ result, err = wrapStreamExecute(vdxLookup, vc, map[string]*querypb.BindVariable{}, false)
+ require.NoError(t, err)
+ vc.ExpectLog(t, []string{
+ `ResolveDestinations ks [type:INT64 value:"1"] Destinations:DestinationKeyspaceID(10)`,
+ `StreamExecuteMulti dummy_select ks.-20: {} `,
+ })
+ expectResult(t, result, wantRes)
+}
diff --git a/go/vt/vtgate/evalengine/compiler_test.go b/go/vt/vtgate/evalengine/compiler_test.go
index a9d5cb561f6..797daa4d1b1 100644
--- a/go/vt/vtgate/evalengine/compiler_test.go
+++ b/go/vt/vtgate/evalengine/compiler_test.go
@@ -24,6 +24,8 @@ import (
"testing"
"time"
+ "github.com/stretchr/testify/assert"
+
"github.com/olekukonko/tablewriter"
"vitess.io/vitess/go/mysql/collations"
@@ -93,7 +95,18 @@ func (s *Tracker) String() string {
return s.buf.String()
}
+func TestOneCase(t *testing.T) {
+ query := ``
+ if query == "" {
+ t.Skip("no query to test")
+ }
+ venv := vtenv.NewTestEnv()
+ env := evalengine.EmptyExpressionEnv(venv)
+ testCompilerCase(t, query, venv, nil, env)
+}
+
func TestCompilerReference(t *testing.T) {
+ // This test runs a lot of queries and compares the results of the evalengine in eval mode to the results of the compiler.
now := time.Now()
evalengine.SystemTime = func() time.Time { return now }
defer func() { evalengine.SystemTime = time.Now }()
@@ -107,52 +120,11 @@ func TestCompilerReference(t *testing.T) {
tc.Run(func(query string, row []sqltypes.Value) {
env.Row = row
-
- stmt, err := venv.Parser().ParseExpr(query)
- if err != nil {
- // no need to test un-parseable queries
- return
- }
-
- fields := evalengine.FieldResolver(tc.Schema)
- cfg := &evalengine.Config{
- ResolveColumn: fields.Column,
- ResolveType: fields.Type,
- Collation: collations.CollationUtf8mb4ID,
- Environment: venv,
- NoConstantFolding: true,
- }
-
- converted, err := evalengine.Translate(stmt, cfg)
- if err != nil {
- return
- }
-
- expected, evalErr := env.EvaluateAST(converted)
total++
-
- res, vmErr := env.Evaluate(converted)
- if vmErr != nil {
- switch {
- case evalErr == nil:
- t.Errorf("failed evaluation from compiler:\nSQL: %s\nError: %s", query, vmErr)
- case evalErr.Error() != vmErr.Error():
- t.Errorf("error mismatch:\nSQL: %s\nError eval: %s\nError comp: %s", query, evalErr, vmErr)
- default:
- supported++
- }
- return
+ testCompilerCase(t, query, venv, tc.Schema, env)
+ if !t.Failed() {
+ supported++
}
-
- eval := expected.String()
- comp := res.String()
-
- if eval != comp {
- t.Errorf("bad evaluation from compiler:\nSQL: %s\nEval: %s\nComp: %s", query, eval, comp)
- return
- }
-
- supported++
})
track.Add(tc.Name(), supported, total)
@@ -162,6 +134,51 @@ func TestCompilerReference(t *testing.T) {
t.Logf("\n%s", track.String())
}
+func testCompilerCase(t *testing.T, query string, venv *vtenv.Environment, schema []*querypb.Field, env *evalengine.ExpressionEnv) {
+ stmt, err := venv.Parser().ParseExpr(query)
+ if err != nil {
+ // no need to test un-parseable queries
+ return
+ }
+
+ fields := evalengine.FieldResolver(schema)
+ cfg := &evalengine.Config{
+ ResolveColumn: fields.Column,
+ ResolveType: fields.Type,
+ Collation: collations.CollationUtf8mb4ID,
+ Environment: venv,
+ NoConstantFolding: true,
+ }
+
+ converted, err := evalengine.Translate(stmt, cfg)
+ if err != nil {
+ return
+ }
+
+ var expected evalengine.EvalResult
+ var evalErr error
+ assert.NotPanics(t, func() {
+ expected, evalErr = env.EvaluateAST(converted)
+ })
+ var res evalengine.EvalResult
+ var vmErr error
+ assert.NotPanics(t, func() {
+ res, vmErr = env.Evaluate(converted)
+ })
+ switch {
+ case vmErr == nil && evalErr == nil:
+ eval := expected.String()
+ comp := res.String()
+ assert.Equalf(t, eval, comp, "bad evaluation from compiler:\nSQL: %s\nEval: %s\nComp: %s", query, eval, comp)
+ case vmErr == nil:
+ t.Errorf("failed evaluation from evalengine:\nSQL: %s\nError: %s", query, evalErr)
+ case evalErr == nil:
+ t.Errorf("failed evaluation from compiler:\nSQL: %s\nError: %s", query, vmErr)
+ case evalErr.Error() != vmErr.Error():
+ t.Errorf("error mismatch:\nSQL: %s\nError eval: %s\nError comp: %s", query, evalErr, vmErr)
+ }
+}
+
func TestCompilerSingle(t *testing.T) {
var testCases = []struct {
expression string
diff --git a/go/vt/vtgate/evalengine/expr.go b/go/vt/vtgate/evalengine/expr.go
index 44026f97e69..b90390e1ba8 100644
--- a/go/vt/vtgate/evalengine/expr.go
+++ b/go/vt/vtgate/evalengine/expr.go
@@ -56,7 +56,7 @@ func (expr *BinaryExpr) arguments(env *ExpressionEnv) (eval, eval, error) {
}
right, err := expr.Right.eval(env)
if err != nil {
- return nil, nil, err
+ return left, nil, err
}
return left, right, nil
}
diff --git a/go/vt/vtgate/evalengine/expr_compare.go b/go/vt/vtgate/evalengine/expr_compare.go
index ca4cdd75f74..8cff9cf25a9 100644
--- a/go/vt/vtgate/evalengine/expr_compare.go
+++ b/go/vt/vtgate/evalengine/expr_compare.go
@@ -580,13 +580,18 @@ func (l *LikeExpr) matchWildcard(left, right []byte, coll collations.ID) bool {
}
fullColl := colldata.Lookup(coll)
wc := fullColl.Wildcard(right, 0, 0, 0)
- return wc.Match(left)
+ return wc.Match(left) == !l.Negate
}
func (l *LikeExpr) eval(env *ExpressionEnv) (eval, error) {
- left, right, err := l.arguments(env)
- if left == nil || right == nil || err != nil {
- return nil, err
+ left, err := l.Left.eval(env)
+ if err != nil || left == nil {
+ return left, err
+ }
+
+ right, err := l.Right.eval(env)
+ if err != nil || right == nil {
+ return right, err
}
var col collations.TypedCollation
@@ -595,18 +600,9 @@ func (l *LikeExpr) eval(env *ExpressionEnv) (eval, error) {
return nil, err
}
- var matched bool
- switch {
- case typeIsTextual(left.SQLType()) && typeIsTextual(right.SQLType()):
- matched = l.matchWildcard(left.(*evalBytes).bytes, right.(*evalBytes).bytes, col.Collation)
- case typeIsTextual(right.SQLType()):
- matched = l.matchWildcard(left.ToRawBytes(), right.(*evalBytes).bytes, col.Collation)
- case typeIsTextual(left.SQLType()):
- matched = l.matchWildcard(left.(*evalBytes).bytes, right.ToRawBytes(), col.Collation)
- default:
- matched = l.matchWildcard(left.ToRawBytes(), right.ToRawBytes(), collations.CollationBinaryID)
- }
- return newEvalBool(matched == !l.Negate), nil
+ matched := l.matchWildcard(left.ToRawBytes(), right.ToRawBytes(), col.Collation)
+
+ return newEvalBool(matched), nil
}
func (expr *LikeExpr) compile(c *compiler) (ctype, error) {
@@ -615,12 +611,14 @@ func (expr *LikeExpr) compile(c *compiler) (ctype, error) {
return ctype{}, err
}
+ skip1 := c.compileNullCheck1(lt)
+
rt, err := expr.Right.compile(c)
if err != nil {
return ctype{}, err
}
- skip := c.compileNullCheck2(lt, rt)
+ skip2 := c.compileNullCheck1(rt)
if !lt.isTextual() {
c.asm.Convert_xc(2, sqltypes.VarChar, c.collation, nil)
@@ -672,6 +670,6 @@ func (expr *LikeExpr) compile(c *compiler) (ctype, error) {
})
}
- c.asm.jumpDestination(skip)
+ c.asm.jumpDestination(skip1, skip2)
return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: flagIsBoolean | flagNullable}, nil
}
diff --git a/go/vt/vtgate/evalengine/testcases/cases.go b/go/vt/vtgate/evalengine/testcases/cases.go
index b9ae41722eb..5ef8aab78d7 100644
--- a/go/vt/vtgate/evalengine/testcases/cases.go
+++ b/go/vt/vtgate/evalengine/testcases/cases.go
@@ -1065,24 +1065,26 @@ func CollationOperations(yield Query) {
}
func LikeComparison(yield Query) {
- var left = []string{
+ var left = append(inputConversions,
`'foobar'`, `'FOOBAR'`,
`'1234'`, `1234`,
`_utf8mb4 'foobar' COLLATE utf8mb4_0900_as_cs`,
- `_utf8mb4 'FOOBAR' COLLATE utf8mb4_0900_as_cs`,
- }
- var right = append([]string{
+ `_utf8mb4 'FOOBAR' COLLATE utf8mb4_0900_as_cs`)
+
+ var right = append(left,
+ `NULL`, `1`, `0`,
`'foo%'`, `'FOO%'`, `'foo_ar'`, `'FOO_AR'`,
`'12%'`, `'12_4'`,
`_utf8mb4 'foo%' COLLATE utf8mb4_0900_as_cs`,
`_utf8mb4 'FOO%' COLLATE utf8mb4_0900_as_cs`,
`_utf8mb4 'foo_ar' COLLATE utf8mb4_0900_as_cs`,
- `_utf8mb4 'FOO_AR' COLLATE utf8mb4_0900_as_cs`,
- }, left...)
+ `_utf8mb4 'FOO_AR' COLLATE utf8mb4_0900_as_cs`)
for _, lhs := range left {
for _, rhs := range right {
- yield(fmt.Sprintf("%s LIKE %s", lhs, rhs), nil)
+ for _, op := range []string{"LIKE", "NOT LIKE"} {
+ yield(fmt.Sprintf("%s %s %s", lhs, op, rhs), nil)
+ }
}
}
}
diff --git a/go/vt/vtgate/evalengine/translate.go b/go/vt/vtgate/evalengine/translate.go
index b4ced2cdd61..4952e4328fe 100644
--- a/go/vt/vtgate/evalengine/translate.go
+++ b/go/vt/vtgate/evalengine/translate.go
@@ -87,7 +87,7 @@ func (ast *astCompiler) translateComparisonExpr2(op sqlparser.ComparisonExprOper
Negate: op == sqlparser.NotRegexpOp,
}, nil
default:
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, op.ToString())
+ return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, op.ToString())
}
}
@@ -309,10 +309,6 @@ func (ast *astCompiler) translateBinaryExpr(binary *sqlparser.BinaryExpr) (IR, e
return &BitwiseExpr{BinaryExpr: binaryExpr, Op: &opBitShl{}}, nil
case sqlparser.ShiftRightOp:
return &BitwiseExpr{BinaryExpr: binaryExpr, Op: &opBitShr{}}, nil
- case sqlparser.JSONExtractOp:
- return builtinJSONExtractRewrite(left, right)
- case sqlparser.JSONUnquoteExtractOp:
- return builtinJSONExtractUnquoteRewrite(left, right)
default:
return nil, translateExprNotSupported(binary)
}
diff --git a/go/vt/vtgate/executor.go b/go/vt/vtgate/executor.go
index d1e555bf990..b99873ced02 100644
--- a/go/vt/vtgate/executor.go
+++ b/go/vt/vtgate/executor.go
@@ -202,7 +202,7 @@ func NewExecutor(
return e.plans.Metrics.Hits()
})
stats.NewCounterFunc("QueryPlanCacheMisses", "Query plan cache misses", func() int64 {
- return e.plans.Metrics.Hits()
+ return e.plans.Metrics.Misses()
})
servenv.HTTPHandle(pathQueryPlans, e)
servenv.HTTPHandle(pathScatterStats, e)
diff --git a/go/vt/vtgate/executor_select_test.go b/go/vt/vtgate/executor_select_test.go
index 8f3e436deb8..90118603f46 100644
--- a/go/vt/vtgate/executor_select_test.go
+++ b/go/vt/vtgate/executor_select_test.go
@@ -2942,6 +2942,67 @@ func TestSubQueryAndQueryWithLimit(t *testing.T) {
assert.Equal(t, `type:INT64 value:"100"`, sbc2.Queries[1].BindVariables["__upper_limit"].String())
}
+func TestSelectUsingMultiEqualOnLookupColumn(t *testing.T) {
+ executor, sbc1, sbc2, sbclookup, _ := createExecutorEnv(t)
+
+ // No results on shard `-20` (`sbc1`), but some lookup results on shard `40-60` (`sbc2`)
+ sbclookup.SetResults([]*sqltypes.Result{{
+ Fields: []*querypb.Field{
+ {Name: "lu_col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)},
+ {Name: "keyspace_id", Type: sqltypes.VarBinary, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)},
+ },
+ Rows: [][]sqltypes.Value{{
+ sqltypes.NewInt32(2),
+ sqltypes.MakeTrusted(sqltypes.VarBinary, []byte("\x45")),
+ }},
+ }})
+
+ sbc1.SetResults([]*sqltypes.Result{{
+ Fields: []*querypb.Field{
+ {Name: "nv_lu_col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)},
+ {Name: "other", Type: sqltypes.VarChar, Charset: collations.CollationUtf8mb4ID},
+ },
+ Rows: [][]sqltypes.Value{},
+ }})
+
+ sbc2.SetResults([]*sqltypes.Result{{
+ Fields: []*querypb.Field{
+ {Name: "nv_lu_col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)},
+ {Name: "other", Type: sqltypes.VarChar, Charset: collations.CollationUtf8mb4ID},
+ },
+ Rows: [][]sqltypes.Value{{
+ sqltypes.NewInt32(2),
+ sqltypes.NewVarChar("baz"),
+ }},
+ }})
+
+ result, err := exec(executor, NewSafeSession(&vtgatepb.Session{
+ TargetString: KsTestSharded,
+ }), "select nv_lu_col, other from t2_lookup WHERE (nv_lu_col = 1 AND other = 'bar') OR (nv_lu_col = 2 AND other = 'baz') OR (nv_lu_col = 3 AND other = 'qux') OR (nv_lu_col = 4 AND other = 'brz') OR (nv_lu_col = 5 AND other = 'brz')")
+
+ require.NoError(t, err)
+
+ require.Len(t, sbc1.Queries, 0)
+ require.Len(t, sbc2.Queries, 1)
+
+ require.Equal(t, []*querypb.BoundQuery{{
+ Sql: "select nv_lu_col, other from t2_lookup where nv_lu_col = 1 and other = 'bar' or nv_lu_col = 2 and other = 'baz' or nv_lu_col = 3 and other = 'qux' or nv_lu_col = 4 and other = 'brz' or nv_lu_col = 5 and other = 'brz'",
+ BindVariables: map[string]*querypb.BindVariable{},
+ }}, sbc2.Queries)
+
+ wantResult := &sqltypes.Result{
+ Fields: []*querypb.Field{
+ {Name: "nv_lu_col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)},
+ {Name: "other", Type: sqltypes.VarChar, Charset: collations.CollationUtf8mb4ID},
+ },
+ Rows: [][]sqltypes.Value{{
+ sqltypes.NewInt32(2),
+ sqltypes.NewVarChar("baz"),
+ }},
+ }
+ require.Equal(t, wantResult, result)
+}
+
func TestCrossShardSubqueryStream(t *testing.T) {
executor, sbc1, sbc2, _, ctx := createExecutorEnv(t)
result1 := []*sqltypes.Result{{
diff --git a/go/vt/vtgate/executor_test.go b/go/vt/vtgate/executor_test.go
index c4d231cf651..498d87eca8c 100644
--- a/go/vt/vtgate/executor_test.go
+++ b/go/vt/vtgate/executor_test.go
@@ -2280,7 +2280,7 @@ func TestExecutorVExplain(t *testing.T) {
result, err = executorExec(ctx, executor, session, "vexplain plan select 42", bindVars)
require.NoError(t, err)
- expected := `[[VARCHAR("{\n\t\"OperatorType\": \"Projection\",\n\t\"Expressions\": [\n\t\t\"42 as 42\"\n\t],\n\t\"Inputs\": [\n\t\t{\n\t\t\t\"OperatorType\": \"SingleRow\"\n\t\t}\n\t]\n}")]]`
+ expected := `[[VARCHAR("{\n\t\"OperatorType\": \"Projection\",\n\t\"Expressions\": [\n\t\t\":vtg1 as :vtg1 /* INT64 */\"\n\t],\n\t\"Inputs\": [\n\t\t{\n\t\t\t\"OperatorType\": \"SingleRow\"\n\t\t}\n\t]\n}")]]`
require.Equal(t, expected, fmt.Sprintf("%v", result.Rows))
}
diff --git a/go/vt/vtgate/planbuilder/operators/apply_join.go b/go/vt/vtgate/planbuilder/operators/apply_join.go
index 402a2ae19ba..a54c71646ec 100644
--- a/go/vt/vtgate/planbuilder/operators/apply_join.go
+++ b/go/vt/vtgate/planbuilder/operators/apply_join.go
@@ -277,6 +277,10 @@ func (aj *ApplyJoin) ShortDescription() string {
}
firstPart := fmt.Sprintf("on %s columns: %s", fn(aj.JoinPredicates), fn(aj.JoinColumns))
+ if aj.LeftJoin {
+ firstPart = "LEFT JOIN " + firstPart
+ }
+
if len(aj.ExtraLHSVars) == 0 {
return firstPart
}
diff --git a/go/vt/vtgate/planbuilder/operators/hash_join.go b/go/vt/vtgate/planbuilder/operators/hash_join.go
index f997ed5205d..135fda276b5 100644
--- a/go/vt/vtgate/planbuilder/operators/hash_join.go
+++ b/go/vt/vtgate/planbuilder/operators/hash_join.go
@@ -300,20 +300,9 @@ func (hj *HashJoin) addColumn(ctx *plancontext.PlanningContext, in sqlparser.Exp
inOffset = op.AddColumn(ctx, false, false, aeWrap(expr))
}
- // we turn the
+ // we have to turn the incoming offset into an outgoing offset of the columns this operator is exposing
internalOffset := offsetter(inOffset)
-
- // ok, we have an offset from the input operator. Let's check if we already have it
- // in our list of incoming columns
-
- for idx, offset := range hj.ColumnOffsets {
- if internalOffset == offset {
- return idx
- }
- }
-
hj.ColumnOffsets = append(hj.ColumnOffsets, internalOffset)
-
return len(hj.ColumnOffsets) - 1
}
@@ -408,17 +397,7 @@ func (hj *HashJoin) addSingleSidedColumn(
// we have to turn the incoming offset to an outgoing offset of the columns this operator is exposing
internalOffset := offsetter(inOffset)
-
- // ok, we have an offset from the input operator. Let's check if we already have it
- // in our list of incoming columns
- for idx, offset := range hj.ColumnOffsets {
- if internalOffset == offset {
- return idx
- }
- }
-
hj.ColumnOffsets = append(hj.ColumnOffsets, internalOffset)
-
return len(hj.ColumnOffsets) - 1
}
diff --git a/go/vt/vtgate/planbuilder/operators/horizon_expanding.go b/go/vt/vtgate/planbuilder/operators/horizon_expanding.go
index 64c471ac62c..9549822274e 100644
--- a/go/vt/vtgate/planbuilder/operators/horizon_expanding.go
+++ b/go/vt/vtgate/planbuilder/operators/horizon_expanding.go
@@ -83,6 +83,12 @@ func expandSelectHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, sel
for _, order := range horizon.Query.GetOrderBy() {
qp.addDerivedColumn(ctx, order.Expr)
}
+ sel, isSel := horizon.Query.(*sqlparser.Select)
+ if isSel && sel.Having != nil {
+ for _, pred := range sqlparser.SplitAndExpression(nil, sel.Having.Expr) {
+ qp.addDerivedColumn(ctx, pred)
+ }
+ }
}
op := createProjectionFromSelect(ctx, horizon)
if qp.HasAggr {
@@ -294,6 +300,7 @@ outer:
func createProjectionForComplexAggregation(a *Aggregator, qp *QueryProjection) Operator {
p := newAliasedProjection(a)
p.DT = a.DT
+ a.DT = nil // we don't need the derived table twice
for _, expr := range qp.SelectExprs {
ae, err := expr.GetAliasedExpr()
if err != nil {
diff --git a/go/vt/vtgate/planbuilder/operators/join_merging.go b/go/vt/vtgate/planbuilder/operators/join_merging.go
index 0cc5da9121f..0042994bda3 100644
--- a/go/vt/vtgate/planbuilder/operators/join_merging.go
+++ b/go/vt/vtgate/planbuilder/operators/join_merging.go
@@ -22,12 +22,13 @@ import (
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+ "vitess.io/vitess/go/vt/vtgate/vindexes"
)
// mergeJoinInputs checks whether two operators can be merged into a single one.
// If they can be merged, a new operator with the merged routing is returned
// If they cannot be merged, nil is returned.
-func mergeJoinInputs(ctx *plancontext.PlanningContext, lhs, rhs Operator, joinPredicates []sqlparser.Expr, m merger) *Route {
+func mergeJoinInputs(ctx *plancontext.PlanningContext, lhs, rhs Operator, joinPredicates []sqlparser.Expr, m *joinMerger) *Route {
lhsRoute, rhsRoute, routingA, routingB, a, b, sameKeyspace := prepareInputRoutes(lhs, rhs)
if lhsRoute == nil {
return nil
@@ -40,6 +41,11 @@ func mergeJoinInputs(ctx *plancontext.PlanningContext, lhs, rhs Operator, joinPr
case b == dual:
return m.merge(ctx, lhsRoute, rhsRoute, routingA)
+ // As both are reference routes, we need to merge the alternates as well.
+ case a == anyShard && b == anyShard && sameKeyspace:
+ newrouting := mergeAnyShardRoutings(ctx, routingA.(*AnyShardRouting), routingB.(*AnyShardRouting), joinPredicates, m.innerJoin)
+ return m.merge(ctx, lhsRoute, rhsRoute, newrouting)
+
// an unsharded/reference route can be merged with anything going to that keyspace
case a == anyShard && sameKeyspace:
return m.merge(ctx, lhsRoute, rhsRoute, routingB)
@@ -58,13 +64,33 @@ func mergeJoinInputs(ctx *plancontext.PlanningContext, lhs, rhs Operator, joinPr
// sharded routing is complex, so we handle it in a separate method
case a == sharded && b == sharded:
- return tryMergeJoinShardedRouting(ctx, lhsRoute, rhsRoute, m, joinPredicates)
+ return tryMergeShardedRouting(ctx, lhsRoute, rhsRoute, m, joinPredicates)
default:
return nil
}
}
+func mergeAnyShardRoutings(ctx *plancontext.PlanningContext, a, b *AnyShardRouting, joinPredicates []sqlparser.Expr, innerJoin bool) *AnyShardRouting {
+ alternates := make(map[*vindexes.Keyspace]*Route)
+ for ak, av := range a.Alternates {
+ for bk, bv := range b.Alternates {
+ // only same keyspace alternates can be merged.
+ if ak != bk {
+ continue
+ }
+ op, _ := mergeOrJoin(ctx, av, bv, joinPredicates, innerJoin)
+ if r, ok := op.(*Route); ok {
+ alternates[ak] = r
+ }
+ }
+ }
+ return &AnyShardRouting{
+ keyspace: a.keyspace,
+ Alternates: alternates,
+ }
+}
+
func prepareInputRoutes(lhs Operator, rhs Operator) (*Route, *Route, Routing, Routing, routingType, routingType, bool) {
lhsRoute, rhsRoute := operatorsToRoutes(lhs, rhs)
if lhsRoute == nil || rhsRoute == nil {
@@ -176,7 +202,7 @@ func getRoutingType(r Routing) routingType {
panic(fmt.Sprintf("switch should be exhaustive, got %T", r))
}
-func newJoinMerge(predicates []sqlparser.Expr, innerJoin bool) merger {
+func newJoinMerge(predicates []sqlparser.Expr, innerJoin bool) *joinMerger {
return &joinMerger{
predicates: predicates,
innerJoin: innerJoin,
diff --git a/go/vt/vtgate/planbuilder/operators/joins.go b/go/vt/vtgate/planbuilder/operators/joins.go
index 266b9b8288f..b819c4a1f5f 100644
--- a/go/vt/vtgate/planbuilder/operators/joins.go
+++ b/go/vt/vtgate/planbuilder/operators/joins.go
@@ -17,7 +17,10 @@ limitations under the License.
package operators
import (
+ "fmt"
+
"vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
"vitess.io/vitess/go/vt/vtgate/semantics"
)
@@ -82,7 +85,7 @@ func AddPredicate(
return join
}
- return nil
+ panic(vterrors.VT13001(fmt.Sprintf("pushed wrong predicate to the join: %s", sqlparser.String(expr))))
}
// we are looking for predicates like `tbl.col = <>` or `<> = tbl.col`,
diff --git a/go/vt/vtgate/planbuilder/operators/offset_planning.go b/go/vt/vtgate/planbuilder/operators/offset_planning.go
index 638d3d80907..712cc8ee5ad 100644
--- a/go/vt/vtgate/planbuilder/operators/offset_planning.go
+++ b/go/vt/vtgate/planbuilder/operators/offset_planning.go
@@ -38,7 +38,6 @@ func planOffsets(ctx *plancontext.PlanningContext, root Operator) Operator {
panic(vterrors.VT13001(fmt.Sprintf("should not see %T here", in)))
case offsettable:
newOp := op.planOffsets(ctx)
-
if newOp == nil {
newOp = op
}
@@ -47,7 +46,13 @@ func planOffsets(ctx *plancontext.PlanningContext, root Operator) Operator {
fmt.Println("Planned offsets for:")
fmt.Println(ToTree(newOp))
}
- return newOp, nil
+
+ if newOp == op {
+ return newOp, nil
+ } else {
+ // We got a new operator from plan offsets. We should return that something has changed.
+ return newOp, Rewrote("planning offsets introduced a new operator")
+ }
}
return in, NoRewrite
}
diff --git a/go/vt/vtgate/planbuilder/operators/route_planning.go b/go/vt/vtgate/planbuilder/operators/route_planning.go
index c58340291ff..d0b32e37200 100644
--- a/go/vt/vtgate/planbuilder/operators/route_planning.go
+++ b/go/vt/vtgate/planbuilder/operators/route_planning.go
@@ -358,13 +358,18 @@ func mergeOrJoin(ctx *plancontext.PlanningContext, lhs, rhs Operator, joinPredic
}
join := NewApplyJoin(ctx, Clone(rhs), Clone(lhs), nil, !inner)
- newOp := pushJoinPredicates(ctx, joinPredicates, join)
- return newOp, Rewrote("logical join to applyJoin, switching side because LIMIT")
+ for _, pred := range joinPredicates {
+ join.AddJoinPredicate(ctx, pred)
+ }
+ return join, Rewrote("logical join to applyJoin, switching side because LIMIT")
}
join := NewApplyJoin(ctx, Clone(lhs), Clone(rhs), nil, !inner)
- newOp := pushJoinPredicates(ctx, joinPredicates, join)
- return newOp, Rewrote("logical join to applyJoin ")
+ for _, pred := range joinPredicates {
+ join.AddJoinPredicate(ctx, pred)
+ }
+
+ return join, Rewrote("logical join to applyJoin ")
}
func operatorsToRoutes(a, b Operator) (*Route, *Route) {
@@ -583,15 +588,3 @@ func hexEqual(a, b *sqlparser.Literal) bool {
}
return false
}
-
-func pushJoinPredicates(ctx *plancontext.PlanningContext, exprs []sqlparser.Expr, op *ApplyJoin) Operator {
- if len(exprs) == 0 {
- return op
- }
-
- for _, expr := range exprs {
- AddPredicate(ctx, op, expr, true, newFilterSinglePredicate)
- }
-
- return op
-}
diff --git a/go/vt/vtgate/planbuilder/operators/sharded_routing.go b/go/vt/vtgate/planbuilder/operators/sharded_routing.go
index 6818311c0dd..61046e4da67 100644
--- a/go/vt/vtgate/planbuilder/operators/sharded_routing.go
+++ b/go/vt/vtgate/planbuilder/operators/sharded_routing.go
@@ -23,7 +23,6 @@ import (
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/slice"
"vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
"vitess.io/vitess/go/vt/vtgate/evalengine"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
@@ -601,13 +600,15 @@ func (tr *ShardedRouting) extraInfo() string {
)
}
-func tryMergeJoinShardedRouting(
+func tryMergeShardedRouting(
ctx *plancontext.PlanningContext,
routeA, routeB *Route,
m merger,
joinPredicates []sqlparser.Expr,
) *Route {
- sameKeyspace := routeA.Routing.Keyspace() == routeB.Routing.Keyspace()
+ if routeA.Routing.Keyspace() != routeB.Routing.Keyspace() {
+ return nil
+ }
tblA := routeA.Routing.(*ShardedRouting)
tblB := routeB.Routing.(*ShardedRouting)
@@ -636,10 +637,6 @@ func tryMergeJoinShardedRouting(
return nil
}
- if !sameKeyspace {
- panic(vterrors.VT12001("cross-shard correlated subquery"))
- }
-
canMerge := canMergeOnFilters(ctx, routeA, routeB, joinPredicates)
if !canMerge {
return nil
diff --git a/go/vt/vtgate/planbuilder/operators/subquery_planning.go b/go/vt/vtgate/planbuilder/operators/subquery_planning.go
index a85829bab6d..fb8db06f312 100644
--- a/go/vt/vtgate/planbuilder/operators/subquery_planning.go
+++ b/go/vt/vtgate/planbuilder/operators/subquery_planning.go
@@ -722,7 +722,7 @@ func mergeSubqueryInputs(ctx *plancontext.PlanningContext, in, out Operator, joi
// sharded routing is complex, so we handle it in a separate method
case inner == sharded && outer == sharded:
- return tryMergeJoinShardedRouting(ctx, inRoute, outRoute, m, joinPredicates)
+ return tryMergeShardedRouting(ctx, inRoute, outRoute, m, joinPredicates)
default:
return nil
diff --git a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json
index f1555686230..8a703d8a620 100644
--- a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json
@@ -571,6 +571,35 @@
]
}
},
+ {
+ "comment": "using HAVING inside a derived table still produces viable plans",
+ "query": "select id from (select id from user group by id having (count(user.id) = 2) limit 2 offset 0) subquery_for_limit",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select id from user group by id having (count(user.id) = 2) limit 2 offset 0) subquery_for_limit",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "2",
+ "Offset": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from (select id, count(`user`.id) = 2 from `user` where 1 != 1 group by id) as subquery_for_limit where 1 != 1",
+ "Query": "select id from (select id, count(`user`.id) = 2 from `user` group by id having count(`user`.id) = 2) as subquery_for_limit limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
{
"comment": "sum with distinct no unique vindex",
"query": "select col1, sum(distinct col2) from user group by col1",
@@ -3614,25 +3643,41 @@
"QueryType": "SELECT",
"Original": "select * from (select id from user having count(*) = 1) s",
"Instructions": {
- "OperatorType": "Filter",
- "Predicate": "count(*) = 1",
- "ResultColumns": 1,
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
"Inputs": [
{
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "any_value(0) AS id, sum_count_star(1) AS count(*)",
+ "OperatorType": "Projection",
+ "Expressions": [
+ ":0 as id",
+ "count(*) = 1 as count(*) = 1"
+ ],
"Inputs": [
{
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) from `user` where 1 != 1",
- "Query": "select id, count(*) from `user`",
- "Table": "`user`"
+ "OperatorType": "Filter",
+ "Predicate": "count(*) = 1",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "any_value(0) AS id, sum_count_star(1) AS count(*), any_value(2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, count(*), 1 from `user` where 1 != 1",
+ "Query": "select id, count(*), 1 from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
}
]
}
@@ -6507,59 +6552,74 @@
"OrderBy": "(4|6) ASC, (5|7) ASC",
"Inputs": [
{
- "OperatorType": "Join",
- "Variant": "HashLeftJoin",
- "Collation": "binary",
- "ComparisonType": "INT16",
- "JoinColumnIndexes": "-1,1,-2,2,-3,3",
- "Predicate": "`user`.col = ue.col",
- "TableName": "`user`_user_extra",
+ "OperatorType": "Projection",
+ "Expressions": [
+ "count(*) as count(*)",
+ "count(*) as count(*)",
+ "`user`.col as col",
+ "ue.col as col",
+ "`user`.foo as foo",
+ "ue.bar as bar",
+ "weight_string(`user`.foo) as weight_string(`user`.foo)",
+ "weight_string(ue.bar) as weight_string(ue.bar)"
+ ],
"Inputs": [
{
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*), `user`.col, `user`.foo from `user` where 1 != 1 group by `user`.col, `user`.foo",
- "Query": "select count(*), `user`.col, `user`.foo from `user` group by `user`.col, `user`.foo",
- "Table": "`user`"
- },
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_star(0)",
- "GroupBy": "1, (2|3)",
+ "OperatorType": "Join",
+ "Variant": "HashLeftJoin",
+ "Collation": "binary",
+ "ComparisonType": "INT16",
+ "JoinColumnIndexes": "-1,1,-2,2,-3,3,-3,3",
+ "Predicate": "`user`.col = ue.col",
+ "TableName": "`user`_user_extra",
"Inputs": [
{
- "OperatorType": "SimpleProjection",
- "Columns": [
- 2,
- 0,
- 1,
- 3
- ],
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*), `user`.col, `user`.foo from `user` where 1 != 1 group by `user`.col, `user`.foo",
+ "Query": "select count(*), `user`.col, `user`.foo from `user` group by `user`.col, `user`.foo",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_star(0)",
+ "GroupBy": "1, (2|3)",
"Inputs": [
{
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "0 ASC, (1|3) ASC",
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 2,
+ 0,
+ 1,
+ 3
+ ],
"Inputs": [
{
- "OperatorType": "Limit",
- "Count": "10",
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "0 ASC, (1|3) ASC",
"Inputs": [
{
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ue.col, ue.bar, 1, weight_string(ue.bar) from (select col, bar from user_extra where 1 != 1) as ue where 1 != 1",
- "Query": "select ue.col, ue.bar, 1, weight_string(ue.bar) from (select col, bar from user_extra) as ue limit :__upper_limit",
- "Table": "user_extra"
+ "OperatorType": "Limit",
+ "Count": "10",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ue.col, ue.bar, 1, weight_string(ue.bar) from (select col, bar from user_extra where 1 != 1) as ue where 1 != 1",
+ "Query": "select ue.col, ue.bar, 1, weight_string(ue.bar) from (select col, bar from user_extra) as ue limit :__upper_limit",
+ "Table": "user_extra"
+ }
+ ]
}
]
}
diff --git a/go/vt/vtgate/planbuilder/testdata/cte_cases.json b/go/vt/vtgate/planbuilder/testdata/cte_cases.json
index 51b130d25cc..2fd8d573fe0 100644
--- a/go/vt/vtgate/planbuilder/testdata/cte_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/cte_cases.json
@@ -369,25 +369,41 @@
"QueryType": "SELECT",
"Original": "with s as (select id from user having count(*) = 1) select * from s",
"Instructions": {
- "OperatorType": "Filter",
- "Predicate": "count(*) = 1",
- "ResultColumns": 1,
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
"Inputs": [
{
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "any_value(0) AS id, sum_count_star(1) AS count(*)",
+ "OperatorType": "Projection",
+ "Expressions": [
+ ":0 as id",
+ "count(*) = 1 as count(*) = 1"
+ ],
"Inputs": [
{
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) from `user` where 1 != 1",
- "Query": "select id, count(*) from `user`",
- "Table": "`user`"
+ "OperatorType": "Filter",
+ "Predicate": "count(*) = 1",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "any_value(0) AS id, sum_count_star(1) AS count(*), any_value(2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, count(*), 1 from `user` where 1 != 1",
+ "Query": "select id, count(*), 1 from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
}
]
}
diff --git a/go/vt/vtgate/planbuilder/testdata/filter_cases.json b/go/vt/vtgate/planbuilder/testdata/filter_cases.json
index 272bf694d03..2c6fb5e4160 100644
--- a/go/vt/vtgate/planbuilder/testdata/filter_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/filter_cases.json
@@ -797,6 +797,108 @@
]
}
},
+ {
+ "comment": "Disjunction of conjunctions with 4 or more disjunctions",
+ "query": "select id from user where (col = 'aa' AND name = 'bb') OR (col = 'cc' AND name = 'dd') OR (col = 'ee' AND name = 'ff') OR (col = 'gg' AND name = 'hh')",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (col = 'aa' AND name = 'bb') OR (col = 'cc' AND name = 'dd') OR (col = 'ee' AND name = 'ff') OR (col = 'gg' AND name = 'hh')",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "('bb', 'dd', 'ff', 'hh')"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ "::name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where col = 'aa' and `name` = 'bb' or col = 'cc' and `name` = 'dd' or col = 'ee' and `name` = 'ff' or col = 'gg' and `name` = 'hh'",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Disjunction of conjunctions with 3 or less disjunctions",
+ "query": "select id from user where (col = 'aa' AND name = 'bb') OR (col = 'cc' AND name = 'dd') OR (col = 'ee' AND name = 'ff')",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (col = 'aa' AND name = 'bb') OR (col = 'cc' AND name = 'dd') OR (col = 'ee' AND name = 'ff')",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "('bb', 'dd', 'ff')"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ "::name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where col in ('aa', 'cc', 'ee') and (col in ('aa', 'cc') or `name` = 'ff') and (col = 'aa' or `name` = 'dd' or col = 'ee') and (col = 'aa' or `name` = 'dd' or `name` = 'ff') and (`name` = 'bb' or col = 'cc' or col = 'ee') and (`name` = 'bb' or col = 'cc' or `name` = 'ff') and (`name` in ('bb', 'dd') or col = 'ee') and `name` in ::__vals",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
{
"comment": "Single table complex in clause",
"query": "select id from user where name in (col, 'bb')",
diff --git a/go/vt/vtgate/planbuilder/testdata/from_cases.json b/go/vt/vtgate/planbuilder/testdata/from_cases.json
index 28d7d3c2718..7ed42771579 100644
--- a/go/vt/vtgate/planbuilder/testdata/from_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/from_cases.json
@@ -744,6 +744,111 @@
]
}
},
+ {
+ "comment": "Complex query that has hash left join underneath a memory sort and ordered aggregation",
+ "query": "select 1 from user join user_extra on user.id = user_extra.user_id join music on music.intcol = user_extra.col left join (select user_metadata.col, count(*) as count from user_metadata group by user_metadata.col) um on um.col = user_extra.col where user.id IN (103) group by user_extra.col, music.intcol",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user join user_extra on user.id = user_extra.user_id join music on music.intcol = user_extra.col left join (select user_metadata.col, count(*) as count from user_metadata group by user_metadata.col) um on um.col = user_extra.col where user.id IN (103) group by user_extra.col, music.intcol",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "any_value(0) AS 1",
+ "GroupBy": "1, 4",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "1 ASC, 4 ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "HashLeftJoin",
+ "Collation": "binary",
+ "ComparisonType": "INT16",
+ "JoinColumnIndexes": "-1,-2,1,-2,-4,-1",
+ "Predicate": "user_extra.col = um.col",
+ "TableName": "music_`user`, user_extra_user_metadata",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0,R:0,L:1",
+ "JoinVars": {
+ "music_intcol": 1
+ },
+ "TableName": "music_`user`, user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, music.intcol from music where 1 != 1 group by music.intcol",
+ "Query": "select 1, music.intcol from music group by music.intcol",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col, user_extra.col from `user`, user_extra where 1 != 1 group by user_extra.col, user_extra.col",
+ "Query": "select user_extra.col, user_extra.col from `user`, user_extra where `user`.id in (103) and user_extra.col = :music_intcol and `user`.id = user_extra.user_id group by user_extra.col, user_extra.col",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "103"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_metadata.col, count(*) as `count` from user_metadata where 1 != 1 group by user_metadata.col",
+ "OrderBy": "0 ASC",
+ "Query": "select user_metadata.col, count(*) as `count` from user_metadata group by user_metadata.col order by user_metadata.col asc",
+ "Table": "user_metadata"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user",
+ "user.user_extra",
+ "user.user_metadata"
+ ]
+ }
+ },
{
"comment": "Straight-join (ignores the straight_join hint)",
"query": "select m1.col from unsharded as m1 straight_join unsharded as m2",
@@ -808,6 +913,59 @@
]
}
},
+ {
+ "comment": "Outer join with join predicates that only depend on the inner side",
+ "query": "select 1 from user left join user_extra on user.foo = 42 and user.bar = user_extra.bar",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user left join user_extra on user.foo = 42 and user.bar = user_extra.bar",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "1 as 1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinVars": {
+ "user_bar": 1,
+ "user_foo": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.foo, `user`.bar from `user` where 1 != 1",
+ "Query": "select `user`.foo, `user`.bar from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.bar = :user_bar and :user_foo = 42",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
{
"comment": "Parenthesized, single chunk",
"query": "select user.col from user join (unsharded as m1 join unsharded as m2)",
@@ -4272,28 +4430,22 @@
"ResultColumns": 2,
"Inputs": [
{
- "OperatorType": "Join",
- "Variant": "HashLeftJoin",
- "Collation": "binary",
- "ComparisonType": "INT16",
- "JoinColumnIndexes": "-1,2",
- "Predicate": "u.col = ue.col",
- "TableName": "`user`_user_extra",
+ "OperatorType": "Projection",
+ "Expressions": [
+ "id as id",
+ "user_id as user_id",
+ "weight_string(id) as weight_string(id)",
+ "weight_string(user_id) as weight_string(user_id)"
+ ],
"Inputs": [
{
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id, u.col from (select id, col from `user` where 1 != 1) as u where 1 != 1",
- "Query": "select distinct u.id, u.col from (select id, col from `user`) as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Limit",
- "Count": "10",
+ "OperatorType": "Join",
+ "Variant": "HashLeftJoin",
+ "Collation": "binary",
+ "ComparisonType": "INT16",
+ "JoinColumnIndexes": "-1,2,-1,2",
+ "Predicate": "u.col = ue.col",
+ "TableName": "`user`_user_extra",
"Inputs": [
{
"OperatorType": "Route",
@@ -4302,9 +4454,26 @@
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select ue.col, ue.user_id from (select col, user_id from user_extra where 1 != 1) as ue where 1 != 1",
- "Query": "select ue.col, ue.user_id from (select col, user_id from user_extra) as ue limit :__upper_limit",
- "Table": "user_extra"
+ "FieldQuery": "select u.id, u.col from (select id, col from `user` where 1 != 1) as u where 1 != 1",
+ "Query": "select distinct u.id, u.col from (select id, col from `user`) as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Limit",
+ "Count": "10",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ue.col, ue.user_id from (select col, user_id from user_extra where 1 != 1) as ue where 1 != 1",
+ "Query": "select ue.col, ue.user_id from (select col, user_id from user_extra) as ue limit :__upper_limit",
+ "Table": "user_extra"
+ }
+ ]
}
]
}
@@ -4413,5 +4582,54 @@
"user.user_extra"
]
}
+ },
+ {
+ "comment": "Cross keyspace join",
+ "query": "select 1 from user join t1 on user.id = t1.id",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user join t1 on user.id = t1.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "t1_id": 1
+ },
+ "TableName": "t1_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, t1.id from t1 where 1 != 1",
+ "Query": "select 1, t1.id from t1",
+ "Table": "t1"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where `user`.id = :t1_id",
+ "Table": "`user`",
+ "Values": [
+ ":t1_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "zlookup_unique.t1"
+ ]
+ }
}
]
diff --git a/go/vt/vtgate/planbuilder/testdata/reference_cases.json b/go/vt/vtgate/planbuilder/testdata/reference_cases.json
index a89fa103923..6aa01355934 100644
--- a/go/vt/vtgate/planbuilder/testdata/reference_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/reference_cases.json
@@ -746,5 +746,30 @@
"user.user"
]
}
+ },
+ {
+ "comment": "two sharded and two unsharded reference table join - all should be merged into one route",
+ "query": "select 1 from user u join user_extra ue on u.id = ue.user_id join main.source_of_ref sr on sr.foo = ue.foo join main.rerouted_ref rr on rr.bar = sr.bar",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user u join user_extra ue on u.id = ue.user_id join main.source_of_ref sr on sr.foo = ue.foo join main.rerouted_ref rr on rr.bar = sr.bar",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u, user_extra as ue, ref_with_source as sr, ref as rr where 1 != 1",
+ "Query": "select 1 from `user` as u, user_extra as ue, ref_with_source as sr, ref as rr where rr.bar = sr.bar and u.id = ue.user_id and sr.foo = ue.foo",
+ "Table": "`user`, ref, ref_with_source, user_extra"
+ },
+ "TablesUsed": [
+ "user.ref",
+ "user.ref_with_source",
+ "user.user",
+ "user.user_extra"
+ ]
+ }
}
]
diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases.json b/go/vt/vtgate/planbuilder/testdata/select_cases.json
index 789f9e0de52..698a9aceaa5 100644
--- a/go/vt/vtgate/planbuilder/testdata/select_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/select_cases.json
@@ -2962,8 +2962,8 @@
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select a -> '$[4]', a ->> '$[3]' from `user` where 1 != 1",
- "Query": "select a -> '$[4]', a ->> '$[3]' from `user`",
+ "FieldQuery": "select json_extract(a, '$[4]'), json_unquote(json_extract(a, '$[3]')) from `user` where 1 != 1",
+ "Query": "select json_extract(a, '$[4]'), json_unquote(json_extract(a, '$[3]')) from `user`",
"Table": "`user`"
},
"TablesUsed": [
diff --git a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json
index b66ddd79ad5..f055fe6bb2c 100644
--- a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json
@@ -329,6 +329,11 @@
"query": "select 1 from music union (select id from user union all select name from unsharded)",
"plan": "VT12001: unsupported: nesting of UNIONs on the right-hand side"
},
+ {
+ "comment": "Cross keyspace query with subquery",
+ "query": "select 1 from user where id = (select id from t1 where user.foo = t1.bar)",
+ "plan": "VT12001: unsupported: correlated subquery is only supported for EXISTS"
+ },
{
"comment": "multi-shard union",
"query": "select 1 from music union (select id from user union select name from unsharded)",
diff --git a/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json b/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json
index 7aaa2648388..d28a9f97482 100644
--- a/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json
+++ b/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json
@@ -252,6 +252,12 @@
"column": "non_planable",
"name": "non_planable_user_map"
}
+ ],
+ "columns": [
+ {
+ "name": "col",
+ "type": "INT16"
+ }
]
},
"user_extra": {
@@ -282,6 +288,12 @@
"column": "id",
"name": "music_user_map"
}
+ ],
+ "columns": [
+ {
+ "name": "intcol",
+ "type": "INT16"
+ }
]
},
"authoritative": {
diff --git a/go/vt/vtgate/semantics/early_rewriter_test.go b/go/vt/vtgate/semantics/early_rewriter_test.go
index c9b793580b0..adf26017d87 100644
--- a/go/vt/vtgate/semantics/early_rewriter_test.go
+++ b/go/vt/vtgate/semantics/early_rewriter_test.go
@@ -854,6 +854,9 @@ func TestRewriteNot(t *testing.T) {
}, {
sql: "select a from t1 where not a > 12",
expected: "select a from t1 where a <= 12",
+ }, {
+ sql: "select (not (1 like ('a' is null)))",
+ expected: "select 1 not like ('a' is null) from dual",
}}
for _, tcase := range tcases {
t.Run(tcase.sql, func(t *testing.T) {
diff --git a/go/vt/vttablet/endtoend/queries_test.go b/go/vt/vttablet/endtoend/queries_test.go
index 3dad415c1c1..b035013ecc3 100644
--- a/go/vt/vttablet/endtoend/queries_test.go
+++ b/go/vt/vttablet/endtoend/queries_test.go
@@ -121,6 +121,20 @@ var TestQueryCases = []framework.Testable{
},
RowsReturned: 1,
},
+ &framework.TestCase{
+ Name: "explain with bindvars",
+ Query: "explain select :__vtudvp as `@p` from dual",
+ BindVars: map[string]*querypb.BindVariable{
+ "__vtudvp": sqltypes.Int64BindVariable(1),
+ },
+ Result: [][]string{
+ {"1", "SIMPLE", "", "", "", "", "", "", "", "", "", "No tables used"},
+ },
+ Rewritten: []string{
+ "explain select 1 as `@p` from dual",
+ },
+ RowsReturned: 1,
+ },
&framework.TestCase{
Name: "limit",
Query: "select /* limit */ eid, id from vitess_a limit :a",
diff --git a/go/vt/vttablet/endtoend/transaction_test.go b/go/vt/vttablet/endtoend/transaction_test.go
index 8f6546df5f1..8c86ecbebe1 100644
--- a/go/vt/vttablet/endtoend/transaction_test.go
+++ b/go/vt/vttablet/endtoend/transaction_test.go
@@ -256,7 +256,7 @@ func TestPrepareRollback(t *testing.T) {
err = client.Prepare("aa")
if err != nil {
client.RollbackPrepared("aa", 0)
- t.Fatalf(err.Error())
+ t.Fatal(err.Error())
}
err = client.RollbackPrepared("aa", 0)
require.NoError(t, err)
diff --git a/go/vt/vttablet/onlineddl/executor.go b/go/vt/vttablet/onlineddl/executor.go
index 910f52a0333..5dd1811affd 100644
--- a/go/vt/vttablet/onlineddl/executor.go
+++ b/go/vt/vttablet/onlineddl/executor.go
@@ -895,8 +895,8 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, sh
migrationCutOverThreshold := getMigrationCutOverThreshold(onlineDDL)
- waitForPos := func(s *VReplStream, pos replication.Position) error {
- ctx, cancel := context.WithTimeout(ctx, migrationCutOverThreshold)
+ waitForPos := func(s *VReplStream, pos replication.Position, timeout time.Duration) error {
+ ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
// Wait for target to reach the up-to-date pos
if err := tmClient.VReplicationWaitForPos(ctx, tablet.Tablet, s.id, replication.EncodePosition(pos)); err != nil {
@@ -954,8 +954,12 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, sh
return err
}
e.updateMigrationStage(ctx, onlineDDL.UUID, "waiting for post-sentry pos: %v", replication.EncodePosition(postSentryPos))
- if err := waitForPos(s, postSentryPos); err != nil {
- return err
+ // We have not yet locked anything, stopped anything, or done anything that otherwise
+ // impacts query serving so we wait for a multiple of the cutover threshold here, with
+ // that variable primarily serving to limit the max time we later spend waiting for
+ // a position again AFTER we've taken the locks and table access is blocked.
+ if err := waitForPos(s, postSentryPos, migrationCutOverThreshold*3); err != nil {
+ return vterrors.Wrapf(err, "failed waiting for pos after sentry creation")
}
e.updateMigrationStage(ctx, onlineDDL.UUID, "post-sentry pos reached")
}
@@ -1129,7 +1133,7 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, sh
}
e.updateMigrationStage(ctx, onlineDDL.UUID, "waiting for post-lock pos: %v", replication.EncodePosition(postWritesPos))
- if err := waitForPos(s, postWritesPos); err != nil {
+ if err := waitForPos(s, postWritesPos, migrationCutOverThreshold); err != nil {
e.updateMigrationStage(ctx, onlineDDL.UUID, "timeout while waiting for post-lock pos: %v", err)
return err
}
diff --git a/go/vt/vttablet/onlineddl/vrepl.go b/go/vt/vttablet/onlineddl/vrepl.go
index 847e40e3fbc..fe6d2bd9141 100644
--- a/go/vt/vttablet/onlineddl/vrepl.go
+++ b/go/vt/vttablet/onlineddl/vrepl.go
@@ -571,6 +571,9 @@ func (v *VRepl) generateFilterQuery(ctx context.Context) error {
sb.WriteString(fmt.Sprintf("CONCAT(%s)", escapeName(name)))
case sourceCol.Type == vrepl.JSONColumnType:
sb.WriteString(fmt.Sprintf("convert(%s using utf8mb4)", escapeName(name)))
+ case targetCol.Type == vrepl.JSONColumnType:
+ // Convert any type to JSON: encode the type as utf8mb4 text
+ sb.WriteString(fmt.Sprintf("convert(%s using utf8mb4)", escapeName(name)))
case sourceCol.Type == vrepl.StringColumnType:
// Check source and target charset/encoding. If needed, create
// a binlogdatapb.CharsetConversion entry (later written to vreplication)
@@ -583,19 +586,19 @@ func (v *VRepl) generateFilterQuery(ctx context.Context) error {
if targetCol.Type == vrepl.StringColumnType && toCollation == collations.Unknown {
return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Character set %s not supported for column %s", targetCol.Charset, targetCol.Name)
}
-
- if trivialCharset(fromCollation) && trivialCharset(toCollation) && targetCol.Type != vrepl.JSONColumnType {
+ if trivialCharset(fromCollation) && trivialCharset(toCollation) {
+ sb.WriteString(escapeName(name))
+ } else if fromCollation == toCollation {
+ // No need for charset conversions as both have the same collation.
sb.WriteString(escapeName(name))
} else {
+ // Charset conversion required:
v.convertCharset[targetName] = &binlogdatapb.CharsetConversion{
FromCharset: sourceCol.Charset,
ToCharset: targetCol.Charset,
}
- sb.WriteString(fmt.Sprintf("convert(%s using utf8mb4)", escapeName(name)))
+ sb.WriteString(escapeName(name))
}
- case targetCol.Type == vrepl.JSONColumnType && sourceCol.Type != vrepl.JSONColumnType:
- // Convert any type to JSON: encode the type as utf8mb4 text
- sb.WriteString(fmt.Sprintf("convert(%s using utf8mb4)", escapeName(name)))
default:
sb.WriteString(escapeName(name))
}
diff --git a/go/vt/vttablet/tabletmanager/framework_test.go b/go/vt/vttablet/tabletmanager/framework_test.go
index 4734ab9ee96..24935912b42 100644
--- a/go/vt/vttablet/tabletmanager/framework_test.go
+++ b/go/vt/vttablet/tabletmanager/framework_test.go
@@ -111,8 +111,8 @@ func newTestEnv(t *testing.T, ctx context.Context, sourceKeyspace string, source
tmclienttest.SetProtocol(fmt.Sprintf("go.vt.vttablet.tabletmanager.framework_test_%s", t.Name()), tenv.protoName)
tenv.mysqld = mysqlctl.NewFakeMysqlDaemon(fakesqldb.New(t))
- var err error
- tenv.mysqld.CurrentPrimaryPosition, err = replication.ParsePosition(gtidFlavor, gtidPosition)
+ curPosition, err := replication.ParsePosition(gtidFlavor, gtidPosition)
+ tenv.mysqld.SetPrimaryPositionLocked(curPosition)
require.NoError(t, err)
return tenv
diff --git a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go
index 424daad4871..dce61436295 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go
@@ -27,6 +27,7 @@ import (
"vitess.io/vitess/go/mysql/collations/charset"
"vitess.io/vitess/go/mysql/collations/colldata"
vjson "vitess.io/vitess/go/mysql/json"
+ "vitess.io/vitess/go/mysql/sqlerror"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/binlog/binlogplayer"
"vitess.io/vitess/go/vt/sqlparser"
@@ -258,7 +259,7 @@ func (tp *TablePlan) applyBulkInsert(sqlbuffer *bytes2.Buffer, rows []*querypb.R
if i > 0 {
sqlbuffer.WriteString(", ")
}
- if err := appendFromRow(tp.BulkInsertValues, sqlbuffer, tp.Fields, row, tp.FieldsToSkip); err != nil {
+ if err := tp.appendFromRow(sqlbuffer, row); err != nil {
return nil, err
}
}
@@ -313,6 +314,30 @@ func (tp *TablePlan) isOutsidePKRange(bindvars map[string]*querypb.BindVariable,
return false
}
+// convertStringCharset does a charset conversion given raw data and an applicable conversion rule.
+// In case of a conversion error, it returns an equivalent of MySQL error 1366, which is what you'd
+// get in a failed `CONVERT()` function, e.g.:
+//
+// > create table tascii(v varchar(100) charset ascii);
+// > insert into tascii values ('€');
+// ERROR 1366 (HY000): Incorrect string value: '\xE2\x82\xAC' for column 'v' at row 1
+func (tp *TablePlan) convertStringCharset(raw []byte, conversion *binlogdatapb.CharsetConversion, fieldName string) ([]byte, error) {
+ fromCollation := tp.CollationEnv.DefaultCollationForCharset(conversion.FromCharset)
+ if fromCollation == collations.Unknown {
+ return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "character set %s not supported for column %s", conversion.FromCharset, fieldName)
+ }
+ toCollation := tp.CollationEnv.DefaultCollationForCharset(conversion.ToCharset)
+ if toCollation == collations.Unknown {
+ return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "character set %s not supported for column %s", conversion.ToCharset, fieldName)
+ }
+
+ out, err := charset.Convert(nil, colldata.Lookup(toCollation).Charset(), raw, colldata.Lookup(fromCollation).Charset())
+ if err != nil {
+ return nil, sqlerror.NewSQLError(sqlerror.ERTruncatedWrongValueForField, sqlerror.SSUnknownSQLState, "Incorrect string value: %s", err.Error())
+ }
+ return out, nil
+}
+
// bindFieldVal returns a bind variable based on given field and value.
// Most values will just bind directly. But some values may need manipulation:
// - text values with charset conversion
@@ -321,11 +346,7 @@ func (tp *TablePlan) isOutsidePKRange(bindvars map[string]*querypb.BindVariable,
func (tp *TablePlan) bindFieldVal(field *querypb.Field, val *sqltypes.Value) (*querypb.BindVariable, error) {
if conversion, ok := tp.ConvertCharset[field.Name]; ok && !val.IsNull() {
// Non-null string value, for which we have a charset conversion instruction
- fromCollation := tp.CollationEnv.DefaultCollationForCharset(conversion.FromCharset)
- if fromCollation == collations.Unknown {
- return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Character set %s not supported for column %s", conversion.FromCharset, field.Name)
- }
- out, err := charset.Convert(nil, charset.Charset_utf8mb4{}, val.Raw(), colldata.Lookup(fromCollation).Charset())
+ out, err := tp.convertStringCharset(val.Raw(), conversion, field.Name)
if err != nil {
return nil, err
}
@@ -619,28 +640,30 @@ func valsEqual(v1, v2 sqltypes.Value) bool {
// note: there can be more fields than bind locations since extra columns might be requested from the source if not all
// primary keys columns are present in the target table, for example. Also some values in the row may not correspond for
// values from the database on the source: sum/count for aggregation queries, for example
-func appendFromRow(pq *sqlparser.ParsedQuery, buf *bytes2.Buffer, fields []*querypb.Field, row *querypb.Row, skipFields map[string]bool) error {
- bindLocations := pq.BindLocations()
- if len(fields) < len(bindLocations) {
+func (tp *TablePlan) appendFromRow(buf *bytes2.Buffer, row *querypb.Row) error {
+ bindLocations := tp.BulkInsertValues.BindLocations()
+ if len(tp.Fields) < len(bindLocations) {
return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "wrong number of fields: got %d fields for %d bind locations ",
- len(fields), len(bindLocations))
+ len(tp.Fields), len(bindLocations))
}
type colInfo struct {
typ querypb.Type
length int64
offset int64
+ field *querypb.Field
}
rowInfo := make([]*colInfo, 0)
offset := int64(0)
- for i, field := range fields { // collect info required for fields to be bound
+ for i, field := range tp.Fields { // collect info required for fields to be bound
length := row.Lengths[i]
- if !skipFields[strings.ToLower(field.Name)] {
+ if !tp.FieldsToSkip[strings.ToLower(field.Name)] {
rowInfo = append(rowInfo, &colInfo{
typ: field.Type,
length: length,
offset: offset,
+ field: field,
})
}
if length > 0 {
@@ -652,7 +675,7 @@ func appendFromRow(pq *sqlparser.ParsedQuery, buf *bytes2.Buffer, fields []*quer
var offsetQuery int
for i, loc := range bindLocations {
col := rowInfo[i]
- buf.WriteString(pq.Query[offsetQuery:loc.Offset])
+ buf.WriteString(tp.BulkInsertValues.Query[offsetQuery:loc.Offset])
typ := col.typ
switch typ {
@@ -674,12 +697,25 @@ func appendFromRow(pq *sqlparser.ParsedQuery, buf *bytes2.Buffer, fields []*quer
// -1 means a null variable; serialize it directly
buf.WriteString(sqltypes.NullStr)
} else {
- vv := sqltypes.MakeTrusted(typ, row.Values[col.offset:col.offset+col.length])
+ raw := row.Values[col.offset : col.offset+col.length]
+ var vv sqltypes.Value
+
+ if conversion, ok := tp.ConvertCharset[col.field.Name]; ok && col.length > 0 {
+ // Non-null string value, for which we have a charset conversion instruction
+ out, err := tp.convertStringCharset(raw, conversion, col.field.Name)
+ if err != nil {
+ return err
+ }
+ vv = sqltypes.MakeTrusted(typ, out)
+ } else {
+ vv = sqltypes.MakeTrusted(typ, raw)
+ }
+
vv.EncodeSQLBytes2(buf)
}
}
offsetQuery = loc.Offset + loc.Length
}
- buf.WriteString(pq.Query[offsetQuery:])
+ buf.WriteString(tp.BulkInsertValues.Query[offsetQuery:])
return nil
}
diff --git a/go/vt/vttablet/tabletmanager/vreplication/utils.go b/go/vt/vttablet/tabletmanager/vreplication/utils.go
index 21c3a61c9f1..9e3ebb42f62 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/utils.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/utils.go
@@ -126,7 +126,12 @@ func isUnrecoverableError(err error) bool {
if err == nil {
return false
}
- if vterrors.Code(err) == vtrpcpb.Code_FAILED_PRECONDITION {
+ switch vterrors.Code(err) {
+ case vtrpcpb.Code_FAILED_PRECONDITION:
+ if vterrors.RxWrongTablet.MatchString(err.Error()) {
+ // If the chosen tablet type changes, say due to PRS/ERS, we should retry.
+ return false
+ }
return true
}
sqlErr, isSQLErr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError)
diff --git a/go/vt/vttablet/tabletmanager/vreplication/utils_test.go b/go/vt/vttablet/tabletmanager/vreplication/utils_test.go
new file mode 100644
index 00000000000..c00ed34a4d6
--- /dev/null
+++ b/go/vt/vttablet/tabletmanager/vreplication/utils_test.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vreplication
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/mysql/sqlerror"
+ "vitess.io/vitess/go/vt/vterrors"
+
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+)
+
+// TestIsUnrecoverableError tests the different error cases for isUnrecoverableError().
+func TestIsUnrecoverableError(t *testing.T) {
+ if runNoBlobTest {
+ t.Skip()
+ }
+
+ type testCase struct {
+ name string
+ err error
+ expected bool
+ }
+
+ testCases := []testCase{
+ {
+ name: "Nil error",
+ err: nil,
+ expected: false,
+ },
+ {
+ name: "vterrors.Code_FAILED_PRECONDITION",
+ err: vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "test error"),
+ expected: true,
+ },
+ {
+ name: "vterrors.Code_FAILED_PRECONDITION, WrongTablet",
+ err: vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "%s: %v, want: %v or %v", vterrors.WrongTablet, "PRIMARY", "REPLICA", nil),
+ expected: false,
+ },
+ {
+ name: "Non-SQL error",
+ err: errors.New("non-SQL error"),
+ expected: false,
+ },
+ {
+ name: "SQL error with ERUnknownError",
+ err: sqlerror.NewSQLError(sqlerror.ERUnknownError, "test SQL error", "test"),
+ expected: false,
+ },
+ {
+ name: "SQL error with ERAccessDeniedError",
+ err: sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, "access denied", "test"),
+ expected: true,
+ },
+ {
+ name: "SQL error with ERDataOutOfRange",
+ err: sqlerror.NewSQLError(sqlerror.ERDataOutOfRange, "data out of range", "test"),
+ expected: true,
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ result := isUnrecoverableError(tc.err)
+ require.Equal(t, tc.expected, result)
+ })
+ }
+}
diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go
index f2cb0a96e71..992618ed3eb 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go
@@ -163,7 +163,7 @@ func newVPlayer(vr *vreplicator, settings binlogplayer.VRSettings, copyState map
timeLastSaved: time.Now(),
tablePlans: make(map[string]*TablePlan),
phase: phase,
- throttlerAppName: throttlerapp.VCopierName.ConcatenateString(vr.throttlerAppName()),
+ throttlerAppName: throttlerapp.VPlayerName.ConcatenateString(vr.throttlerAppName()),
query: queryFunc,
commit: commitFunc,
batchMode: batchMode,
diff --git a/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go b/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go
index 3be0525dc88..4586cc761e8 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go
@@ -31,6 +31,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "vitess.io/vitess/go/mysql/replication"
"vitess.io/vitess/go/vt/binlog/binlogplayer"
"vitess.io/vitess/go/vt/dbconfigs"
"vitess.io/vitess/go/vt/mysqlctl"
@@ -810,3 +811,59 @@ func waitForQueryResult(t *testing.T, dbc binlogplayer.DBClient, query, val stri
}
}
}
+
+func TestThrottlerAppNames(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ tablet := addTablet(100)
+ defer deleteTablet(tablet)
+ filter := &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{{
+ Match: "t1",
+ }},
+ }
+ bls := &binlogdatapb.BinlogSource{
+ Keyspace: env.KeyspaceName,
+ Shard: env.ShardName,
+ Filter: filter,
+ }
+ id := int32(1)
+ vsclient := newTabletConnector(tablet)
+ stats := binlogplayer.NewStats()
+ defer stats.Stop()
+ dbClient := playerEngine.dbClientFactoryFiltered()
+ err := dbClient.Connect()
+ require.NoError(t, err)
+ defer dbClient.Close()
+ dbName := dbClient.DBName()
+ // Ensure there's a dummy vreplication workflow record
+ _, err = dbClient.ExecuteFetch(fmt.Sprintf("insert into _vt.vreplication (id, workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name) values (%d, 'test_workflow', '', '', 99999, 99999, 0, 0, 'Running', '%s') on duplicate key update workflow='test', source='', pos='', max_tps=99999, max_replication_lag=99999, time_updated=0, transaction_timestamp=0, state='Running', db_name='%s'",
+ id, dbName, dbName), 1)
+ require.NoError(t, err)
+ defer func() {
+ _, err = dbClient.ExecuteFetch(fmt.Sprintf("delete from _vt.vreplication where id = %d", id), 1)
+ require.NoError(t, err)
+ }()
+ vr := newVReplicator(id, bls, vsclient, stats, dbClient, env.Mysqld, playerEngine)
+ settings, _, err := vr.loadSettings(ctx, newVDBClient(dbClient, stats))
+ require.NoError(t, err)
+
+ throttlerAppName := vr.throttlerAppName()
+ assert.Contains(t, throttlerAppName, "test_workflow")
+ assert.Contains(t, throttlerAppName, "vreplication")
+ assert.NotContains(t, throttlerAppName, "vcopier")
+ assert.NotContains(t, throttlerAppName, "vplayer")
+
+ vp := newVPlayer(vr, settings, nil, replication.Position{}, "")
+ assert.Contains(t, vp.throttlerAppName, "test_workflow")
+ assert.Contains(t, vp.throttlerAppName, "vreplication")
+ assert.Contains(t, vp.throttlerAppName, "vplayer")
+ assert.NotContains(t, vp.throttlerAppName, "vcopier")
+
+ vc := newVCopier(vr)
+ assert.Contains(t, vc.throttlerAppName, "test_workflow")
+ assert.Contains(t, vc.throttlerAppName, "vreplication")
+ assert.Contains(t, vc.throttlerAppName, "vcopier")
+ assert.NotContains(t, vc.throttlerAppName, "vplayer")
+}
diff --git a/go/vt/vttablet/tabletserver/planbuilder/permission.go b/go/vt/vttablet/tabletserver/planbuilder/permission.go
index 79b2f9eb430..57a35666122 100644
--- a/go/vt/vttablet/tabletserver/planbuilder/permission.go
+++ b/go/vt/vttablet/tabletserver/planbuilder/permission.go
@@ -39,17 +39,17 @@ func BuildPermissions(stmt sqlparser.Statement) []Permission {
case *sqlparser.Union, *sqlparser.Select:
permissions = buildSubqueryPermissions(node, tableacl.READER, permissions)
case *sqlparser.Insert:
- permissions = buildTableExprPermissions(node.Table, tableacl.WRITER, permissions)
+ permissions = buildTableExprPermissions(node.Table, tableacl.WRITER, nil, permissions)
permissions = buildSubqueryPermissions(node, tableacl.READER, permissions)
case *sqlparser.Update:
- permissions = buildTableExprsPermissions(node.TableExprs, tableacl.WRITER, permissions)
+ permissions = buildTableExprsPermissions(node.TableExprs, tableacl.WRITER, nil, permissions)
permissions = buildSubqueryPermissions(node, tableacl.READER, permissions)
case *sqlparser.Delete:
- permissions = buildTableExprsPermissions(node.TableExprs, tableacl.WRITER, permissions)
+ permissions = buildTableExprsPermissions(node.TableExprs, tableacl.WRITER, nil, permissions)
permissions = buildSubqueryPermissions(node, tableacl.READER, permissions)
case sqlparser.DDLStatement:
for _, t := range node.AffectedTables() {
- permissions = buildTableNamePermissions(t, tableacl.ADMIN, permissions)
+ permissions = buildTableNamePermissions(t, tableacl.ADMIN, nil, permissions)
}
case
*sqlparser.AlterMigration,
@@ -60,10 +60,10 @@ func BuildPermissions(stmt sqlparser.Statement) []Permission {
permissions = []Permission{} // TODO(shlomi) what are the correct permissions here? Table is unknown
case *sqlparser.Flush:
for _, t := range node.TableNames {
- permissions = buildTableNamePermissions(t, tableacl.ADMIN, permissions)
+ permissions = buildTableNamePermissions(t, tableacl.ADMIN, nil, permissions)
}
case *sqlparser.Analyze:
- permissions = buildTableNamePermissions(node.Table, tableacl.WRITER, permissions)
+ permissions = buildTableNamePermissions(node.Table, tableacl.WRITER, nil, permissions)
case *sqlparser.OtherAdmin, *sqlparser.CallProc, *sqlparser.Begin, *sqlparser.Commit, *sqlparser.Rollback,
*sqlparser.Load, *sqlparser.Savepoint, *sqlparser.Release, *sqlparser.SRollback, *sqlparser.Set, *sqlparser.Show, sqlparser.Explain,
*sqlparser.UnlockTables:
@@ -75,43 +75,92 @@ func BuildPermissions(stmt sqlparser.Statement) []Permission {
}
func buildSubqueryPermissions(stmt sqlparser.Statement, role tableacl.Role, permissions []Permission) []Permission {
- _ = sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) {
- if sel, ok := node.(*sqlparser.Select); ok {
- permissions = buildTableExprsPermissions(sel.From, role, permissions)
+ var cteScopes [][]sqlparser.IdentifierCS
+ sqlparser.Rewrite(stmt, func(cursor *sqlparser.Cursor) bool {
+ switch node := cursor.Node().(type) {
+ case *sqlparser.Select:
+ if node.With != nil {
+ cteScopes = append(cteScopes, gatherCTEs(node.With))
+ }
+ var ctes []sqlparser.IdentifierCS
+ for _, cteScope := range cteScopes {
+ ctes = append(ctes, cteScope...)
+ }
+ permissions = buildTableExprsPermissions(node.From, role, ctes, permissions)
+ case *sqlparser.Delete:
+ if node.With != nil {
+ cteScopes = append(cteScopes, gatherCTEs(node.With))
+ }
+ case *sqlparser.Update:
+ if node.With != nil {
+ cteScopes = append(cteScopes, gatherCTEs(node.With))
+ }
+ case *sqlparser.Union:
+ if node.With != nil {
+ cteScopes = append(cteScopes, gatherCTEs(node.With))
+ }
}
- return true, nil
- }, stmt)
+ return true
+ }, func(cursor *sqlparser.Cursor) bool {
+ // When we encounter a With expression coming up, we should remove
+ // the last value from the cte scopes to ensure that none of the outer
+ // elements of the query see this table name.
+ _, isWith := cursor.Node().(*sqlparser.With)
+ if isWith {
+ cteScopes = cteScopes[:len(cteScopes)-1]
+ }
+ return true
+ })
return permissions
}
-func buildTableExprsPermissions(node []sqlparser.TableExpr, role tableacl.Role, permissions []Permission) []Permission {
+// gatherCTEs gathers the CTEs from the WITH clause.
+func gatherCTEs(with *sqlparser.With) []sqlparser.IdentifierCS {
+ var ctes []sqlparser.IdentifierCS
+ for _, cte := range with.CTEs {
+ ctes = append(ctes, cte.ID)
+ }
+ return ctes
+}
+
+func buildTableExprsPermissions(node []sqlparser.TableExpr, role tableacl.Role, ctes []sqlparser.IdentifierCS, permissions []Permission) []Permission {
for _, node := range node {
- permissions = buildTableExprPermissions(node, role, permissions)
+ permissions = buildTableExprPermissions(node, role, ctes, permissions)
}
return permissions
}
-func buildTableExprPermissions(node sqlparser.TableExpr, role tableacl.Role, permissions []Permission) []Permission {
+func buildTableExprPermissions(node sqlparser.TableExpr, role tableacl.Role, ctes []sqlparser.IdentifierCS, permissions []Permission) []Permission {
switch node := node.(type) {
case *sqlparser.AliasedTableExpr:
// An AliasedTableExpr can also be a derived table, but we should skip them here
// because the buildSubQueryPermissions walker will catch them and extract
// the corresponding table names.
if tblName, ok := node.Expr.(sqlparser.TableName); ok {
- permissions = buildTableNamePermissions(tblName, role, permissions)
+ permissions = buildTableNamePermissions(tblName, role, ctes, permissions)
}
case *sqlparser.ParenTableExpr:
- permissions = buildTableExprsPermissions(node.Exprs, role, permissions)
+ permissions = buildTableExprsPermissions(node.Exprs, role, ctes, permissions)
case *sqlparser.JoinTableExpr:
- permissions = buildTableExprPermissions(node.LeftExpr, role, permissions)
- permissions = buildTableExprPermissions(node.RightExpr, role, permissions)
+ permissions = buildTableExprPermissions(node.LeftExpr, role, ctes, permissions)
+ permissions = buildTableExprPermissions(node.RightExpr, role, ctes, permissions)
}
return permissions
}
-func buildTableNamePermissions(node sqlparser.TableName, role tableacl.Role, permissions []Permission) []Permission {
+func buildTableNamePermissions(node sqlparser.TableName, role tableacl.Role, ctes []sqlparser.IdentifierCS, permissions []Permission) []Permission {
+ tableName := node.Name.String()
+ // Check whether this table is a cte or not.
+ // If the table name is qualified, then it cannot be a cte.
+ if node.Qualifier.IsEmpty() {
+ for _, cte := range ctes {
+ if cte.String() == tableName {
+ return permissions
+ }
+ }
+ }
permissions = append(permissions, Permission{
- TableName: node.Name.String(),
+ TableName: tableName,
Role: role,
})
return permissions
diff --git a/go/vt/vttablet/tabletserver/planbuilder/permission_test.go b/go/vt/vttablet/tabletserver/planbuilder/permission_test.go
index 6d42118cb0b..7a793dadbc3 100644
--- a/go/vt/vttablet/tabletserver/planbuilder/permission_test.go
+++ b/go/vt/vttablet/tabletserver/planbuilder/permission_test.go
@@ -174,6 +174,45 @@ func TestBuildPermissions(t *testing.T) {
}, {
TableName: "t1", // derived table in update or delete needs reader permission as they cannot be modified.
}},
+ }, {
+ input: "with t as (select count(*) as a from user) select a from t",
+ output: []Permission{{
+ TableName: "user",
+ Role: tableacl.READER,
+ }},
+ }, {
+ input: "with d as (select id, count(*) as a from user) select d.a from music join d on music.user_id = d.id group by 1",
+ output: []Permission{{
+ TableName: "music",
+ Role: tableacl.READER,
+ }, {
+ TableName: "user",
+ Role: tableacl.READER,
+ }},
+ }, {
+ input: "WITH t1 AS ( SELECT id FROM t2 ) SELECT * FROM t1 JOIN ks.t1 AS t3",
+ output: []Permission{{
+ TableName: "t1",
+ Role: tableacl.READER,
+ }, {
+ TableName: "t2",
+ Role: tableacl.READER,
+ }},
+ }, {
+ input: "WITH RECURSIVE t1 (n) AS ( SELECT id from t2 UNION ALL SELECT n + 1 FROM t1 WHERE n < 5 ) SELECT * FROM t1 JOIN t1 AS t3",
+ output: []Permission{{
+ TableName: "t2",
+ Role: tableacl.READER,
+ }},
+ }, {
+ input: "(with t1 as (select count(*) as a from user) select a from t1) union select * from t1",
+ output: []Permission{{
+ TableName: "user",
+ Role: tableacl.READER,
+ }, {
+ TableName: "t1",
+ Role: tableacl.READER,
+ }},
}}
for _, tcase := range tcases {
diff --git a/go/vt/vttablet/tabletserver/planbuilder/plan.go b/go/vt/vttablet/tabletserver/planbuilder/plan.go
index 7b1e57c2f90..d37896a84f9 100644
--- a/go/vt/vttablet/tabletserver/planbuilder/plan.go
+++ b/go/vt/vttablet/tabletserver/planbuilder/plan.go
@@ -234,7 +234,9 @@ func Build(env *vtenv.Environment, statement sqlparser.Statement, tables map[str
case *sqlparser.Show:
plan, err = analyzeShow(stmt, dbName)
case *sqlparser.Analyze, sqlparser.Explain:
- plan, err = &Plan{PlanID: PlanOtherRead}, nil
+ // Analyze and Explain are treated as read-only queries.
+ // We send down a string, and get a table result back.
+ plan = &Plan{PlanID: PlanSelect, FullQuery: GenerateFullQuery(stmt)}
case *sqlparser.OtherAdmin:
plan, err = &Plan{PlanID: PlanOtherAdmin}, nil
case *sqlparser.Savepoint:
diff --git a/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt b/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt
index 977b3822050..d921180f158 100644
--- a/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt
+++ b/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt
@@ -763,14 +763,15 @@ options:PassthroughDMLs
# analyze
"analyze table a"
{
- "PlanID": "OtherRead",
+ "PlanID": "Select",
"TableName": "",
"Permissions": [
- {
- "TableName": "a",
- "Role": 1
- }
- ]
+ {
+ "TableName": "a",
+ "Role": 1
+ }
+ ],
+ "FullQuery": "analyze table a"
}
# show
@@ -783,15 +784,17 @@ options:PassthroughDMLs
# describe
"describe a"
{
- "PlanID": "OtherRead",
- "TableName": ""
+ "PlanID": "Select",
+ "TableName": "",
+ "FullQuery": "explain a"
}
# explain
"explain a"
{
- "PlanID": "OtherRead",
- "TableName": ""
+ "PlanID": "Select",
+ "TableName": "",
+ "FullQuery": "explain a"
}
# repair
diff --git a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go
index 2b770c1d4f4..854157b1546 100644
--- a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go
+++ b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go
@@ -88,7 +88,7 @@ type uvstreamer struct {
config *uvstreamerConfig
- vs *vstreamer //last vstreamer created in uvstreamer
+ vs *vstreamer // last vstreamer created in uvstreamer
}
type uvstreamerConfig struct {
@@ -138,6 +138,9 @@ func (uvs *uvstreamer) buildTablePlan() error {
uvs.plans = make(map[string]*tablePlan)
tableLastPKs := make(map[string]*binlogdatapb.TableLastPK)
for _, tablePK := range uvs.inTablePKs {
+ if tablePK != nil && tablePK.Lastpk != nil && len(tablePK.Lastpk.Fields) == 0 {
+ return fmt.Errorf("lastpk for table %s has no fields defined", tablePK.TableName)
+ }
tableLastPKs[tablePK.TableName] = tablePK
}
tables := uvs.se.GetSchema()
@@ -313,7 +316,6 @@ func (uvs *uvstreamer) send2(evs []*binlogdatapb.VEvent) error {
}
behind := time.Now().UnixNano() - uvs.lastTimestampNs
uvs.setReplicationLagSeconds(behind / 1e9)
- //log.Infof("sbm set to %d", uvs.ReplicationLagSeconds)
var evs2 []*binlogdatapb.VEvent
if len(uvs.plans) > 0 {
evs2 = uvs.filterEvents(evs)
diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_flaky_test.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_flaky_test.go
index 0eda0d6c52e..0fb9a841a7c 100644
--- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_flaky_test.go
+++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_flaky_test.go
@@ -83,6 +83,42 @@ func (tfe *TestFieldEvent) String() string {
return s
}
+// TestVStreamMissingFieldsInLastPK tests that we error out if the lastpk for a table is missing the fields spec.
+func TestVStreamMissingFieldsInLastPK(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ oldEngine := engine
+ engine = nil
+ oldEnv := env
+ env = nil
+ newEngine(t, ctx, "noblob")
+ defer func() {
+ engine = oldEngine
+ env = oldEnv
+ }()
+ execStatements(t, []string{
+ "create table t1(id int, blb blob, val varchar(4), primary key(id))",
+ })
+ defer execStatements(t, []string{
+ "drop table t1",
+ })
+ engine.se.Reload(context.Background())
+ var tablePKs []*binlogdatapb.TableLastPK
+ tablePKs = append(tablePKs, getTablePK("t1", 1))
+ for _, tpk := range tablePKs {
+ tpk.Lastpk.Fields = nil
+ }
+ filter := &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{{
+ Match: "t1",
+ Filter: "select * from t1",
+ }},
+ }
+ ch := make(chan []*binlogdatapb.VEvent)
+ err := vstream(ctx, t, "", tablePKs, filter, ch)
+ require.ErrorContains(t, err, "lastpk for table t1 has no fields defined")
+}
+
+ // TestNoBlob sets up a new environment with mysql running with binlog_row_image as noblob. It confirms that
// the VEvents created are correct: that they don't contain the missing columns and that the DataColumns bitmap is sent
func TestNoBlob(t *testing.T) {
diff --git a/go/vt/wrangler/testlib/backup_test.go b/go/vt/wrangler/testlib/backup_test.go
index 0de8bfd78f3..8c8343608ad 100644
--- a/go/vt/wrangler/testlib/backup_test.go
+++ b/go/vt/wrangler/testlib/backup_test.go
@@ -150,7 +150,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error {
primary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, db)
primary.FakeMysqlDaemon.ReadOnly = false
primary.FakeMysqlDaemon.Replicating = false
- primary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ primary.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
2: replication.MariadbGTID{
Domain: 2,
@@ -158,7 +158,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error {
Sequence: 457,
},
},
- }
+ })
// start primary so that replica can fetch primary position from it
primary.StartActionLoop(t, wr)
@@ -170,7 +170,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error {
sourceTablet.FakeMysqlDaemon.ReadOnly = true
sourceTablet.FakeMysqlDaemon.Replicating = true
sourceTablet.FakeMysqlDaemon.SetReplicationSourceInputs = []string{fmt.Sprintf("%s:%d", primary.Tablet.MysqlHostname, primary.Tablet.MysqlPort)}
- sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ sourceTablet.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
2: replication.MariadbGTID{
Domain: 2,
@@ -178,7 +178,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error {
Sequence: 457,
},
},
- }
+ })
sourceTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
"STOP SLAVE",
@@ -223,7 +223,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error {
destTablet := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, db)
destTablet.FakeMysqlDaemon.ReadOnly = true
destTablet.FakeMysqlDaemon.Replicating = true
- destTablet.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ destTablet.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
2: replication.MariadbGTID{
Domain: 2,
@@ -231,7 +231,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error {
Sequence: 457,
},
},
- }
+ })
destTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
"STOP SLAVE",
@@ -249,7 +249,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error {
"RESET MASTER": {},
"SET GLOBAL gtid_purged": {},
}
- destTablet.FakeMysqlDaemon.SetReplicationPositionPos = sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition
+ destTablet.FakeMysqlDaemon.SetReplicationPositionPos = sourceTablet.FakeMysqlDaemon.GetPrimaryPositionLocked()
destTablet.FakeMysqlDaemon.SetReplicationSourceInputs = append(destTablet.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet))
destTablet.StartActionLoop(t, wr)
@@ -301,7 +301,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error {
"START SLAVE",
}
- primary.FakeMysqlDaemon.SetReplicationPositionPos = primary.FakeMysqlDaemon.CurrentPrimaryPosition
+ primary.FakeMysqlDaemon.SetReplicationPositionPos = primary.FakeMysqlDaemon.GetPrimaryPositionLocked()
// restore primary from latest backup
require.NoError(t, primary.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, time.Time{} /* restoreToTimestamp */, "", mysqlShutdownTimeout),
@@ -388,7 +388,7 @@ func TestBackupRestoreLagged(t *testing.T) {
primary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, db)
primary.FakeMysqlDaemon.ReadOnly = false
primary.FakeMysqlDaemon.Replicating = false
- primary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ primary.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
2: replication.MariadbGTID{
Domain: 2,
@@ -396,7 +396,7 @@ func TestBackupRestoreLagged(t *testing.T) {
Sequence: 457,
},
},
- }
+ })
// start primary so that replica can fetch primary position from it
primary.StartActionLoop(t, wr)
@@ -407,7 +407,7 @@ func TestBackupRestoreLagged(t *testing.T) {
sourceTablet := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, db)
sourceTablet.FakeMysqlDaemon.ReadOnly = true
sourceTablet.FakeMysqlDaemon.Replicating = true
- sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ sourceTablet.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
2: replication.MariadbGTID{
Domain: 2,
@@ -415,7 +415,7 @@ func TestBackupRestoreLagged(t *testing.T) {
Sequence: 456,
},
},
- }
+ })
sourceTablet.FakeMysqlDaemon.SetReplicationSourceInputs = []string{fmt.Sprintf("%s:%d", primary.Tablet.MysqlHostname, primary.Tablet.MysqlPort)}
sourceTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
@@ -449,7 +449,7 @@ func TestBackupRestoreLagged(t *testing.T) {
timer := time.NewTicker(1 * time.Second)
<-timer.C
- sourceTablet.FakeMysqlDaemon.CurrentPrimaryPositionLocked(replication.Position{
+ sourceTablet.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
2: replication.MariadbGTID{
Domain: 2,
@@ -468,7 +468,7 @@ func TestBackupRestoreLagged(t *testing.T) {
require.NoError(t, sourceTablet.FakeMysqlDaemon.CheckSuperQueryList())
assert.True(t, sourceTablet.FakeMysqlDaemon.Replicating)
assert.True(t, sourceTablet.FakeMysqlDaemon.Running)
- assert.Equal(t, primary.FakeMysqlDaemon.CurrentPrimaryPosition, sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition)
+ assert.Equal(t, primary.FakeMysqlDaemon.GetPrimaryPositionLocked(), sourceTablet.FakeMysqlDaemon.GetPrimaryPositionLocked())
case <-timer2.C:
require.FailNow(t, "Backup timed out")
}
@@ -477,7 +477,7 @@ func TestBackupRestoreLagged(t *testing.T) {
destTablet := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, db)
destTablet.FakeMysqlDaemon.ReadOnly = true
destTablet.FakeMysqlDaemon.Replicating = true
- destTablet.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ destTablet.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
2: replication.MariadbGTID{
Domain: 2,
@@ -485,7 +485,7 @@ func TestBackupRestoreLagged(t *testing.T) {
Sequence: 456,
},
},
- }
+ })
destTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
"STOP SLAVE",
@@ -503,7 +503,7 @@ func TestBackupRestoreLagged(t *testing.T) {
"RESET MASTER": {},
"SET GLOBAL gtid_purged": {},
}
- destTablet.FakeMysqlDaemon.SetReplicationPositionPos = destTablet.FakeMysqlDaemon.CurrentPrimaryPosition
+ destTablet.FakeMysqlDaemon.SetReplicationPositionPos = destTablet.FakeMysqlDaemon.GetPrimaryPositionLocked()
destTablet.FakeMysqlDaemon.SetReplicationSourceInputs = append(destTablet.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet))
destTablet.StartActionLoop(t, wr)
@@ -526,7 +526,7 @@ func TestBackupRestoreLagged(t *testing.T) {
timer = time.NewTicker(1 * time.Second)
<-timer.C
- destTablet.FakeMysqlDaemon.CurrentPrimaryPositionLocked(replication.Position{
+ destTablet.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
2: replication.MariadbGTID{
Domain: 2,
@@ -544,7 +544,7 @@ func TestBackupRestoreLagged(t *testing.T) {
require.NoError(t, destTablet.FakeMysqlDaemon.CheckSuperQueryList(), "destTablet.FakeMysqlDaemon.CheckSuperQueryList failed")
assert.True(t, destTablet.FakeMysqlDaemon.Replicating)
assert.True(t, destTablet.FakeMysqlDaemon.Running)
- assert.Equal(t, primary.FakeMysqlDaemon.CurrentPrimaryPosition, destTablet.FakeMysqlDaemon.CurrentPrimaryPosition)
+ assert.Equal(t, primary.FakeMysqlDaemon.GetPrimaryPositionLocked(), destTablet.FakeMysqlDaemon.GetPrimaryPositionLocked())
case <-timer2.C:
require.FailNow(t, "Restore timed out")
}
@@ -607,7 +607,7 @@ func TestRestoreUnreachablePrimary(t *testing.T) {
primary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, db)
primary.FakeMysqlDaemon.ReadOnly = false
primary.FakeMysqlDaemon.Replicating = false
- primary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ primary.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
2: replication.MariadbGTID{
Domain: 2,
@@ -615,7 +615,7 @@ func TestRestoreUnreachablePrimary(t *testing.T) {
Sequence: 457,
},
},
- }
+ })
// start primary so that replica can fetch primary position from it
primary.StartActionLoop(t, wr)
@@ -625,7 +625,7 @@ func TestRestoreUnreachablePrimary(t *testing.T) {
sourceTablet := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, db)
sourceTablet.FakeMysqlDaemon.ReadOnly = true
sourceTablet.FakeMysqlDaemon.Replicating = true
- sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ sourceTablet.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
2: replication.MariadbGTID{
Domain: 2,
@@ -633,7 +633,7 @@ func TestRestoreUnreachablePrimary(t *testing.T) {
Sequence: 457,
},
},
- }
+ })
sourceTablet.FakeMysqlDaemon.SetReplicationSourceInputs = []string{fmt.Sprintf("%s:%d", primary.Tablet.MysqlHostname, primary.Tablet.MysqlPort)}
sourceTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
@@ -667,7 +667,7 @@ func TestRestoreUnreachablePrimary(t *testing.T) {
destTablet := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, db)
destTablet.FakeMysqlDaemon.ReadOnly = true
destTablet.FakeMysqlDaemon.Replicating = true
- destTablet.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ destTablet.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
2: replication.MariadbGTID{
Domain: 2,
@@ -675,7 +675,7 @@ func TestRestoreUnreachablePrimary(t *testing.T) {
Sequence: 457,
},
},
- }
+ })
destTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
"STOP SLAVE",
@@ -693,7 +693,7 @@ func TestRestoreUnreachablePrimary(t *testing.T) {
"RESET MASTER": {},
"SET GLOBAL gtid_purged": {},
}
- destTablet.FakeMysqlDaemon.SetReplicationPositionPos = sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition
+ destTablet.FakeMysqlDaemon.SetReplicationPositionPos = sourceTablet.FakeMysqlDaemon.GetPrimaryPositionLocked()
destTablet.FakeMysqlDaemon.SetReplicationSourceInputs = append(destTablet.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet))
destTablet.StartActionLoop(t, wr)
@@ -782,7 +782,7 @@ func TestDisableActiveReparents(t *testing.T) {
primary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, db)
primary.FakeMysqlDaemon.ReadOnly = false
primary.FakeMysqlDaemon.Replicating = false
- primary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ primary.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
2: replication.MariadbGTID{
Domain: 2,
@@ -790,7 +790,7 @@ func TestDisableActiveReparents(t *testing.T) {
Sequence: 457,
},
},
- }
+ })
// start primary so that replica can fetch primary position from it
primary.StartActionLoop(t, wr)
@@ -801,7 +801,7 @@ func TestDisableActiveReparents(t *testing.T) {
sourceTablet := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, db)
sourceTablet.FakeMysqlDaemon.ReadOnly = true
sourceTablet.FakeMysqlDaemon.Replicating = true
- sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ sourceTablet.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
2: replication.MariadbGTID{
Domain: 2,
@@ -809,7 +809,7 @@ func TestDisableActiveReparents(t *testing.T) {
Sequence: 457,
},
},
- }
+ })
sourceTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"STOP SLAVE",
}
@@ -834,7 +834,7 @@ func TestDisableActiveReparents(t *testing.T) {
destTablet := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, db)
destTablet.FakeMysqlDaemon.ReadOnly = true
destTablet.FakeMysqlDaemon.Replicating = true
- destTablet.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ destTablet.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
2: replication.MariadbGTID{
Domain: 2,
@@ -842,7 +842,7 @@ func TestDisableActiveReparents(t *testing.T) {
Sequence: 457,
},
},
- }
+ })
destTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"STOP SLAVE",
"RESET SLAVE ALL",
@@ -853,7 +853,7 @@ func TestDisableActiveReparents(t *testing.T) {
"RESET MASTER": {},
"SET GLOBAL gtid_purged": {},
}
- destTablet.FakeMysqlDaemon.SetReplicationPositionPos = sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition
+ destTablet.FakeMysqlDaemon.SetReplicationPositionPos = sourceTablet.FakeMysqlDaemon.GetPrimaryPositionLocked()
destTablet.FakeMysqlDaemon.SetReplicationSourceInputs = append(destTablet.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet))
destTablet.StartActionLoop(t, wr)
diff --git a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go
index 6cafe83b684..c72f92d0da7 100644
--- a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go
+++ b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go
@@ -63,7 +63,7 @@ func TestEmergencyReparentShard(t *testing.T) {
reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", "semi_sync")
oldPrimary.FakeMysqlDaemon.Replicating = false
- oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ oldPrimary.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
2: replication.MariadbGTID{
Domain: 2,
@@ -71,7 +71,7 @@ func TestEmergencyReparentShard(t *testing.T) {
Sequence: 456,
},
},
- }
+ })
currentPrimaryFilePosition, _ := replication.ParseFilePosGTIDSet("mariadb-bin.000010:456")
oldPrimary.FakeMysqlDaemon.CurrentSourceFilePosition = replication.Position{
GTIDSet: currentPrimaryFilePosition,
@@ -80,7 +80,7 @@ func TestEmergencyReparentShard(t *testing.T) {
// new primary
newPrimary.FakeMysqlDaemon.ReadOnly = true
newPrimary.FakeMysqlDaemon.Replicating = true
- newPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ newPrimary.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
2: replication.MariadbGTID{
Domain: 2,
@@ -88,7 +88,7 @@ func TestEmergencyReparentShard(t *testing.T) {
Sequence: 456,
},
},
- }
+ })
newPrimaryRelayLogPos, _ := replication.ParseFilePosGTIDSet("relay-bin.000004:456")
newPrimary.FakeMysqlDaemon.CurrentSourceFilePosition = replication.Position{
GTIDSet: newPrimaryRelayLogPos,
@@ -123,7 +123,7 @@ func TestEmergencyReparentShard(t *testing.T) {
// good replica 1 is replicating
goodReplica1.FakeMysqlDaemon.ReadOnly = true
goodReplica1.FakeMysqlDaemon.Replicating = true
- goodReplica1.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ goodReplica1.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
2: replication.MariadbGTID{
Domain: 2,
@@ -131,7 +131,7 @@ func TestEmergencyReparentShard(t *testing.T) {
Sequence: 455,
},
},
- }
+ })
goodReplica1RelayLogPos, _ := replication.ParseFilePosGTIDSet("relay-bin.000004:455")
goodReplica1.FakeMysqlDaemon.CurrentSourceFilePosition = replication.Position{
GTIDSet: goodReplica1RelayLogPos,
@@ -154,7 +154,7 @@ func TestEmergencyReparentShard(t *testing.T) {
// good replica 2 is not replicating
goodReplica2.FakeMysqlDaemon.ReadOnly = true
goodReplica2.FakeMysqlDaemon.Replicating = false
- goodReplica2.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ goodReplica2.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
2: replication.MariadbGTID{
Domain: 2,
@@ -162,7 +162,7 @@ func TestEmergencyReparentShard(t *testing.T) {
Sequence: 454,
},
},
- }
+ })
goodReplica2RelayLogPos, _ := replication.ParseFilePosGTIDSet("relay-bin.000004:454")
goodReplica2.FakeMysqlDaemon.CurrentSourceFilePosition = replication.Position{
GTIDSet: goodReplica2RelayLogPos,
@@ -217,7 +217,7 @@ func TestEmergencyReparentShardPrimaryElectNotBest(t *testing.T) {
newPrimary.FakeMysqlDaemon.Replicating = true
// It has transactions in its relay log, but not as many as
// moreAdvancedReplica
- newPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ newPrimary.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
2: replication.MariadbGTID{
Domain: 2,
@@ -225,7 +225,7 @@ func TestEmergencyReparentShardPrimaryElectNotBest(t *testing.T) {
Sequence: 456,
},
},
- }
+ })
newPrimaryRelayLogPos, _ := replication.ParseFilePosGTIDSet("relay-bin.000004:456")
newPrimary.FakeMysqlDaemon.CurrentSourceFilePosition = replication.Position{
GTIDSet: newPrimaryRelayLogPos,
@@ -250,7 +250,7 @@ func TestEmergencyReparentShardPrimaryElectNotBest(t *testing.T) {
// more advanced replica
moreAdvancedReplica.FakeMysqlDaemon.Replicating = true
// relay log position is more advanced than desired new primary
- moreAdvancedReplica.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ moreAdvancedReplica.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
2: replication.MariadbGTID{
Domain: 2,
@@ -258,14 +258,14 @@ func TestEmergencyReparentShardPrimaryElectNotBest(t *testing.T) {
Sequence: 457,
},
},
- }
+ })
moreAdvancedReplicaLogPos, _ := replication.ParseFilePosGTIDSet("relay-bin.000004:457")
moreAdvancedReplica.FakeMysqlDaemon.CurrentSourceFilePosition = replication.Position{
GTIDSet: moreAdvancedReplicaLogPos,
}
moreAdvancedReplica.FakeMysqlDaemon.SetReplicationSourceInputs = append(moreAdvancedReplica.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), topoproto.MysqlAddr(oldPrimary.Tablet))
moreAdvancedReplica.FakeMysqlDaemon.WaitPrimaryPositions = append(moreAdvancedReplica.FakeMysqlDaemon.WaitPrimaryPositions, moreAdvancedReplica.FakeMysqlDaemon.CurrentSourceFilePosition)
- newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = append(newPrimary.FakeMysqlDaemon.WaitPrimaryPositions, moreAdvancedReplica.FakeMysqlDaemon.CurrentPrimaryPosition)
+ newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = append(newPrimary.FakeMysqlDaemon.WaitPrimaryPositions, moreAdvancedReplica.FakeMysqlDaemon.GetPrimaryPositionLocked())
moreAdvancedReplica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
"STOP SLAVE",
diff --git a/go/vt/wrangler/testlib/planned_reparent_shard_test.go b/go/vt/wrangler/testlib/planned_reparent_shard_test.go
index 7069df9d3e1..5441381fb5a 100644
--- a/go/vt/wrangler/testlib/planned_reparent_shard_test.go
+++ b/go/vt/wrangler/testlib/planned_reparent_shard_test.go
@@ -96,7 +96,7 @@ func TestPlannedReparentShardNoPrimaryProvided(t *testing.T) {
oldPrimary.FakeMysqlDaemon.ReadOnly = false
oldPrimary.FakeMysqlDaemon.Replicating = false
oldPrimary.FakeMysqlDaemon.ReplicationStatusError = mysql.ErrNotReplica
- oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.WaitPrimaryPositions[0]
+ oldPrimary.FakeMysqlDaemon.SetPrimaryPositionLocked(newPrimary.FakeMysqlDaemon.WaitPrimaryPositions[0])
oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet))
oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"FAKE SET MASTER",
@@ -213,7 +213,7 @@ func TestPlannedReparentShardNoError(t *testing.T) {
oldPrimary.FakeMysqlDaemon.ReadOnly = false
oldPrimary.FakeMysqlDaemon.Replicating = false
oldPrimary.FakeMysqlDaemon.ReplicationStatusError = mysql.ErrNotReplica
- oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.WaitPrimaryPositions[0]
+ oldPrimary.FakeMysqlDaemon.SetPrimaryPositionLocked(newPrimary.FakeMysqlDaemon.WaitPrimaryPositions[0])
oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet))
oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"FAKE SET MASTER",
@@ -434,7 +434,7 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) {
oldPrimary.FakeMysqlDaemon.ReadOnly = false
oldPrimary.FakeMysqlDaemon.Replicating = false
// set to incorrect value to make promote fail on WaitForReplicationPos
- oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.PromoteResult
+ oldPrimary.FakeMysqlDaemon.SetPrimaryPositionLocked(newPrimary.FakeMysqlDaemon.PromoteResult)
oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet))
oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"FAKE SET MASTER",
@@ -542,7 +542,7 @@ func TestPlannedReparentShardWaitForPositionTimeout(t *testing.T) {
// old primary
oldPrimary.FakeMysqlDaemon.ReadOnly = false
oldPrimary.FakeMysqlDaemon.Replicating = false
- oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.WaitPrimaryPositions[0]
+ oldPrimary.FakeMysqlDaemon.SetPrimaryPositionLocked(newPrimary.FakeMysqlDaemon.WaitPrimaryPositions[0])
oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet))
oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"FAKE SET MASTER",
@@ -616,7 +616,7 @@ func TestPlannedReparentShardRelayLogError(t *testing.T) {
primary.FakeMysqlDaemon.ReadOnly = false
primary.FakeMysqlDaemon.Replicating = false
primary.FakeMysqlDaemon.ReplicationStatusError = mysql.ErrNotReplica
- primary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ primary.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
7: replication.MariadbGTID{
Domain: 7,
@@ -624,7 +624,7 @@ func TestPlannedReparentShardRelayLogError(t *testing.T) {
Sequence: 990,
},
},
- }
+ })
primary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"SUBINSERT INTO _vt.reparent_journal (time_created_ns, action_name, primary_alias, replication_position) VALUES",
}
@@ -697,7 +697,7 @@ func TestPlannedReparentShardRelayLogErrorStartReplication(t *testing.T) {
primary.FakeMysqlDaemon.ReadOnly = false
primary.FakeMysqlDaemon.Replicating = false
primary.FakeMysqlDaemon.ReplicationStatusError = mysql.ErrNotReplica
- primary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ primary.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
7: replication.MariadbGTID{
Domain: 7,
@@ -705,7 +705,7 @@ func TestPlannedReparentShardRelayLogErrorStartReplication(t *testing.T) {
Sequence: 990,
},
},
- }
+ })
primary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"SUBINSERT INTO _vt.reparent_journal (time_created_ns, action_name, primary_alias, replication_position) VALUES",
}
@@ -815,7 +815,7 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) {
oldPrimary.FakeMysqlDaemon.ReadOnly = false
oldPrimary.FakeMysqlDaemon.Replicating = false
oldPrimary.FakeMysqlDaemon.ReplicationStatusError = mysql.ErrNotReplica
- oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.WaitPrimaryPositions[0]
+ oldPrimary.FakeMysqlDaemon.SetPrimaryPositionLocked(newPrimary.FakeMysqlDaemon.WaitPrimaryPositions[0])
oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet))
oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"FAKE SET MASTER",
@@ -823,7 +823,7 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) {
// We call a SetReplicationSource explicitly
"FAKE SET MASTER",
"START SLAVE",
// extra SetReplicationSource call due to retry
"FAKE SET MASTER",
"START SLAVE",
}
@@ -885,7 +885,7 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) {
// retrying should work
newPrimary.FakeMysqlDaemon.PromoteError = nil
- newPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.WaitPrimaryPositions[0]
+ newPrimary.FakeMysqlDaemon.SetPrimaryPositionLocked(newPrimary.FakeMysqlDaemon.WaitPrimaryPositions[0])
// run PlannedReparentShard
err = vp.Run([]string{"PlannedReparentShard", "--wait_replicas_timeout", "10s", "--keyspace_shard", newPrimary.Tablet.Keyspace + "/" + newPrimary.Tablet.Shard, "--new_primary", topoproto.TabletAliasString(newPrimary.Tablet.Alias)})
@@ -922,7 +922,7 @@ func TestPlannedReparentShardSamePrimary(t *testing.T) {
oldPrimary.FakeMysqlDaemon.ReadOnly = true
oldPrimary.FakeMysqlDaemon.Replicating = false
oldPrimary.FakeMysqlDaemon.ReplicationStatusError = mysql.ErrNotReplica
- oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ oldPrimary.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
7: replication.MariadbGTID{
Domain: 7,
@@ -930,7 +930,7 @@ func TestPlannedReparentShardSamePrimary(t *testing.T) {
Sequence: 990,
},
},
- }
+ })
oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"SUBINSERT INTO _vt.reparent_journal (time_created_ns, action_name, primary_alias, replication_position) VALUES",
}
diff --git a/go/vt/wrangler/testlib/reparent_utils_test.go b/go/vt/wrangler/testlib/reparent_utils_test.go
index 8dfc3efb20d..3fa0a059ffb 100644
--- a/go/vt/wrangler/testlib/reparent_utils_test.go
+++ b/go/vt/wrangler/testlib/reparent_utils_test.go
@@ -67,7 +67,7 @@ func TestShardReplicationStatuses(t *testing.T) {
}
// primary action loop (to initialize host and port)
- primary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ primary.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
5: replication.MariadbGTID{
Domain: 5,
@@ -75,12 +75,12 @@ func TestShardReplicationStatuses(t *testing.T) {
Sequence: 892,
},
},
- }
+ })
primary.StartActionLoop(t, wr)
defer primary.StopActionLoop(t)
// replica loop
- replica.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ replica.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
5: replication.MariadbGTID{
Domain: 5,
@@ -88,7 +88,7 @@ func TestShardReplicationStatuses(t *testing.T) {
Sequence: 890,
},
},
- }
+ })
replica.FakeMysqlDaemon.CurrentSourceHost = primary.Tablet.MysqlHostname
replica.FakeMysqlDaemon.CurrentSourcePort = primary.Tablet.MysqlPort
replica.FakeMysqlDaemon.SetReplicationSourceInputs = append(replica.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet))
diff --git a/go/vt/wrangler/traffic_switcher_env_test.go b/go/vt/wrangler/traffic_switcher_env_test.go
index 3838ded0669..216a1d2966c 100644
--- a/go/vt/wrangler/traffic_switcher_env_test.go
+++ b/go/vt/wrangler/traffic_switcher_env_test.go
@@ -776,7 +776,7 @@ func (tme *testMigraterEnv) createDBClients(ctx context.Context, t *testing.T) {
func (tme *testMigraterEnv) setPrimaryPositions() {
for _, primary := range tme.sourcePrimaries {
- primary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ primary.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
5: replication.MariadbGTID{
Domain: 5,
@@ -784,10 +784,10 @@ func (tme *testMigraterEnv) setPrimaryPositions() {
Sequence: 892,
},
},
- }
+ })
}
for _, primary := range tme.targetPrimaries {
- primary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{
+ primary.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{
GTIDSet: replication.MariadbGTIDSet{
5: replication.MariadbGTID{
Domain: 5,
@@ -795,7 +795,7 @@ func (tme *testMigraterEnv) setPrimaryPositions() {
Sequence: 893,
},
},
- }
+ })
}
}
diff --git a/java/client/pom.xml b/java/client/pom.xml
index 0b40e76be7b..355bf62ecef 100644
--- a/java/client/pom.xml
+++ b/java/client/pom.xml
@@ -5,7 +5,7 @@
io.vitess
vitess-parent
- 19.0.5
+ 19.0.7
vitess-client
diff --git a/java/example/pom.xml b/java/example/pom.xml
index efb327f363f..34b0f3b2d08 100644
--- a/java/example/pom.xml
+++ b/java/example/pom.xml
@@ -5,7 +5,7 @@
io.vitess
vitess-parent
- 19.0.5
+ 19.0.7
vitess-example
diff --git a/java/grpc-client/pom.xml b/java/grpc-client/pom.xml
index 5114fd48def..d98aa2b5d13 100644
--- a/java/grpc-client/pom.xml
+++ b/java/grpc-client/pom.xml
@@ -5,7 +5,7 @@
io.vitess
vitess-parent
- 19.0.5
+ 19.0.7
vitess-grpc-client
diff --git a/java/jdbc/pom.xml b/java/jdbc/pom.xml
index eee476ea4df..bbf0879c3ac 100644
--- a/java/jdbc/pom.xml
+++ b/java/jdbc/pom.xml
@@ -5,7 +5,7 @@
io.vitess
vitess-parent
- 19.0.5
+ 19.0.7
vitess-jdbc
diff --git a/java/jdbc/src/test/java/io/vitess/jdbc/FieldWithMetadataTest.java b/java/jdbc/src/test/java/io/vitess/jdbc/FieldWithMetadataTest.java
index bcadc49d33a..26ad5fd11b3 100644
--- a/java/jdbc/src/test/java/io/vitess/jdbc/FieldWithMetadataTest.java
+++ b/java/jdbc/src/test/java/io/vitess/jdbc/FieldWithMetadataTest.java
@@ -16,6 +16,9 @@
package io.vitess.jdbc;
+import java.util.Set;
+import java.util.EnumSet;
+
import io.vitess.proto.Query;
import io.vitess.util.MysqlDefs;
import io.vitess.util.charset.CharsetMapping;
@@ -274,6 +277,16 @@ public void testNumericAndDateTimeEncoding() throws SQLException {
}
}
+ // Define the types to skip
+ Set<Query.Type> typesToSkip = EnumSet.of(
+ Query.Type.UNRECOGNIZED,
+ Query.Type.EXPRESSION,
+ Query.Type.HEXVAL,
+ Query.Type.HEXNUM,
+ Query.Type.BITNUM,
+ Query.Type.RAW
+ );
+
@Test
public void testPrecisionAdjustFactor() throws SQLException {
VitessConnection conn = getVitessConnection();
@@ -294,7 +307,8 @@ public void testPrecisionAdjustFactor() throws SQLException {
conn.setIncludedFields(Query.ExecuteOptions.IncludedFields.TYPE_AND_NAME);
for (Query.Type type : Query.Type.values()) {
- if (type == Query.Type.UNRECOGNIZED || type == Query.Type.EXPRESSION || type == Query.Type.HEXVAL || type == Query.Type.HEXNUM || type == Query.Type.BITNUM) {
+ // Skip if the type is in the set
+ if (typesToSkip.contains(type)) {
continue;
}
diff --git a/java/pom.xml b/java/pom.xml
index 9f1d57cfede..45a4dc338b5 100644
--- a/java/pom.xml
+++ b/java/pom.xml
@@ -11,7 +11,7 @@
io.vitess
vitess-parent
- 19.0.5
+ 19.0.7
pom
Vitess Java Client libraries [Parent]
@@ -72,7 +72,7 @@
4.1.94.Final
2.0.61.Final
- 3.24.3
+ 3.25.5
3.24.3
3.0.0
2.17.1
@@ -109,7 +109,7 @@
commons-io
commons-io
- 2.7
+ 2.14.0
diff --git a/proto/query.proto b/proto/query.proto
index 4d94fcb2c83..6ba19dc6691 100644
--- a/proto/query.proto
+++ b/proto/query.proto
@@ -215,6 +215,8 @@ enum Type {
// BITNUM specifies a base 2 binary type (unquoted varbinary).
// Properties: 34, IsText.
BITNUM = 4130;
+ // RAW specifies a type which won't be quoted but the value used as-is while encoding.
+ RAW = 2084;
}
// Value represents a typed value.
diff --git a/test.go b/test.go
index 2f8851e73a4..d7322179d84 100755
--- a/test.go
+++ b/test.go
@@ -77,7 +77,7 @@ For example:
// Flags
var (
flavor = flag.String("flavor", "mysql80", "comma-separated bootstrap flavor(s) to run against (when using Docker mode). Available flavors: all,"+flavors)
- bootstrapVersion = flag.String("bootstrap-version", "27.5", "the version identifier to use for the docker images")
+ bootstrapVersion = flag.String("bootstrap-version", "27.8", "the version identifier to use for the docker images")
runCount = flag.Int("runs", 1, "run each test this many times")
retryMax = flag.Int("retry", 3, "max number of retries, to detect flaky tests")
logPass = flag.Bool("log-pass", false, "log test output even if it passes")
@@ -111,7 +111,7 @@ const (
configFileName = "test/config.json"
// List of flavors for which a bootstrap Docker image is available.
- flavors = "mysql57,mysql80,percona,percona57,percona80"
+ flavors = "mysql80,percona80"
)
// Config is the overall object serialized in test/config.json.
diff --git a/test/templates/cluster_endtoend_test.tpl b/test/templates/cluster_endtoend_test.tpl
index 8abb7f1e4c6..eaad3a20b96 100644
--- a/test/templates/cluster_endtoend_test.tpl
+++ b/test/templates/cluster_endtoend_test.tpl
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/test/templates/cluster_endtoend_test_docker.tpl b/test/templates/cluster_endtoend_test_docker.tpl
index 5b170db25e5..dece09ec3d4 100644
--- a/test/templates/cluster_endtoend_test_docker.tpl
+++ b/test/templates/cluster_endtoend_test_docker.tpl
@@ -58,7 +58,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/test/templates/cluster_endtoend_test_mysql57.tpl b/test/templates/cluster_endtoend_test_mysql57.tpl
index 6ac17b2c395..c68e9df8c4e 100644
--- a/test/templates/cluster_endtoend_test_mysql57.tpl
+++ b/test/templates/cluster_endtoend_test_mysql57.tpl
@@ -79,7 +79,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/test/templates/cluster_vitess_tester.tpl b/test/templates/cluster_vitess_tester.tpl
index 2b6ecddb730..11e2c2b1016 100644
--- a/test/templates/cluster_vitess_tester.tpl
+++ b/test/templates/cluster_vitess_tester.tpl
@@ -58,7 +58,7 @@ jobs:
end_to_end:
- 'go/**/*.go'
- 'go/vt/sidecardb/**/*.sql'
- - 'go/test/endtoend/onlineddl/vrepl_suite/**'
+ - 'go/test/endtoend/vtgate/vitess_tester/**'
- 'test.go'
- 'Makefile'
- 'build.env'
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -117,7 +117,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
# install vitess tester
- go install github.com/vitessio/vitess-tester@eb953122baba163ed8ccaa6642458ee984f5d7e4
+ go install github.com/vitessio/vitess-tester@89dd933a9ea0e15f69ca58b9c8ea09a358762cca
- name: Setup launchable dependencies
if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
@@ -148,9 +148,9 @@ jobs:
# We go over all the directories in the given path.
# If there is a vschema file there, we use it, otherwise we let vitess-tester autogenerate it.
if [ -f $dir/vschema.json ]; then
- vitess-tester --sharded --xunit --test-dir $dir --vschema "$dir"vschema.json
+ vitess-tester --xunit --vschema "$dir"vschema.json $dir/*.test
else
- vitess-tester --sharded --xunit --test-dir $dir
+ vitess-tester --sharded --xunit $dir/*.test
fi
# Number the reports by changing their file names.
mv report.xml report"$i".xml
diff --git a/test/templates/dockerfile.tpl b/test/templates/dockerfile.tpl
index a31ccbe3103..6d1800c3309 100644
--- a/test/templates/dockerfile.tpl
+++ b/test/templates/dockerfile.tpl
@@ -1,4 +1,4 @@
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.8
ARG image="vitess/bootstrap:${bootstrap_version}-{{.Platform}}"
FROM "${image}"
@@ -15,7 +15,7 @@ RUN wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_
RUN apt-get update
RUN apt-get install -y gnupg2
RUN dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
-RUN percona-release enable-only pxb-24
+RUN percona-release enable-only tools
RUN apt-get update
RUN apt-get install -y percona-xtrabackup-24
{{end}}
diff --git a/test/templates/unit_test.tpl b/test/templates/unit_test.tpl
index 2beb8fac9ad..59228adba38 100644
--- a/test/templates/unit_test.tpl
+++ b/test/templates/unit_test.tpl
@@ -69,7 +69,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.8
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
diff --git a/vitess-mixin/go.mod b/vitess-mixin/go.mod
index d38b8bc4d80..fcb2de67107 100644
--- a/vitess-mixin/go.mod
+++ b/vitess-mixin/go.mod
@@ -1,6 +1,6 @@
module vitess-mixin
-go 1.13
+go 1.22.7
require (
github.com/Azure/go-autorest/autorest v0.11.1 // indirect
diff --git a/web/vtadmin/package-lock.json b/web/vtadmin/package-lock.json
index 7f5755e5cc4..639253f39d0 100644
--- a/web/vtadmin/package-lock.json
+++ b/web/vtadmin/package-lock.json
@@ -20,6 +20,7 @@
"highcharts-react-official": "^3.1.0",
"history": "^5.3.0",
"lodash-es": "^4.17.21",
+ "path-to-regexp": "^8.1.0",
"postcss-flexbugs-fixes": "^5.0.2",
"postcss-preset-env": "^8.0.1",
"query-string": "^7.1.3",
@@ -3548,7 +3549,7 @@
"node": ">=14.0.0"
},
"peerDependencies": {
- "rollup": "^2.68.0||^3.0.0"
+ "rollup": "^3.29.5"
},
"peerDependenciesMeta": {
"rollup": {
@@ -3570,7 +3571,7 @@
"node": ">=14.0.0"
},
"peerDependencies": {
- "rollup": "^1.20.0||^2.0.0||^3.0.0"
+ "rollup": "^3.29.5"
},
"peerDependenciesMeta": {
"rollup": {
@@ -4901,7 +4902,7 @@
"node": "^14.18.0 || >=16.0.0"
},
"peerDependencies": {
- "vite": "^4.1.0-beta.0"
+ "vite": "^4.5.4"
}
},
"node_modules/@vitest/expect": {
@@ -9588,7 +9589,7 @@
"whatwg-encoding": "^2.0.0",
"whatwg-mimetype": "^3.0.0",
"whatwg-url": "^12.0.1",
- "ws": "^8.13.0",
+ "ws": "^8.17.1",
"xml-name-validator": "^4.0.0"
},
"engines": {
@@ -10009,11 +10010,11 @@
}
},
"node_modules/micromatch": {
- "version": "4.0.5",
- "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz",
- "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==",
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
+ "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==",
"dependencies": {
- "braces": "^3.0.2",
+ "braces": "^3.0.3",
"picomatch": "^2.3.1"
},
"engines": {
@@ -10151,7 +10152,7 @@
"is-node-process": "^1.0.1",
"js-levenshtein": "^1.1.6",
"node-fetch": "^2.6.7",
- "path-to-regexp": "^6.2.0",
+ "path-to-regexp": "^8.0.0",
"statuses": "^2.0.0",
"strict-event-emitter": "^0.2.0",
"type-fest": "^1.2.2",
@@ -13871,10 +13872,12 @@
"dev": true
},
"node_modules/path-to-regexp": {
- "version": "6.2.1",
- "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.2.1.tgz",
- "integrity": "sha512-JLyh7xT1kizaEvcaXOQwOc2/Yhw6KZOvPf1S8401UyLk86CU79LN3vl7ztXGm/pZ+YjoyAJ4rxmHwbkBXJX+yw==",
- "dev": true
+ "version": "8.1.0",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-8.1.0.tgz",
+ "integrity": "sha512-Bqn3vc8CMHty6zuD+tG23s6v2kwxslHEhTj4eYaVKGIEB+YX/2wd0/rgXLFD9G9id9KCtbVy/3ZgmvZjpa0UdQ==",
+ "engines": {
+ "node": ">=16"
+ }
},
"node_modules/path-type": {
"version": "4.0.0",
@@ -15044,7 +15047,7 @@
"history": "^4.9.0",
"hoist-non-react-statics": "^3.1.0",
"loose-envify": "^1.3.1",
- "path-to-regexp": "^1.7.0",
+ "path-to-regexp": "^1.9.0",
"prop-types": "^15.6.2",
"react-is": "^16.6.0",
"tiny-invariant": "^1.0.2",
@@ -15103,9 +15106,9 @@
"integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ=="
},
"node_modules/react-router/node_modules/path-to-regexp": {
- "version": "1.8.0",
- "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz",
- "integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==",
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.9.0.tgz",
+ "integrity": "sha512-xIp7/apCFJuUHdDLWe8O1HIkb0kQrOMb/0u6FXQjemHn/ii5LrIzU6bdECnsiTF/GjZkMEKg1xdiZwNqDYlZ6g==",
"dependencies": {
"isarray": "0.0.1"
}
@@ -15515,10 +15518,11 @@
}
},
"node_modules/rollup": {
- "version": "3.29.4",
- "resolved": "https://registry.npmjs.org/rollup/-/rollup-3.29.4.tgz",
- "integrity": "sha512-oWzmBZwvYrU0iJHtDmhsm662rC15FRXmcjCk1xD771dFDx5jJ02ufAQQTn0etB2emNk4J9EZg/yWKpsn9BWGRw==",
+ "version": "3.29.5",
+ "resolved": "https://registry.npmjs.org/rollup/-/rollup-3.29.5.tgz",
+ "integrity": "sha512-GVsDdsbJzzy4S/v3dqWPJ7EfvZJfCHiDqe80IyrF59LYuP+e6U1LJoUqeuqRbwAWoMNoXivMNeNAOf5E22VA1w==",
"dev": true,
+ "license": "MIT",
"bin": {
"rollup": "dist/bin/rollup"
},
@@ -15686,7 +15690,7 @@
"mime-types": "2.1.18",
"minimatch": "3.1.2",
"path-is-inside": "1.0.2",
- "path-to-regexp": "2.2.1",
+ "path-to-regexp": "^3.3.0",
"range-parser": "1.2.0"
}
},
@@ -15734,11 +15738,27 @@
}
},
"node_modules/serve-handler/node_modules/path-to-regexp": {
- "version": "2.2.1",
- "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz",
- "integrity": "sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ==",
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-3.3.0.tgz",
+ "integrity": "sha512-qyCH421YQPS2WFDxDjftfc1ZR5WKQzVzqsp4n9M2kQhVOo/ByahFoUNJfl58kOcEGfQ//7weFTDhm+ss8Ecxgw==",
"dev": true
},
+ "node_modules/serve/node_modules/ajv": {
+ "version": "8.12.0",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz",
+ "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==",
+ "dev": true,
+ "dependencies": {
+ "fast-deep-equal": "^3.1.1",
+ "json-schema-traverse": "^1.0.0",
+ "require-from-string": "^2.0.2",
+ "uri-js": "^4.2.2"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/epoberezkin"
+ }
+ },
"node_modules/serve/node_modules/chalk": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-5.0.1.tgz",
@@ -16995,14 +17015,15 @@
}
},
"node_modules/vite": {
- "version": "4.5.2",
- "resolved": "https://registry.npmjs.org/vite/-/vite-4.5.2.tgz",
- "integrity": "sha512-tBCZBNSBbHQkaGyhGCDUGqeo2ph8Fstyp6FMSvTtsXeZSPpSMGlviAOav2hxVTqFcx8Hj/twtWKsMJXNY0xI8w==",
+ "version": "4.5.5",
+ "resolved": "https://registry.npmjs.org/vite/-/vite-4.5.5.tgz",
+ "integrity": "sha512-ifW3Lb2sMdX+WU91s3R0FyQlAyLxOzCSCP37ujw0+r5POeHPwe6udWVIElKQq8gk3t7b8rkmvqC6IHBpCff4GQ==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"esbuild": "^0.18.10",
"postcss": "^8.4.27",
- "rollup": "^3.27.1"
+ "rollup": "^3.29.5"
},
"bin": {
"vite": "bin/vite.js"
@@ -17060,7 +17081,7 @@
"mlly": "^1.1.0",
"pathe": "^1.1.0",
"picocolors": "^1.0.0",
- "vite": "^3.0.0 || ^4.0.0"
+ "vite": "^4.5.4"
},
"bin": {
"vite-node": "vite-node.mjs"
@@ -17080,11 +17101,11 @@
"dependencies": {
"@rollup/pluginutils": "^4.2.1",
"@types/eslint": "^8.4.5",
- "rollup": "^2.77.2"
+ "rollup": "^3.29.5"
},
"peerDependencies": {
"eslint": ">=7",
- "vite": ">=2"
+ "vite": ">=4.5.4"
}
},
"node_modules/vite-plugin-eslint/node_modules/@rollup/pluginutils": {
@@ -17100,21 +17121,6 @@
"node": ">= 8.0.0"
}
},
- "node_modules/vite-plugin-eslint/node_modules/rollup": {
- "version": "2.79.1",
- "resolved": "https://registry.npmjs.org/rollup/-/rollup-2.79.1.tgz",
- "integrity": "sha512-uKxbd0IhMZOhjAiD5oAFp7BqvkA4Dv47qpOCtaNvng4HBwdbWtdOh8f5nZNuk2rp51PMGk3bzfWu5oayNEuYnw==",
- "dev": true,
- "bin": {
- "rollup": "dist/bin/rollup"
- },
- "engines": {
- "node": ">=10.0.0"
- },
- "optionalDependencies": {
- "fsevents": "~2.3.2"
- }
- },
"node_modules/vite-plugin-svgr": {
"version": "2.4.0",
"resolved": "https://registry.npmjs.org/vite-plugin-svgr/-/vite-plugin-svgr-2.4.0.tgz",
@@ -17125,7 +17131,7 @@
"@svgr/core": "^6.5.1"
},
"peerDependencies": {
- "vite": "^2.6.0 || 3 || 4"
+ "vite": "^4.5.4"
}
},
"node_modules/vitest": {
@@ -17155,7 +17161,7 @@
"tinybench": "^2.3.1",
"tinypool": "^0.4.0",
"tinyspy": "^1.0.2",
- "vite": "^3.0.0 || ^4.0.0",
+ "vite": "^4.5.4",
"vite-node": "0.29.8",
"why-is-node-running": "^2.2.2"
},
@@ -17478,9 +17484,9 @@
}
},
"node_modules/ws": {
- "version": "8.13.0",
- "resolved": "https://registry.npmjs.org/ws/-/ws-8.13.0.tgz",
- "integrity": "sha512-x9vcZYTrFPC7aSIbj7sRCYo7L/Xb8Iy+pW0ng0wt2vCJv7M9HOMy0UoN3rr+IFC7hb7vXoqS+P9ktyLLLhO+LA==",
+ "version": "8.18.0",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz",
+ "integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==",
"dev": true,
"engines": {
"node": ">=10.0.0"
@@ -24315,7 +24321,7 @@
"whatwg-encoding": "^2.0.0",
"whatwg-mimetype": "^3.0.0",
"whatwg-url": "^12.0.1",
- "ws": "^8.13.0",
+ "ws": "^8.17.1",
"xml-name-validator": "^4.0.0"
}
},
@@ -24641,11 +24647,11 @@
"dev": true
},
"micromatch": {
- "version": "4.0.5",
- "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz",
- "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==",
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
+ "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==",
"requires": {
- "braces": "^3.0.2",
+ "braces": "^3.0.3",
"picomatch": "^2.3.1"
}
},
@@ -24752,7 +24758,7 @@
"is-node-process": "^1.0.1",
"js-levenshtein": "^1.1.6",
"node-fetch": "^2.6.7",
- "path-to-regexp": "^6.2.0",
+ "path-to-regexp": "^8.0.0",
"statuses": "^2.0.0",
"strict-event-emitter": "^0.2.0",
"type-fest": "^1.2.2",
@@ -27269,10 +27275,9 @@
"dev": true
},
"path-to-regexp": {
- "version": "6.2.1",
- "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.2.1.tgz",
- "integrity": "sha512-JLyh7xT1kizaEvcaXOQwOc2/Yhw6KZOvPf1S8401UyLk86CU79LN3vl7ztXGm/pZ+YjoyAJ4rxmHwbkBXJX+yw==",
- "dev": true
+ "version": "8.1.0",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-8.1.0.tgz",
+ "integrity": "sha512-Bqn3vc8CMHty6zuD+tG23s6v2kwxslHEhTj4eYaVKGIEB+YX/2wd0/rgXLFD9G9id9KCtbVy/3ZgmvZjpa0UdQ=="
},
"path-type": {
"version": "4.0.0",
@@ -27976,7 +27981,7 @@
"history": "^4.9.0",
"hoist-non-react-statics": "^3.1.0",
"loose-envify": "^1.3.1",
- "path-to-regexp": "^1.7.0",
+ "path-to-regexp": "^1.9.0",
"prop-types": "^15.6.2",
"react-is": "^16.6.0",
"tiny-invariant": "^1.0.2",
@@ -28002,9 +28007,9 @@
"integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ=="
},
"path-to-regexp": {
- "version": "1.8.0",
- "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz",
- "integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==",
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.9.0.tgz",
+ "integrity": "sha512-xIp7/apCFJuUHdDLWe8O1HIkb0kQrOMb/0u6FXQjemHn/ii5LrIzU6bdECnsiTF/GjZkMEKg1xdiZwNqDYlZ6g==",
"requires": {
"isarray": "0.0.1"
}
@@ -28361,9 +28366,9 @@
}
},
"rollup": {
- "version": "3.29.4",
- "resolved": "https://registry.npmjs.org/rollup/-/rollup-3.29.4.tgz",
- "integrity": "sha512-oWzmBZwvYrU0iJHtDmhsm662rC15FRXmcjCk1xD771dFDx5jJ02ufAQQTn0etB2emNk4J9EZg/yWKpsn9BWGRw==",
+ "version": "3.29.5",
+ "resolved": "https://registry.npmjs.org/rollup/-/rollup-3.29.5.tgz",
+ "integrity": "sha512-GVsDdsbJzzy4S/v3dqWPJ7EfvZJfCHiDqe80IyrF59LYuP+e6U1LJoUqeuqRbwAWoMNoXivMNeNAOf5E22VA1w==",
"dev": true,
"requires": {
"fsevents": "~2.3.2"
@@ -28475,6 +28480,17 @@
"update-check": "1.5.4"
},
"dependencies": {
+ "ajv": {
+ "version": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz",
+ "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==",
+ "dev": true,
+ "requires": {
+ "fast-deep-equal": "^3.1.1",
+ "json-schema-traverse": "^1.0.0",
+ "require-from-string": "^2.0.2",
+ "uri-js": "^4.2.2"
+ }
+ },
"chalk": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-5.0.1.tgz",
@@ -28495,7 +28511,7 @@
"mime-types": "2.1.18",
"minimatch": "3.1.2",
"path-is-inside": "1.0.2",
- "path-to-regexp": "2.2.1",
+ "path-to-regexp": "^3.3.0",
"range-parser": "1.2.0"
},
"dependencies": {
@@ -28534,9 +28550,9 @@
}
},
"path-to-regexp": {
- "version": "2.2.1",
- "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz",
- "integrity": "sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ==",
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-3.3.0.tgz",
+ "integrity": "sha512-qyCH421YQPS2WFDxDjftfc1ZR5WKQzVzqsp4n9M2kQhVOo/ByahFoUNJfl58kOcEGfQ//7weFTDhm+ss8Ecxgw==",
"dev": true
}
}
@@ -29506,15 +29522,15 @@
"dev": true
},
"vite": {
- "version": "4.5.2",
- "resolved": "https://registry.npmjs.org/vite/-/vite-4.5.2.tgz",
- "integrity": "sha512-tBCZBNSBbHQkaGyhGCDUGqeo2ph8Fstyp6FMSvTtsXeZSPpSMGlviAOav2hxVTqFcx8Hj/twtWKsMJXNY0xI8w==",
+ "version": "4.5.5",
+ "resolved": "https://registry.npmjs.org/vite/-/vite-4.5.5.tgz",
+ "integrity": "sha512-ifW3Lb2sMdX+WU91s3R0FyQlAyLxOzCSCP37ujw0+r5POeHPwe6udWVIElKQq8gk3t7b8rkmvqC6IHBpCff4GQ==",
"dev": true,
"requires": {
"esbuild": "^0.18.10",
"fsevents": "~2.3.2",
"postcss": "^8.4.27",
- "rollup": "^3.27.1"
+ "rollup": "^3.29.5"
}
},
"vite-node": {
@@ -29528,7 +29544,7 @@
"mlly": "^1.1.0",
"pathe": "^1.1.0",
"picocolors": "^1.0.0",
- "vite": "^3.0.0 || ^4.0.0"
+ "vite": "^4.5.4"
}
},
"vite-plugin-eslint": {
@@ -29539,7 +29555,7 @@
"requires": {
"@rollup/pluginutils": "^4.2.1",
"@types/eslint": "^8.4.5",
- "rollup": "^2.77.2"
+ "rollup": "^3.29.5"
},
"dependencies": {
"@rollup/pluginutils": {
@@ -29551,15 +29567,6 @@
"estree-walker": "^2.0.1",
"picomatch": "^2.2.2"
}
- },
- "rollup": {
- "version": "2.79.1",
- "resolved": "https://registry.npmjs.org/rollup/-/rollup-2.79.1.tgz",
- "integrity": "sha512-uKxbd0IhMZOhjAiD5oAFp7BqvkA4Dv47qpOCtaNvng4HBwdbWtdOh8f5nZNuk2rp51PMGk3bzfWu5oayNEuYnw==",
- "dev": true,
- "requires": {
- "fsevents": "~2.3.2"
- }
}
}
},
@@ -29600,7 +29607,7 @@
"tinybench": "^2.3.1",
"tinypool": "^0.4.0",
"tinyspy": "^1.0.2",
- "vite": "^3.0.0 || ^4.0.0",
+ "vite": "^4.5.4",
"vite-node": "0.29.8",
"why-is-node-running": "^2.2.2"
}
@@ -29804,9 +29811,9 @@
}
},
"ws": {
- "version": "8.13.0",
- "resolved": "https://registry.npmjs.org/ws/-/ws-8.13.0.tgz",
- "integrity": "sha512-x9vcZYTrFPC7aSIbj7sRCYo7L/Xb8Iy+pW0ng0wt2vCJv7M9HOMy0UoN3rr+IFC7hb7vXoqS+P9ktyLLLhO+LA==",
+ "version": "8.18.0",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz",
+ "integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==",
"dev": true,
"requires": {}
},
diff --git a/web/vtadmin/package.json b/web/vtadmin/package.json
index 5a0f01e48c8..ea068f2bafb 100644
--- a/web/vtadmin/package.json
+++ b/web/vtadmin/package.json
@@ -19,6 +19,7 @@
"highcharts-react-official": "^3.1.0",
"history": "^5.3.0",
"lodash-es": "^4.17.21",
+ "path-to-regexp": "^8.1.0",
"postcss-flexbugs-fixes": "^5.0.2",
"postcss-preset-env": "^8.0.1",
"query-string": "^7.1.3",
diff --git a/web/vtadmin/src/proto/vtadmin.d.ts b/web/vtadmin/src/proto/vtadmin.d.ts
index 66010a94e93..25946bd48e0 100644
--- a/web/vtadmin/src/proto/vtadmin.d.ts
+++ b/web/vtadmin/src/proto/vtadmin.d.ts
@@ -33979,7 +33979,8 @@ export namespace query {
EXPRESSION = 31,
HEXNUM = 4128,
HEXVAL = 4129,
- BITNUM = 4130
+ BITNUM = 4130,
+ RAW = 2084
}
/** Properties of a Value. */
diff --git a/web/vtadmin/src/proto/vtadmin.js b/web/vtadmin/src/proto/vtadmin.js
index 537fbe7a9c1..4666b5a66bf 100644
--- a/web/vtadmin/src/proto/vtadmin.js
+++ b/web/vtadmin/src/proto/vtadmin.js
@@ -80140,6 +80140,7 @@ export const query = $root.query = (() => {
* @property {number} HEXNUM=4128 HEXNUM value
* @property {number} HEXVAL=4129 HEXVAL value
* @property {number} BITNUM=4130 BITNUM value
+ * @property {number} RAW=2084 RAW value
*/
query.Type = (function() {
const valuesById = {}, values = Object.create(valuesById);
@@ -80178,6 +80179,7 @@ export const query = $root.query = (() => {
values[valuesById[4128] = "HEXNUM"] = 4128;
values[valuesById[4129] = "HEXVAL"] = 4129;
values[valuesById[4130] = "BITNUM"] = 4130;
+ values[valuesById[2084] = "RAW"] = 2084;
return values;
})();
@@ -80366,6 +80368,7 @@ export const query = $root.query = (() => {
case 4128:
case 4129:
case 4130:
+ case 2084:
break;
}
if (message.value != null && message.hasOwnProperty("value"))
@@ -80533,6 +80536,10 @@ export const query = $root.query = (() => {
case 4130:
message.type = 4130;
break;
+ case "RAW":
+ case 2084:
+ message.type = 2084;
+ break;
}
if (object.value != null)
if (typeof object.value === "string")
@@ -80805,6 +80812,7 @@ export const query = $root.query = (() => {
case 4128:
case 4129:
case 4130:
+ case 2084:
break;
}
if (message.value != null && message.hasOwnProperty("value"))
@@ -80981,6 +80989,10 @@ export const query = $root.query = (() => {
case 4130:
message.type = 4130;
break;
+ case "RAW":
+ case 2084:
+ message.type = 2084;
+ break;
}
if (object.value != null)
if (typeof object.value === "string")
@@ -82471,6 +82483,7 @@ export const query = $root.query = (() => {
case 4128:
case 4129:
case 4130:
+ case 2084:
break;
}
if (message.table != null && message.hasOwnProperty("table"))
@@ -82664,6 +82677,10 @@ export const query = $root.query = (() => {
case 4130:
message.type = 4130;
break;
+ case "RAW":
+ case 2084:
+ message.type = 2084;
+ break;
}
if (object.table != null)
message.table = String(object.table);
@@ -103539,6 +103556,7 @@ export const vschema = $root.vschema = (() => {
case 4128:
case 4129:
case 4130:
+ case 2084:
break;
}
if (message.invisible != null && message.hasOwnProperty("invisible"))
@@ -103732,6 +103750,10 @@ export const vschema = $root.vschema = (() => {
case 4130:
message.type = 4130;
break;
+ case "RAW":
+ case 2084:
+ message.type = 2084;
+ break;
}
if (object.invisible != null)
message.invisible = Boolean(object.invisible);