diff --git a/README.md b/README.md index ec1a73322..bc9d16468 100644 --- a/README.md +++ b/README.md @@ -37,6 +37,10 @@ With the `failover` plugin, the downtime during certain DB cluster operations, s Visit [this page](./docs/using-the-jdbc-driver/SupportForRDSMultiAzDBCluster.md) for more details. +### Using the AWS JDBC Driver with Amazon Aurora Global Databases + +This driver supports in-region `failover` as well as cross-region `planned failover` and `switchover` of [Amazon Aurora Global Databases](https://aws.amazon.com/ru/rds/aurora/global-database/). A [Global Writer Endpoint](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database-connecting.html) is also recognized and can be handled to minimize potential stale DNS issues. Please check the [failover plugin](./docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md), the [failover2 plugin](./docs/using-the-jdbc-driver/using-plugins/UsingTheFailover2Plugin.md), and the [Aurora Initial Connection Strategy plugin](./docs/using-the-jdbc-driver/using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md) for more information. + ### Using the AWS JDBC Driver with plain RDS databases The AWS JDBC Driver also works with RDS provided databases that are not Aurora. @@ -128,10 +132,6 @@ The development team is aware of these limitations and is working to improve the [^1]: Aurora MySQL requires v3.07 or later. -#### Amazon Aurora Global Databases - -This driver currently does not support `planned failover` or `switchover` of Amazon Aurora Global Databases. Failing over to a secondary cluster will result in errors and there may be additional unforeseen errors when working with global databases. Connecting to the primary cluster is fully supported. There is a limitation when connected to the secondary cluster; the [failover2 plugin](using-the-jdbc-driver/using-plugins/UsingTheFailover2Plugin) will not work on the secondary cluster, however the [failover plugin](using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin) will work. Full Support for Amazon Aurora Global Databases is in the backlog, but we cannot comment on a timeline right now. - ## Examples | Description | Examples | @@ -153,7 +153,7 @@ This driver currently does not support `planned failover` or `switchover` of Ama | Using Spring and Wildfly with the AWS JDBC Driver | [PostgreSQL](examples/SpringWildflyExample/README.md) | | Using Vert.x and c3p0 with the AWS JDBC Driver | [PostgreSQL](examples/VertxExample/README.md) | | Using the AWS JDBC Driver with Telemetry and using the AWS Distro for OpenTelemetry Collector | [PostgreSQL](examples/AWSDriverExample/src/main/java/software/amazon/TelemetryMetricsOTLPExample.java) | -| Using the AWS JDBC Driver with Telemetry and using the AWS X-Ray Daemon | [PostgreSQL](examples/AWSDriverExample/src/main/java/software/amazon/TelemetryMetricsXRayExample.java) | +| Using the AWS JDBC Driver with Telemetry and using the AWS X-Ray Daemon | [PostgreSQL](examples/AWSDriverExample/src/main/java/software/amazon/TelemetryTracingXRayExample.java) | ## Getting Help and Opening Issues If you encounter a bug with the AWS JDBC Driver, we would like to hear about it.
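As a quick orientation (not part of the change set itself), the sketch below shows how an application might connect through the wrapper to an Aurora Global Database using the plugins referenced in the new README section. The endpoint, credentials, and instance identifiers are hypothetical placeholders; `wrapperPlugins`, `failoverMode`, and the new `globalClusterInstanceHostPatterns` parameter are the settings documented in the plugin pages changed below.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.Properties;

public class GlobalDbConnectSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical Aurora Global Writer Endpoint; replace with your own endpoint and database.
    String url = "jdbc:aws-wrapper:postgresql://my-global-db.global-abc123.global.rds.amazonaws.com:5432/postgres";

    Properties props = new Properties();
    props.setProperty("user", "admin");        // placeholder credentials
    props.setProperty("password", "secret");
    // Enable the Aurora Initial Connection Strategy and failover2 plugins described in the docs below.
    props.setProperty("wrapperPlugins", "initialConnection,failover2");
    props.setProperty("failoverMode", "strict-writer");
    // One instance host pattern per region of the global database (hypothetical identifiers).
    props.setProperty("globalClusterInstanceHostPatterns",
        "?.XYZ1.us-east-2.rds.amazonaws.com,?.XYZ2.us-west-2.rds.amazonaws.com");

    try (Connection conn = DriverManager.getConnection(url, props);
         Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery("SELECT aurora_db_instance_identifier()")) {
      while (rs.next()) {
        System.out.println("Connected to instance: " + rs.getString(1));
      }
    }
  }
}
```

The explicit `failoverMode` value is shown only for illustration; as the parameter tables below note, it defaults based on the connection URL when omitted.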
diff --git a/docs/using-the-jdbc-driver/DatabaseDialects.md b/docs/using-the-jdbc-driver/DatabaseDialects.md index 04b6ace51..ebffe1afa 100644 --- a/docs/using-the-jdbc-driver/DatabaseDialects.md +++ b/docs/using-the-jdbc-driver/DatabaseDialects.md @@ -16,12 +16,14 @@ The AWS Advanced JDBC Driver is a wrapper that requires an underlying driver, an Dialect codes specify what kind of database any connections will be made to. | Dialect Code Reference | Value | Database | -| ---------------------------- | ---------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | -| `AURORA_MYSQL` | `aurora-mysql` | Aurora MySQL | +| ---------------------------- | ---------------------------- |----------------------------------------------------------------------------------------------------------------------------------------------------| +| `AURORA_MYSQL` | `aurora-mysql` | [Aurora MySQL](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_GettingStartedAurora.html) | +| `GLOBAL_AURORA_MYSQL` | `global-aurora-mysql` | [Aurora Global Database MySQL](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database-getting-started.html) | | `RDS_MULTI_AZ_MYSQL_CLUSTER` | `rds-multi-az-mysql-cluster` | [Amazon RDS MySQL Multi-AZ DB Cluster Deployments](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) | | `RDS_MYSQL` | `rds-mysql` | Amazon RDS MySQL | | `MYSQL` | `mysql` | MySQL | -| `AURORA_PG` | `aurora-pg` | Aurora PostgreSQL | +| `AURORA_PG` | `aurora-pg` | [Aurora PostgreSQL](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_GettingStartedAurora.html) | +| `GLOBAL_AURORA_PG` | `global-aurora-pg` | [Aurora Global Database PostgreSQL](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database-getting-started.html) | | `RDS_MULTI_AZ_PG_CLUSTER` | `rds-multi-az-pg-cluster` | [Amazon RDS PostgreSQL Multi-AZ DB Cluster Deployments](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) | | `RDS_PG` | `rds-pg` | Amazon RDS PostgreSQL | | `PG` | `pg` | PostgreSQL | diff --git a/docs/using-the-jdbc-driver/using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md b/docs/using-the-jdbc-driver/using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md index b32f8a3cb..6b887e3ea 100644 --- a/docs/using-the-jdbc-driver/using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md +++ b/docs/using-the-jdbc-driver/using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md @@ -5,6 +5,8 @@ When this plugin is enabled, if the initial connection is to a reader cluster en This plugin also helps retrieve connections more reliably. When a user connects to a cluster endpoint, the actual instance for a new connection is resolved by DNS. During failover, the cluster elects another instance to be the writer. While DNS is updating, which can take up to 40-60 seconds, if a user tries to connect to the cluster endpoint, they may be connecting to an old node. This plugin helps by replacing the out of date endpoint if DNS is updating. +In case of Aurora Global Database, a user has an option to use an [Aurora Global Writer Endpoint](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database-connecting.html). Global Writer Endpoint makes a user application configuration easier. 
However, similar to a cluster writer endpoint mentioned above, it can also be affected by DNS updates. The Aurora Initial Connection Strategy Plugin recognizes an Aurora Global Writer Endpoint and substitutes it with a current writer endpoint. + ## Enabling the Aurora Initial Connection Strategy Plugin To enable the Aurora Initial Connection Strategy Plugin, add `initialConnection` to the [`wrapperPlugins`](../UsingTheJdbcDriver.md#connection-plugin-manager-parameters) value. diff --git a/docs/using-the-jdbc-driver/using-plugins/UsingTheFailover2Plugin.md b/docs/using-the-jdbc-driver/using-plugins/UsingTheFailover2Plugin.md index e56f4d766..304eb4984 100644 --- a/docs/using-the-jdbc-driver/using-plugins/UsingTheFailover2Plugin.md +++ b/docs/using-the-jdbc-driver/using-plugins/UsingTheFailover2Plugin.md @@ -57,7 +57,8 @@ In addition to the parameters that you can configure for the underlying driver, | Parameter | Value | Required | Description | Default Value | |---------------------------------------|:-------:|:--------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `failoverMode` | String | No | Defines a mode for failover process. Failover process may prioritize nodes with different roles and connect to them. Possible values:

- `strict-writer` - Failover process follows writer node and connects to a new writer when it changes.
- `reader-or-writer` - During failover, the driver tries to connect to any available/accessible reader node. If no reader is available, the driver will connect to a writer node. This logic mimics the logic of the Aurora read-only cluster endpoint.
- `strict-reader` - During failover, the driver tries to connect to any available reader node. If no reader is available, the driver raises an error. Reader failover to a writer node will only be allowed for single-node clusters. This logic mimics the logic of the Aurora read-only cluster endpoint.

If this parameter is omitted, default value depends on connection url. For Aurora read-only cluster endpoint, it's set to `reader-or-writer`. Otherwise, it's `strict-writer`. | Default value depends on connection url. For Aurora read-only cluster endpoint, it's set to `reader-or-writer`. Otherwise, it's `strict-writer`. | -| `clusterInstanceHostPattern` | String | If connecting using an IP address or custom domain URL: Yes

Otherwise: No | This parameter is not required unless connecting to an AWS RDS cluster via an IP address or custom domain URL. In those cases, this parameter specifies the cluster instance DNS pattern that will be used to build a complete instance endpoint. A "?" character in this pattern should be used as a placeholder for the DB instance identifiers of the instances in the cluster. See [here](#host-pattern) for more information.

Example: `?.my-domain.com`, `any-subdomain.?.my-domain.com:9999`

Use case Example: If your cluster instance endpoints follow this pattern:`instanceIdentifier1.customHost`, `instanceIdentifier2.customHost`, etc. and you want your initial connection to be to `customHost:1234`, then your connection string should look like this: `jdbc:aws-wrapper:mysql://customHost:1234/test?clusterInstanceHostPattern=?.customHost` | If the provided connection string is not an IP address or custom domain, the JDBC Driver will automatically acquire the cluster instance host pattern from the customer-provided connection string. | +| `clusterInstanceHostPattern` | String | If connecting using an IP address or custom domain URL: Yes

Otherwise: No | This parameter is not required unless connecting to an AWS RDS cluster via an IP address or custom domain URL. In those cases, this parameter specifies the cluster instance DNS pattern that will be used to build a complete instance endpoint. A "?" character in this pattern should be used as a placeholder for the DB instance identifiers of the instances in the cluster. See [here](./UsingTheFailoverPlugin.md#host-pattern) for more information.

This parameter is ignored for Aurora Global Databases.

Example: `?.my-domain.com`, `any-subdomain.?.my-domain.com:9999`

Use case Example: If your cluster instance endpoints follow this pattern:`instanceIdentifier1.customHost`, `instanceIdentifier2.customHost`, etc. and you want your initial connection to be to `customHost:1234`, then your connection string should look like this: `jdbc:aws-wrapper:mysql://customHost:1234/test?clusterInstanceHostPattern=?.customHost` | If the provided connection string is not an IP address or custom domain, the JDBC Driver will automatically acquire the cluster instance host pattern from the customer-provided connection string. | +| `globalClusterInstanceHostPatterns` | String | For Global Databases: Yes

Otherwise: No | This parameter is similar to the `clusterInstanceHostPattern` parameter, but it provides a comma-separated list of instance host patterns. This parameter is required for Aurora Global Databases. The list should contain a host pattern for each region of the global database. Each host pattern should be based on an instance endpoint. Custom domain URLs are not supported.

This parameter is ignored for other types of databases (Aurora clusters, RDS clusters, plain RDS databases, etc.).

Example: for an Aurora Global Database with two AWS regions `us-east-2` and `us-west-2`, the parameter value is `?.XYZ1.us-east-2.rds.amazonaws.com,?.XYZ2.us-west-2.rds.amazonaws.com`. Please pay attention that user identifiers are different for different AWS regions (`XYZ1` and `XYZ2` as in the example above). | | | `clusterTopologyRefreshRateMs` | Integer | No | Cluster topology refresh rate in milliseconds when a cluster is not in failover. It refers to the regular, slow monitoring rate explained above. | `30000` | | `failoverTimeoutMs` | Integer | No | Maximum allowed time in milliseconds to attempt reconnecting to a new writer or reader instance after a cluster failover is initiated. | `300000` | | `clusterTopologyHighRefreshRateMs` | Integer | No | Interval of time in milliseconds to wait between attempts to update cluster topology after the writer has come back online following a failover event. It corresponds to the increased monitoring rate described earlier. Usually, the topology monitoring component uses this increased monitoring rate for 30s after a new writer was detected. | `100` | diff --git a/docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md b/docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md index aa925fcb1..87d62cfa8 100644 --- a/docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md +++ b/docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md @@ -24,7 +24,8 @@ In addition to the parameters that you can configure for the underlying driver, | Parameter | Value | Required | Description | Default Value | |----------------------------------------|:-------:|:--------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `failoverMode` | String | No | Defines a mode for failover process. Failover process may prioritize nodes with different roles and connect to them. Possible values:

- `strict-writer` - Failover process follows writer node and connects to a new writer when it changes.
- `reader-or-writer` - During failover, the driver tries to connect to any available/accessible reader node. If no reader is available, the driver will connect to a writer node. This logic mimics the logic of the Aurora read-only cluster endpoint.
- `strict-reader` - During failover, the driver tries to connect to any available reader node. If no reader is available, the driver raises an error. Reader failover to a writer node will only be allowed for single-node clusters. This logic mimics the logic of the Aurora read-only cluster endpoint.

If this parameter is omitted, default value depends on connection url. For Aurora read-only cluster endpoint, it's set to `reader-or-writer`. Otherwise, it's `strict-writer`. | Default value depends on connection url. For Aurora read-only cluster endpoint, it's set to `reader-or-writer`. Otherwise, it's `strict-writer`. | -| `clusterInstanceHostPattern` | String | If connecting using an IP address or custom domain URL: Yes

Otherwise: No | This parameter is not required unless connecting to an AWS RDS cluster via an IP address or custom domain URL. In those cases, this parameter specifies the cluster instance DNS pattern that will be used to build a complete instance endpoint. A "?" character in this pattern should be used as a placeholder for the DB instance identifiers of the instances in the cluster. See [here](#host-pattern) for more information.

Example: `?.my-domain.com`, `any-subdomain.?.my-domain.com:9999`

Use case Example: If your cluster instance endpoints follow this pattern:`instanceIdentifier1.customHost`, `instanceIdentifier2.customHost`, etc. and you want your initial connection to be to `customHost:1234`, then your connection string should look like this: `jdbc:aws-wrapper:mysql://customHost:1234/test?clusterInstanceHostPattern=?.customHost` | If the provided connection string is not an IP address or custom domain, the JDBC Driver will automatically acquire the cluster instance host pattern from the customer-provided connection string. | +| `clusterInstanceHostPattern` | String | If connecting using an IP address or custom domain URL: Yes

Otherwise: No | This parameter is not required unless connecting to an AWS RDS cluster via an IP address or custom domain URL. In those cases, this parameter specifies the cluster instance DNS pattern that will be used to build a complete instance endpoint. A "?" character in this pattern should be used as a placeholder for the DB instance identifiers of the instances in the cluster. See [here](#host-pattern) for more information.

This parameter is ignored for Aurora Global Databases.

Example: `?.my-domain.com`, `any-subdomain.?.my-domain.com:9999`

Use case Example: If your cluster instance endpoints follow this pattern:`instanceIdentifier1.customHost`, `instanceIdentifier2.customHost`, etc. and you want your initial connection to be to `customHost:1234`, then your connection string should look like this: `jdbc:aws-wrapper:mysql://customHost:1234/test?clusterInstanceHostPattern=?.customHost` | If the provided connection string is not an IP address or custom domain, the JDBC Driver will automatically acquire the cluster instance host pattern from the customer-provided connection string. | +| `globalClusterInstanceHostPatterns` | String | For Global Databases: Yes

Otherwise: No | This parameter is similar to the `clusterInstanceHostPattern` parameter, but it provides a comma-separated list of instance host patterns. This parameter is required for Aurora Global Databases. The list should contain a host pattern for each region of the global database. Each host pattern should be based on an instance endpoint. Custom domain URLs are not supported.

This parameter is ignored for other types of databases (Aurora clusters, RDS clusters, plain RDS databases, etc.).

Example: for an Aurora Global Database with two AWS regions `us-east-2` and `us-west-2`, the parameter value is `?.XYZ1.us-east-2.rds.amazonaws.com,?.XYZ2.us-west-2.rds.amazonaws.com`. Please pay attention that user identifiers are different for different AWS regions (`XYZ1` and `XYZ2` as in the example above). | | | `enableClusterAwareFailover` | Boolean | No | Set to `true` to enable the fast failover behavior offered by the AWS Advanced JDBC Driver. Set to `false` for simple JDBC connections that do not require fast failover functionality. | `true` | | `failoverClusterTopologyRefreshRateMs` | Integer | No | Cluster topology refresh rate in milliseconds during a writer failover process. During the writer failover process, cluster topology may be refreshed at a faster pace than normal to speed up discovery of the newly promoted writer. | `2000` | | `failoverReaderConnectTimeoutMs` | Integer | No | Maximum allowed time in milliseconds to attempt to connect to a reader instance during a reader failover process. | `30000` | diff --git a/wrapper/build.gradle.kts b/wrapper/build.gradle.kts index 3c9793f7d..bb61e1ebd 100644 --- a/wrapper/build.gradle.kts +++ b/wrapper/build.gradle.kts @@ -170,7 +170,7 @@ tasks.withType { violationRules { rule { limit { - minimum = BigDecimal(0.50) + minimum = BigDecimal(0.30) } } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/Driver.java b/wrapper/src/main/java/software/amazon/jdbc/Driver.java index b66e216b4..fe89ebe8c 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/Driver.java +++ b/wrapper/src/main/java/software/amazon/jdbc/Driver.java @@ -392,7 +392,6 @@ public static void clearCaches() { RdsHostListProvider.clearAll(); PluginServiceImpl.clearCache(); DialectManager.resetEndpointCache(); - MonitoringRdsHostListProvider.clearCache(); CustomEndpointMonitorImpl.clearCache(); OpenedConnectionTracker.clearCache(); AwsSecretsManagerCacheHolder.clearCache(); diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraMysqlDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraMysqlDialect.java index 45a8ab8eb..0e57d407f 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraMysqlDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraMysqlDialect.java @@ -20,7 +20,7 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; -import java.util.Collections; +import java.util.Arrays; import java.util.List; import software.amazon.jdbc.hostlistprovider.AuroraHostListProvider; import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringRdsHostListProvider; @@ -28,19 +28,19 @@ public class AuroraMysqlDialect extends MysqlDialect { - private static final String TOPOLOGY_QUERY = + protected final String topologyQuery = "SELECT SERVER_ID, CASE WHEN SESSION_ID = 'MASTER_SESSION_ID' THEN TRUE ELSE FALSE END, " + "CPU, REPLICA_LAG_IN_MILLISECONDS, LAST_UPDATE_TIMESTAMP " + "FROM information_schema.replica_host_status " // filter out nodes that haven't been updated in the last 5 minutes + "WHERE time_to_sec(timediff(now(), LAST_UPDATE_TIMESTAMP)) <= 300 OR SESSION_ID = 'MASTER_SESSION_ID' "; - private static final String IS_WRITER_QUERY = + protected final String isWriterQuery = "SELECT SERVER_ID FROM information_schema.replica_host_status " + "WHERE SESSION_ID = 'MASTER_SESSION_ID' AND SERVER_ID = @@aurora_server_id"; - private static final String NODE_ID_QUERY = "SELECT @@aurora_server_id"; - private static final String IS_READER_QUERY = "SELECT 
@@innodb_read_only"; + protected final String nodeIdQuery = "SELECT @@aurora_server_id"; + protected final String isReaderQuery = "SELECT @@innodb_read_only"; @Override public boolean isDialect(final Connection connection) { @@ -76,7 +76,9 @@ public boolean isDialect(final Connection connection) { @Override public List getDialectUpdateCandidates() { - return Collections.singletonList(DialectCodes.RDS_MULTI_AZ_MYSQL_CLUSTER); + return Arrays.asList( + DialectCodes.GLOBAL_AURORA_MYSQL, + DialectCodes.RDS_MULTI_AZ_MYSQL_CLUSTER); } @Override @@ -90,19 +92,19 @@ public HostListProviderSupplier getHostListProvider() { properties, initialUrl, hostListProviderService, - TOPOLOGY_QUERY, - NODE_ID_QUERY, - IS_READER_QUERY, - IS_WRITER_QUERY, + this.topologyQuery, + this.nodeIdQuery, + this.isReaderQuery, + this.isWriterQuery, pluginService); } return new AuroraHostListProvider( properties, initialUrl, hostListProviderService, - TOPOLOGY_QUERY, - NODE_ID_QUERY, - IS_READER_QUERY); + this.topologyQuery, + this.nodeIdQuery, + this.isReaderQuery); }; } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraPgDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraPgDialect.java index 2371b2905..ebe8da303 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraPgDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraPgDialect.java @@ -20,6 +20,8 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.util.Arrays; +import java.util.List; import java.util.logging.Logger; import software.amazon.jdbc.hostlistprovider.AuroraHostListProvider; import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringRdsHostListProvider; @@ -32,14 +34,14 @@ public class AuroraPgDialect extends PgDialect implements AuroraLimitlessDialect { private static final Logger LOGGER = Logger.getLogger(AuroraPgDialect.class.getName()); - private static final String extensionsSql = + protected final String extensionsSql = "SELECT (setting LIKE '%aurora_stat_utils%') AS aurora_stat_utils " + "FROM pg_settings " + "WHERE name='rds.extensions'"; - private static final String topologySql = "SELECT 1 FROM aurora_replica_status() LIMIT 1"; + protected final String topologySql = "SELECT 1 FROM aurora_replica_status() LIMIT 1"; - private static final String TOPOLOGY_QUERY = + protected final String topologyQuery = "SELECT SERVER_ID, CASE WHEN SESSION_ID = 'MASTER_SESSION_ID' THEN TRUE ELSE FALSE END, " + "CPU, COALESCE(REPLICA_LAG_IN_MSEC, 0), LAST_UPDATE_TIMESTAMP " + "FROM aurora_replica_status() " @@ -47,15 +49,22 @@ public class AuroraPgDialect extends PgDialect implements AuroraLimitlessDialect + "WHERE EXTRACT(EPOCH FROM(NOW() - LAST_UPDATE_TIMESTAMP)) <= 300 OR SESSION_ID = 'MASTER_SESSION_ID' " + "OR LAST_UPDATE_TIMESTAMP IS NULL"; - private static final String IS_WRITER_QUERY = + protected final String isWriterQuery = "SELECT SERVER_ID FROM aurora_replica_status() " + "WHERE SESSION_ID = 'MASTER_SESSION_ID' AND SERVER_ID = aurora_db_instance_identifier()"; - private static final String NODE_ID_QUERY = "SELECT aurora_db_instance_identifier()"; - private static final String IS_READER_QUERY = "SELECT pg_is_in_recovery()"; + protected final String nodeIdQuery = "SELECT aurora_db_instance_identifier()"; + protected final String isReaderQuery = "SELECT pg_is_in_recovery()"; protected static final String LIMITLESS_ROUTER_ENDPOINT_QUERY = "select router_endpoint, load from aurora_limitless_router_endpoints()"; + @Override + 
public List getDialectUpdateCandidates() { + return Arrays.asList(DialectCodes.GLOBAL_AURORA_PG, + DialectCodes.RDS_MULTI_AZ_PG_CLUSTER, + DialectCodes.RDS_PG); + } + @Override public boolean isDialect(final Connection connection) { if (!super.isDialect(connection)) { @@ -136,19 +145,19 @@ public HostListProviderSupplier getHostListProvider() { properties, initialUrl, hostListProviderService, - TOPOLOGY_QUERY, - NODE_ID_QUERY, - IS_READER_QUERY, - IS_WRITER_QUERY, + topologyQuery, + nodeIdQuery, + isReaderQuery, + isWriterQuery, pluginService); } return new AuroraHostListProvider( properties, initialUrl, hostListProviderService, - TOPOLOGY_QUERY, - NODE_ID_QUERY, - IS_READER_QUERY); + topologyQuery, + nodeIdQuery, + isReaderQuery); }; } diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectCodes.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectCodes.java index 74c48a67c..47c0de3c5 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectCodes.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectCodes.java @@ -17,12 +17,14 @@ package software.amazon.jdbc.dialect; public class DialectCodes { + public static final String GLOBAL_AURORA_MYSQL = "global-aurora-mysql"; public static final String AURORA_MYSQL = "aurora-mysql"; public static final String RDS_MYSQL = "rds-mysql"; public static final String MYSQL = "mysql"; // https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html public static final String RDS_MULTI_AZ_MYSQL_CLUSTER = "rds-multi-az-mysql-cluster"; + public static final String GLOBAL_AURORA_PG = "global-aurora-pg"; public static final String AURORA_PG = "aurora-pg"; public static final String RDS_PG = "rds-pg"; // https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectManager.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectManager.java index 90c031be3..8f7a8f2af 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectManager.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectManager.java @@ -59,7 +59,9 @@ public class DialectManager implements DialectProvider { put(DialectCodes.RDS_MULTI_AZ_MYSQL_CLUSTER, new RdsMultiAzDbClusterMysqlDialect()); put(DialectCodes.RDS_PG, new RdsPgDialect()); put(DialectCodes.RDS_MULTI_AZ_PG_CLUSTER, new RdsMultiAzDbClusterPgDialect()); + put(DialectCodes.GLOBAL_AURORA_MYSQL, new GlobalAuroraMysqlDialect()); put(DialectCodes.AURORA_MYSQL, new AuroraMysqlDialect()); + put(DialectCodes.GLOBAL_AURORA_PG, new GlobalAuroraPgDialect()); put(DialectCodes.AURORA_PG, new AuroraPgDialect()); put(DialectCodes.UNKNOWN, new UnknownDialect()); } @@ -160,6 +162,12 @@ public Dialect getDialect( if (driverProtocol.contains("mysql")) { RdsUrlType type = this.rdsHelper.identifyRdsType(host); + if (type == RdsUrlType.RDS_GLOBAL_WRITER_CLUSTER) { + this.canUpdate = false; + this.dialectCode = DialectCodes.GLOBAL_AURORA_MYSQL; + this.dialect = knownDialectsByCode.get(DialectCodes.GLOBAL_AURORA_MYSQL); + return this.dialect; + } if (type.isRdsCluster()) { this.canUpdate = true; this.dialectCode = DialectCodes.AURORA_MYSQL; @@ -188,6 +196,12 @@ public Dialect getDialect( this.dialect = knownDialectsByCode.get(DialectCodes.AURORA_PG); return this.dialect; } + if (RdsUrlType.RDS_GLOBAL_WRITER_CLUSTER.equals(type)) { + this.canUpdate = false; + this.dialectCode = DialectCodes.GLOBAL_AURORA_PG; + this.dialect = 
knownDialectsByCode.get(DialectCodes.GLOBAL_AURORA_PG); + return this.dialect; + } if (type.isRdsCluster()) { this.canUpdate = true; this.dialectCode = DialectCodes.AURORA_PG; diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraMysqlDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraMysqlDialect.java new file mode 100644 index 000000000..dddb66ccb --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraMysqlDialect.java @@ -0,0 +1,132 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.dialect; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Collections; +import java.util.List; +import software.amazon.jdbc.hostlistprovider.AuroraGlobalDbHostListProvider; +import software.amazon.jdbc.hostlistprovider.monitoring.AuroraGlobalDbMonitoringHostListProvider; +import software.amazon.jdbc.plugin.failover2.FailoverConnectionPlugin; + +public class GlobalAuroraMysqlDialect extends AuroraMysqlDialect { + + protected final String globalDbStatusTableExistQuery = + "SELECT 1 AS tmp FROM information_schema.tables WHERE" + + " upper(table_schema) = 'INFORMATION_SCHEMA' AND upper(table_name) = 'AURORA_GLOBAL_DB_STATUS'"; + + protected final String globalDbStatusQuery = + "SELECT count(1) FROM information_schema.aurora_global_db_status"; + + protected final String globalDbInstanceStatusTableExistQuery = + "SELECT 1 AS tmp FROM information_schema.tables WHERE" + + " upper(table_schema) = 'INFORMATION_SCHEMA' AND upper(table_name) = 'AURORA_GLOBAL_DB_INSTANCE_STATUS'"; + + protected final String globalTopologyQuery = + "SELECT SERVER_ID, CASE WHEN SESSION_ID = 'MASTER_SESSION_ID' THEN TRUE ELSE FALSE END, " + + "VISIBILITY_LAG_IN_MSEC, AWS_REGION " + + "FROM information_schema.aurora_global_db_instance_status "; + + protected final String regionByNodeIdQuery = + "SELECT AWS_REGION FROM information_schema.aurora_global_db_instance_status WHERE SERVER_ID = ?"; + + @Override + public boolean isDialect(final Connection connection) { + Statement stmt = null; + ResultSet rs = null; + try { + stmt = connection.createStatement(); + rs = stmt.executeQuery(this.globalDbStatusTableExistQuery); + + if (rs.next()) { + rs.close(); + stmt.close(); + + stmt = connection.createStatement(); + rs = stmt.executeQuery(this.globalDbInstanceStatusTableExistQuery); + + if (rs.next()) { + rs.close(); + stmt.close(); + + stmt = connection.createStatement(); + rs = stmt.executeQuery(this.globalDbStatusQuery); + + if (rs.next()) { + int awsRegionCount = rs.getInt(1); + return awsRegionCount > 1; + } + } + } + return false; + } catch (final SQLException ex) { + // ignore + } finally { + if (rs != null) { + try { + rs.close(); + } catch (SQLException ex) { + // ignore + } + } + if (stmt != null) { + try { + stmt.close(); + } catch (SQLException ex) { + // 
ignore + } + } + } + return false; + } + + @Override + public List getDialectUpdateCandidates() { + return Collections.emptyList(); + } + + @Override + public HostListProviderSupplier getHostListProvider() { + return (properties, initialUrl, hostListProviderService, pluginService) -> { + + final FailoverConnectionPlugin failover2Plugin = pluginService.getPlugin(FailoverConnectionPlugin.class); + + if (failover2Plugin != null) { + return new AuroraGlobalDbMonitoringHostListProvider( + properties, + initialUrl, + hostListProviderService, + this.globalTopologyQuery, + this.nodeIdQuery, + this.isReaderQuery, + this.isWriterQuery, + this.regionByNodeIdQuery, + pluginService); + } + return new AuroraGlobalDbHostListProvider( + properties, + initialUrl, + hostListProviderService, + this.globalTopologyQuery, + this.nodeIdQuery, + this.isReaderQuery); + }; + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraPgDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraPgDialect.java new file mode 100644 index 000000000..6c7abcd43 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraPgDialect.java @@ -0,0 +1,145 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package software.amazon.jdbc.dialect; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Collections; +import java.util.List; +import java.util.logging.Logger; +import software.amazon.jdbc.hostlistprovider.AuroraGlobalDbHostListProvider; +import software.amazon.jdbc.hostlistprovider.monitoring.AuroraGlobalDbMonitoringHostListProvider; +import software.amazon.jdbc.plugin.failover2.FailoverConnectionPlugin; + +public class GlobalAuroraPgDialect extends AuroraPgDialect { + + private static final Logger LOGGER = Logger.getLogger(GlobalAuroraPgDialect.class.getName()); + + protected final String globalDbStatusFuncExistQuery = + "select 'aurora_global_db_status'::regproc"; + + protected final String globalDbInstanceStatusFuncExistQuery = + "select 'aurora_global_db_instance_status'::regproc"; + + protected final String globalTopologyQuery = + "SELECT SERVER_ID, CASE WHEN SESSION_ID = 'MASTER_SESSION_ID' THEN TRUE ELSE FALSE END, " + + "VISIBILITY_LAG_IN_MSEC, AWS_REGION " + + "FROM aurora_global_db_instance_status()"; + + protected final String globalDbStatusQuery = + "SELECT count(1) FROM aurora_global_db_status()"; + + protected final String regionByNodeIdQuery = + "SELECT AWS_REGION FROM aurora_global_db_instance_status() WHERE SERVER_ID = ?"; + + @Override + public boolean isDialect(final Connection connection) { + Statement stmt = null; + ResultSet rs = null; + try { + stmt = connection.createStatement(); + rs = stmt.executeQuery(this.extensionsSql); + if (rs.next()) { + final boolean auroraUtils = rs.getBoolean("aurora_stat_utils"); + LOGGER.finest(() -> String.format("auroraUtils: %b", auroraUtils)); + if (!auroraUtils) { + return false; + } + } + rs.close(); + stmt.close(); + + stmt = connection.createStatement(); + rs = stmt.executeQuery(this.globalDbStatusFuncExistQuery); + + if (rs.next()) { + rs.close(); + stmt.close(); + + stmt = connection.createStatement(); + rs = stmt.executeQuery(this.globalDbInstanceStatusFuncExistQuery); + + if (rs.next()) { + rs.close(); + stmt.close(); + + stmt = connection.createStatement(); + rs = stmt.executeQuery(this.globalDbStatusQuery); + + if (rs.next()) { + int awsRegionCount = rs.getInt(1); + return awsRegionCount > 1; + } + } + } + return false; + } catch (final SQLException ex) { + // ignore + } finally { + if (rs != null) { + try { + rs.close(); + } catch (SQLException ex) { + // ignore + } + } + if (stmt != null) { + try { + stmt.close(); + } catch (SQLException ex) { + // ignore + } + } + } + return false; + } + + @Override + public List getDialectUpdateCandidates() { + return Collections.emptyList(); + } + + @Override + public HostListProviderSupplier getHostListProvider() { + return (properties, initialUrl, hostListProviderService, pluginService) -> { + + final FailoverConnectionPlugin failover2Plugin = pluginService.getPlugin(FailoverConnectionPlugin.class); + + if (failover2Plugin != null) { + return new AuroraGlobalDbMonitoringHostListProvider( + properties, + initialUrl, + hostListProviderService, + this.globalTopologyQuery, + this.nodeIdQuery, + this.isReaderQuery, + this.isWriterQuery, + this.regionByNodeIdQuery, + pluginService); + } + return new AuroraGlobalDbHostListProvider( + properties, + initialUrl, + hostListProviderService, + this.globalTopologyQuery, + this.nodeIdQuery, + this.isReaderQuery); + }; + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/MysqlDialect.java 
b/wrapper/src/main/java/software/amazon/jdbc/dialect/MysqlDialect.java index b03fb86b4..8137bff0e 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/MysqlDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/MysqlDialect.java @@ -34,8 +34,9 @@ public class MysqlDialect implements Dialect { private static final List dialectUpdateCandidates = Arrays.asList( - DialectCodes.RDS_MULTI_AZ_MYSQL_CLUSTER, + DialectCodes.GLOBAL_AURORA_MYSQL, DialectCodes.AURORA_MYSQL, + DialectCodes.RDS_MULTI_AZ_MYSQL_CLUSTER, DialectCodes.RDS_MYSQL ); private static MySQLExceptionHandler mySQLExceptionHandler; diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/PgDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/PgDialect.java index b3b89af41..97876fd32 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/PgDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/PgDialect.java @@ -37,6 +37,7 @@ public class PgDialect implements Dialect { private static final List dialectUpdateCandidates = Arrays.asList( + DialectCodes.GLOBAL_AURORA_PG, DialectCodes.AURORA_PG, DialectCodes.RDS_MULTI_AZ_PG_CLUSTER, DialectCodes.RDS_PG); diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMysqlDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMysqlDialect.java index 0ab09ff52..181d9c9dd 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMysqlDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMysqlDialect.java @@ -28,6 +28,7 @@ public class RdsMysqlDialect extends MysqlDialect { private static final List dialectUpdateCandidates = Arrays.asList( DialectCodes.RDS_MULTI_AZ_MYSQL_CLUSTER, + DialectCodes.GLOBAL_AURORA_MYSQL, DialectCodes.AURORA_MYSQL ); diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsPgDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsPgDialect.java index 847f281b3..702b854ac 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsPgDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsPgDialect.java @@ -36,6 +36,7 @@ public class RdsPgDialect extends PgDialect { private static final List dialectUpdateCandidates = Arrays.asList( DialectCodes.RDS_MULTI_AZ_PG_CLUSTER, + DialectCodes.GLOBAL_AURORA_PG, DialectCodes.AURORA_PG); private static final String extensionsSql = "SELECT (setting LIKE '%rds_tools%') AS rds_tools, " diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/UnknownDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/UnknownDialect.java index cb5482936..9179a1038 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/UnknownDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/UnknownDialect.java @@ -31,6 +31,8 @@ public class UnknownDialect implements Dialect { private static final List dialectUpdateCandidates = Arrays.asList( + DialectCodes.GLOBAL_AURORA_PG, + DialectCodes.GLOBAL_AURORA_MYSQL, DialectCodes.AURORA_PG, DialectCodes.AURORA_MYSQL, DialectCodes.RDS_MULTI_AZ_PG_CLUSTER, diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/AuroraGlobalDbHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/AuroraGlobalDbHostListProvider.java new file mode 100644 index 000000000..6a44a6ce0 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/AuroraGlobalDbHostListProvider.java @@ -0,0 +1,104 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.hostlistprovider; + +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.time.Instant; +import java.util.Arrays; +import java.util.Map; +import java.util.Properties; +import java.util.stream.Collectors; +import software.amazon.jdbc.AwsWrapperProperty; +import software.amazon.jdbc.HostListProviderService; +import software.amazon.jdbc.HostSpec; +import software.amazon.jdbc.HostSpecBuilder; +import software.amazon.jdbc.PropertyDefinition; +import software.amazon.jdbc.util.ConnectionUrlParser; +import software.amazon.jdbc.util.RdsUtils; +import software.amazon.jdbc.util.StringUtils; + +public class AuroraGlobalDbHostListProvider extends AuroraHostListProvider { + + public static final AwsWrapperProperty GLOBAL_CLUSTER_INSTANCE_HOST_PATTERNS = + new AwsWrapperProperty( + "globalClusterInstanceHostPatterns", + null, + "Coma-separated list of the cluster instance DNS patterns that will be used to " + + "build a complete instance endpoints. " + + "A \"?\" character in these patterns should be used as a placeholder for cluster instance names. " + + "This parameter is required for Global Aurora Databases. " + + "Each region of Global Aurora Database should be specified in the list."); + + protected final RdsUtils rdsUtils = new RdsUtils(); + + protected Map globalClusterInstanceTemplateByAwsRegion; + + static { + PropertyDefinition.registerPluginProperties(AuroraGlobalDbHostListProvider.class); + } + + public AuroraGlobalDbHostListProvider(Properties properties, String originalUrl, + HostListProviderService hostListProviderService, String topologyQuery, + String nodeIdQuery, String isReaderQuery) { + super(properties, originalUrl, hostListProviderService, topologyQuery, nodeIdQuery, isReaderQuery); + } + + @Override + protected void initSettings() throws SQLException { + super.initSettings(); + + String templates = GLOBAL_CLUSTER_INSTANCE_HOST_PATTERNS.getString(properties); + if (StringUtils.isNullOrEmpty(templates)) { + throw new SQLException("Parameter 'globalClusterInstanceHostPatterns' is required for Aurora Global Database."); + } + + HostSpecBuilder hostSpecBuilder = this.hostListProviderService.getHostSpecBuilder(); + this.globalClusterInstanceTemplateByAwsRegion = Arrays.stream(templates.split(",")) + .collect(Collectors.toMap( + rdsUtils::getRdsRegion, + x -> { + HostSpec hostSpec = ConnectionUrlParser.parseHostPortPair(x.trim(), () -> hostSpecBuilder); + this.validateHostPatternSetting(hostSpec.getHost()); + return hostSpec; + })); + } + + @Override + protected HostSpec createHost(final ResultSet resultSet) throws SQLException { + + // suggestedWriterNodeId is not used for Aurora clusters. Topology query can detect a writer for itself. + + // According to the topology query the result set + // should contain 4 columns: node ID, 1/0 (writer/reader), node lag in time (msec), AWS region. 
+ String hostName = resultSet.getString(1); + final boolean isWriter = resultSet.getBoolean(2); + final float nodeLag = resultSet.getFloat(3); + final String awsRegion = resultSet.getString(4); + + // Calculate weight based on node lag in time and CPU utilization. + final long weight = Math.round(nodeLag) * 100L; + + final HostSpec clusterInstanceTemplateForRegion = this.globalClusterInstanceTemplateByAwsRegion.get(awsRegion); + if (clusterInstanceTemplateForRegion == null) { + throw new SQLException("Can't find cluster template for region " + awsRegion); + } + + return createHost(hostName, isWriter, weight, Timestamp.from(Instant.now()), clusterInstanceTemplateForRegion); + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsHostListProvider.java index eb677a1c2..a29c317f5 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsHostListProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsHostListProvider.java @@ -27,7 +27,6 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map.Entry; import java.util.Objects; @@ -70,7 +69,7 @@ public class RdsHostListProvider implements DynamicHostListProvider { + "after which it will be updated during the next interaction with the connection."); public static final AwsWrapperProperty CLUSTER_ID = new AwsWrapperProperty( - "clusterId", "", + "clusterId", null, "A unique identifier for the cluster. " + "Connections with the same cluster id share a cluster topology cache. " + "If unspecified, a cluster id is automatically created for AWS RDS clusters."); @@ -88,10 +87,8 @@ public class RdsHostListProvider implements DynamicHostListProvider { protected static final RdsUtils rdsHelper = new RdsUtils(); protected static final ConnectionUrlParser connectionUrlParser = new ConnectionUrlParser(); protected static final int defaultTopologyQueryTimeoutMs = 5000; - protected static final long suggestedClusterIdRefreshRateNano = TimeUnit.MINUTES.toNanos(10); protected static final CacheMap> topologyCache = new CacheMap<>(); - protected static final CacheMap suggestedPrimaryClusterIdCache = new CacheMap<>(); - protected static final CacheMap primaryClusterIdCache = new CacheMap<>(); + protected static final CacheMap clusterIdByHostAndPort = new CacheMap<>(); protected final HostListProviderService hostListProviderService; protected final String originalUrl; @@ -110,10 +107,6 @@ public class RdsHostListProvider implements DynamicHostListProvider { protected String clusterId; protected HostSpec clusterInstanceTemplate; - // A primary clusterId is a clusterId that is based off of a cluster endpoint URL - // (rather than a GUID or a value provided by the user). 
- protected boolean isPrimaryClusterId; - protected volatile boolean isInitialized = false; protected Properties properties; @@ -147,73 +140,59 @@ protected void init() throws SQLException { if (this.isInitialized) { return; } - - // initial topology is based on connection string - this.initialHostList = - connectionUrlParser.getHostsFromConnectionUrl(this.originalUrl, false, - this.hostListProviderService::getHostSpecBuilder); - if (this.initialHostList == null || this.initialHostList.isEmpty()) { - throw new SQLException(Messages.get("RdsHostListProvider.parsedListEmpty", - new Object[] {this.originalUrl})); - } - this.initialHostSpec = this.initialHostList.get(0); - this.hostListProviderService.setInitialConnectionHostSpec(this.initialHostSpec); - - this.clusterId = UUID.randomUUID().toString(); - this.isPrimaryClusterId = false; - this.refreshRateNano = - TimeUnit.MILLISECONDS.toNanos(CLUSTER_TOPOLOGY_REFRESH_RATE_MS.getInteger(properties)); - - HostSpecBuilder hostSpecBuilder = this.hostListProviderService.getHostSpecBuilder(); - String clusterInstancePattern = CLUSTER_INSTANCE_HOST_PATTERN.getString(this.properties); - if (clusterInstancePattern != null) { - this.clusterInstanceTemplate = - ConnectionUrlParser.parseHostPortPair(clusterInstancePattern, () -> hostSpecBuilder); - } else { - this.clusterInstanceTemplate = - hostSpecBuilder - .host(rdsHelper.getRdsInstanceHostPattern(this.initialHostSpec.getHost())) - .hostId(this.initialHostSpec.getHostId()) - .port(this.initialHostSpec.getPort()) - .build(); - } - - validateHostPatternSetting(this.clusterInstanceTemplate.getHost()); - - this.rdsUrlType = rdsHelper.identifyRdsType(this.initialHostSpec.getHost()); - - final String clusterIdSetting = CLUSTER_ID.getString(this.properties); - if (!StringUtils.isNullOrEmpty(clusterIdSetting)) { - this.clusterId = clusterIdSetting; - } else if (rdsUrlType == RdsUrlType.RDS_PROXY) { - // Each proxy is associated with a single cluster, so it's safe to use RDS Proxy Url as cluster - // identification - this.clusterId = this.initialHostSpec.getUrl(); - } else if (rdsUrlType.isRds()) { - final ClusterSuggestedResult clusterSuggestedResult = - getSuggestedClusterId(this.initialHostSpec.getHostAndPort()); - if (clusterSuggestedResult != null && !StringUtils.isNullOrEmpty(clusterSuggestedResult.clusterId)) { - this.clusterId = clusterSuggestedResult.clusterId; - this.isPrimaryClusterId = clusterSuggestedResult.isPrimaryClusterId; - } else { - final String clusterRdsHostUrl = - rdsHelper.getRdsClusterHostUrl(this.initialHostSpec.getHost()); - if (!StringUtils.isNullOrEmpty(clusterRdsHostUrl)) { - this.clusterId = this.clusterInstanceTemplate.isPortSpecified() - ? 
String.format("%s:%s", clusterRdsHostUrl, this.clusterInstanceTemplate.getPort()) - : clusterRdsHostUrl; - this.isPrimaryClusterId = true; - primaryClusterIdCache.put(this.clusterId, true, suggestedClusterIdRefreshRateNano); - } - } - } - + this.initSettings(); this.isInitialized = true; } finally { lock.unlock(); } } + protected void initSettings() throws SQLException { + + // initial topology is based on connection string + this.initialHostList = + connectionUrlParser.getHostsFromConnectionUrl(this.originalUrl, false, + this.hostListProviderService::getHostSpecBuilder); + if (this.initialHostList == null || this.initialHostList.isEmpty()) { + throw new SQLException(Messages.get("RdsHostListProvider.parsedListEmpty", + new Object[] {this.originalUrl})); + } + this.initialHostSpec = this.initialHostList.get(0); + this.hostListProviderService.setInitialConnectionHostSpec(this.initialHostSpec); + + this.clusterId = UUID.randomUUID().toString(); + this.refreshRateNano = + TimeUnit.MILLISECONDS.toNanos(CLUSTER_TOPOLOGY_REFRESH_RATE_MS.getInteger(properties)); + + HostSpecBuilder hostSpecBuilder = this.hostListProviderService.getHostSpecBuilder(); + String clusterInstancePattern = CLUSTER_INSTANCE_HOST_PATTERN.getString(this.properties); + if (clusterInstancePattern != null) { + this.clusterInstanceTemplate = + ConnectionUrlParser.parseHostPortPair(clusterInstancePattern, () -> hostSpecBuilder); + } else { + this.clusterInstanceTemplate = + hostSpecBuilder + .host(rdsHelper.getRdsInstanceHostPattern(this.initialHostSpec.getHost())) + .hostId(this.initialHostSpec.getHostId()) + .port(this.initialHostSpec.getPort()) + .build(); + } + + validateHostPatternSetting(this.clusterInstanceTemplate.getHost()); + + this.rdsUrlType = rdsHelper.identifyRdsType(this.initialHostSpec.getHost()); + + final String clusterIdSetting = CLUSTER_ID.getString(this.properties); + if (!StringUtils.isNullOrEmpty(clusterIdSetting)) { + this.clusterId = clusterIdSetting; + } else if (this.rdsUrlType != RdsUrlType.IP_ADDRESS) { + final String suggestedClusterId = this.getSuggestedClusterId(this.initialHostSpec.getHostAndPort()); + if (!StringUtils.isNullOrEmpty(suggestedClusterId)) { + this.clusterId = suggestedClusterId; + } + } + } + /** * Get cluster topology. It may require an extra call to database to fetch the latest topology. A * cached copy of topology is returned if it's not yet outdated (controlled by {@link @@ -229,24 +208,8 @@ protected void init() throws SQLException { public FetchTopologyResult getTopology(final Connection conn, final boolean forceUpdate) throws SQLException { init(); - final String suggestedPrimaryClusterId = suggestedPrimaryClusterIdCache.get(this.clusterId); - - // Change clusterId by accepting a suggested one - if (!StringUtils.isNullOrEmpty(suggestedPrimaryClusterId) - && !this.clusterId.equals(suggestedPrimaryClusterId)) { - - final String oldClusterId = this.clusterId; - this.clusterId = suggestedPrimaryClusterId; - this.isPrimaryClusterId = true; - this.clusterIdChanged(oldClusterId); - } - final List cachedHosts = topologyCache.get(this.clusterId); - - // This clusterId is a primary one and is about to create a new entry in the cache. - // When a primary entry is created it needs to be suggested for other (non-primary) entries. - // Remember a flag to do suggestion after cache is updated. 
- final boolean needToSuggest = cachedHosts == null && this.isPrimaryClusterId; + final boolean needToSuggest = cachedHosts == null; if (cachedHosts == null || forceUpdate) { @@ -262,10 +225,27 @@ public FetchTopologyResult getTopology(final Connection conn, final boolean forc final List hosts = queryForTopology(conn); if (!Utils.isNullOrEmpty(hosts)) { - topologyCache.put(this.clusterId, hosts, this.refreshRateNano); + if (needToSuggest) { - this.suggestPrimaryCluster(hosts); + // check if host list matches any existing topologies + String matchClusterId = this.getMatchedClusterId(hosts); + + // Change clusterId by accepting a suggested one + if (!StringUtils.isNullOrEmpty(matchClusterId)) { + + final String oldClusterId = this.clusterId; + this.clusterId = matchClusterId; + this.clusterIdChanged(oldClusterId); + } + clusterIdByHostAndPort.put(this.initialHostSpec.getHostAndPort(), this.clusterId, this.refreshRateNano); + } + + for (HostSpec hostSpec : hosts) { + clusterIdByHostAndPort.put(hostSpec.getHostAndPort(), this.clusterId, this.refreshRateNano); } + + topologyCache.put(this.clusterId, hosts, this.refreshRateNano); + return new FetchTopologyResult(false, hosts); } } @@ -278,66 +258,36 @@ public FetchTopologyResult getTopology(final Connection conn, final boolean forc } } - protected void clusterIdChanged(final String oldClusterId) { - // do nothing - } + protected String getMatchedClusterId(final @NonNull List hosts) { + if (Utils.isNullOrEmpty(hosts)) { + return null; + } - protected ClusterSuggestedResult getSuggestedClusterId(final String url) { - for (final Entry> entry : topologyCache.getEntries().entrySet()) { - final String key = entry.getKey(); // clusterId - final List hosts = entry.getValue(); - final boolean isPrimaryCluster = primaryClusterIdCache.get(key, false, - suggestedClusterIdRefreshRateNano); - if (key.equals(url)) { - return new ClusterSuggestedResult(url, isPrimaryCluster); + for (HostSpec hostSpec : hosts) { + // TODO: use template pattern port? 
+ String existingClusterId = clusterIdByHostAndPort.get(hostSpec.getHostAndPort()); + if (!StringUtils.isNullOrEmpty(existingClusterId)) { + return existingClusterId; } - if (hosts == null) { - continue; - } - for (final HostSpec host : hosts) { - if (host.getHostAndPort().equals(url)) { - LOGGER.finest(() -> Messages.get("RdsHostListProvider.suggestedClusterId", - new Object[] {key, url})); - return new ClusterSuggestedResult(key, isPrimaryCluster); + + for (final Entry<String, List<HostSpec>> entry : topologyCache.getEntries().entrySet()) { + for (final HostSpec existingHostSpec : entry.getValue()) { + if (hostSpec.getHostAndPort().equals(existingHostSpec.getHostAndPort())) { + return entry.getKey(); + } } } } + return null; } - protected void suggestPrimaryCluster(final @NonNull List<HostSpec> primaryClusterHosts) { - if (Utils.isNullOrEmpty(primaryClusterHosts)) { - return; - } - - final Set<String> primaryClusterHostUrls = new HashSet<>(); - for (final HostSpec hostSpec : primaryClusterHosts) { - primaryClusterHostUrls.add(hostSpec.getUrl()); - } - - for (final Entry<String, List<HostSpec>> entry : topologyCache.getEntries().entrySet()) { - final String clusterId = entry.getKey(); - final List<HostSpec> clusterHosts = entry.getValue(); - final boolean isPrimaryCluster = primaryClusterIdCache.get(clusterId, false, - suggestedClusterIdRefreshRateNano); - final String suggestedPrimaryClusterId = suggestedPrimaryClusterIdCache.get(clusterId); - if (isPrimaryCluster - || !StringUtils.isNullOrEmpty(suggestedPrimaryClusterId) - || Utils.isNullOrEmpty(clusterHosts)) { - continue; - } + protected void clusterIdChanged(final String oldClusterId) { + // do nothing + } - // The entry is non-primary - for (final HostSpec host : clusterHosts) { - if (primaryClusterHostUrls.contains(host.getUrl())) { - // Instance on this cluster matches with one of the instance on primary cluster - // Suggest the primary clusterId to this entry - suggestedPrimaryClusterIdCache.put(clusterId, this.clusterId, - suggestedClusterIdRefreshRateNano); - break; - } - } - } + protected String getSuggestedClusterId(final String hostAndPort) { + return clusterIdByHostAndPort.get(hostAndPort); } /** @@ -447,19 +397,20 @@ protected HostSpec createHost(final ResultSet resultSet) throws SQLException { // Calculate weight based on node lag in time and CPU utilization. final long weight = Math.round(nodeLag) * 100L + Math.round(cpuUtilization); - return createHost(hostName, isWriter, weight, lastUpdateTime); + return createHost(hostName, isWriter, weight, lastUpdateTime, this.clusterInstanceTemplate); } protected HostSpec createHost( String host, final boolean isWriter, final long weight, - final Timestamp lastUpdateTime) { + final Timestamp lastUpdateTime, + final HostSpec clusterInstanceTemplate) { host = host == null ? "?" : host; - final String endpoint = getHostEndpoint(host); - final int port = this.clusterInstanceTemplate.isPortSpecified() - ? this.clusterInstanceTemplate.getPort() + final String endpoint = getHostEndpoint(host, clusterInstanceTemplate); + final int port = clusterInstanceTemplate.isPortSpecified() + ? clusterInstanceTemplate.getPort() : this.initialHostSpec.getPort(); final HostSpec hostSpec = this.hostListProviderService.getHostSpecBuilder() @@ -481,8 +432,8 @@ protected HostSpec createHost( * @param nodeName A host name.
* @return Host dns endpoint */ - protected String getHostEndpoint(final String nodeName) { - final String host = this.clusterInstanceTemplate.getHost(); + protected String getHostEndpoint(final String nodeName, final HostSpec clusterInstanceTemplate) { + final String host = clusterInstanceTemplate.getHost(); return host.replace("?", nodeName); } @@ -501,8 +452,7 @@ protected String getHostEndpoint(final String nodeName) { */ public static void clearAll() { topologyCache.clear(); - primaryClusterIdCache.clear(); - suggestedPrimaryClusterIdCache.clear(); + clusterIdByHostAndPort.clear(); } /** @@ -554,8 +504,8 @@ public RdsUrlType getRdsUrlType() throws SQLException { return this.rdsUrlType; } - private void validateHostPatternSetting(final String hostPattern) { - if (!this.rdsHelper.isDnsPatternValid(hostPattern)) { + protected void validateHostPatternSetting(final String hostPattern) { + if (!rdsHelper.isDnsPatternValid(hostPattern)) { // "Invalid value for the 'clusterInstanceHostPattern' configuration setting - the host // pattern must contain a '?' // character as a placeholder for the DB instance identifiers of the instances in the cluster" @@ -564,7 +514,7 @@ private void validateHostPatternSetting(final String hostPattern) { throw new RuntimeException(message); } - final RdsUrlType rdsUrlType = this.rdsHelper.identifyRdsType(hostPattern); + final RdsUrlType rdsUrlType = rdsHelper.identifyRdsType(hostPattern); if (rdsUrlType == RdsUrlType.RDS_PROXY) { // "An RDS Proxy url can't be used as the 'clusterInstanceHostPattern' configuration setting." final String message = @@ -595,17 +545,11 @@ public static void logCache() { for (final Entry> entry : cacheEntries) { final List hosts = entry.getValue(); - final Boolean isPrimaryCluster = primaryClusterIdCache.get(entry.getKey()); - final String suggestedPrimaryClusterId = suggestedPrimaryClusterIdCache.get(entry.getKey()); if (sb.length() > 0) { sb.append("\n"); } sb.append("[").append(entry.getKey()).append("]:\n") - .append("\tisPrimaryCluster: ") - .append(isPrimaryCluster != null && isPrimaryCluster).append("\n") - .append("\tsuggestedPrimaryCluster: ") - .append(suggestedPrimaryClusterId).append("\n") .append("\tHosts: "); if (hosts == null) { @@ -698,15 +642,4 @@ public String getClusterId() throws UnsupportedOperationException, SQLException init(); return this.clusterId; } - - public static class ClusterSuggestedResult { - - public String clusterId; - public boolean isPrimaryClusterId; - - public ClusterSuggestedResult(final String clusterId, final boolean isPrimaryClusterId) { - this.clusterId = clusterId; - this.isPrimaryClusterId = isPrimaryClusterId; - } - } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/AuroraGlobalDbMonitoringHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/AuroraGlobalDbMonitoringHostListProvider.java new file mode 100644 index 000000000..8d43804c3 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/AuroraGlobalDbMonitoringHostListProvider.java @@ -0,0 +1,92 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.hostlistprovider.monitoring; + +import java.sql.SQLException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; +import java.util.stream.Collectors; +import software.amazon.jdbc.HostListProviderService; +import software.amazon.jdbc.HostSpec; +import software.amazon.jdbc.HostSpecBuilder; +import software.amazon.jdbc.PluginService; +import software.amazon.jdbc.PropertyDefinition; +import software.amazon.jdbc.hostlistprovider.AuroraGlobalDbHostListProvider; +import software.amazon.jdbc.util.ConnectionUrlParser; +import software.amazon.jdbc.util.RdsUtils; +import software.amazon.jdbc.util.StringUtils; + +public class AuroraGlobalDbMonitoringHostListProvider extends MonitoringRdsHostListProvider { + + protected Map<String, HostSpec> globalClusterInstanceTemplateByAwsRegion = new HashMap<>(); + + protected final RdsUtils rdsUtils = new RdsUtils(); + + protected String regionByNodeIdQuery; + + static { + // Register property definition in AuroraGlobalDbHostListProvider class. It's not a mistake. + PropertyDefinition.registerPluginProperties(AuroraGlobalDbHostListProvider.class); + } + + public AuroraGlobalDbMonitoringHostListProvider(Properties properties, String originalUrl, + HostListProviderService hostListProviderService, String globalTopologyQuery, + String nodeIdQuery, String isReaderQuery, String writerTopologyQuery, + String regionByNodeIdQuery, + PluginService pluginService) { + + super(properties, originalUrl, hostListProviderService, globalTopologyQuery, nodeIdQuery, isReaderQuery, + writerTopologyQuery, pluginService); + this.regionByNodeIdQuery = regionByNodeIdQuery; + } + + @Override + protected void initSettings() throws SQLException { + super.initSettings(); + + String templates = AuroraGlobalDbHostListProvider.GLOBAL_CLUSTER_INSTANCE_HOST_PATTERNS.getString(properties); + if (StringUtils.isNullOrEmpty(templates)) { + throw new SQLException("Parameter 'globalClusterInstanceHostPatterns' is required for Aurora Global Database."); + } + + HostSpecBuilder hostSpecBuilder = this.hostListProviderService.getHostSpecBuilder(); + this.globalClusterInstanceTemplateByAwsRegion = Arrays.stream(templates.split(",")) + .collect(Collectors.toMap( + rdsUtils::getRdsRegion, + x -> { + HostSpec hostSpec = ConnectionUrlParser.parseHostPortPair(x.trim(), () -> hostSpecBuilder); + this.validateHostPatternSetting(hostSpec.getHost()); + return hostSpec; + })); + } + + protected ClusterTopologyMonitor initMonitor() { + return monitors.computeIfAbsent(this.clusterId, + (key) -> new GlobalDbClusterTopologyMonitorImpl( + key, topologyCache, this.initialHostSpec, this.properties, this.pluginService, + this.hostListProviderService, this.clusterInstanceTemplate, + this.refreshRateNano, this.highRefreshRateNano, TOPOLOGY_CACHE_EXPIRATION_NANO, + this.topologyQuery, + this.writerTopologyQuery, + this.nodeIdQuery, + this.globalClusterInstanceTemplateByAwsRegion, + this.regionByNodeIdQuery), + MONITOR_EXPIRATION_NANO); + } +} diff --git
a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitorImpl.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitorImpl.java index 80a586249..b4c121c1b 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitorImpl.java +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitorImpl.java @@ -79,7 +79,7 @@ public class ClusterTopologyMonitorImpl implements ClusterTopologyMonitor { protected final long highRefreshRateNano; protected final long topologyCacheExpirationNano; protected final Properties properties; - protected final Properties monitoringProperties; + protected Properties monitoringProperties; protected final PluginService pluginService; protected final HostSpec initialHostSpec; protected final CacheMap> topologyMap; @@ -144,6 +144,13 @@ public ClusterTopologyMonitorImpl( this.writerTopologyQuery = writerTopologyQuery; this.nodeIdQuery = nodeIdQuery; + this.initSettings(); + + this.monitorExecutor.submit(this); + this.monitorExecutor.shutdown(); // No more tasks are accepted by the pool. + } + + protected void initSettings() { this.monitoringProperties = PropertyUtils.copyProperties(properties); this.properties.stringPropertyNames().stream() .filter(p -> p.startsWith(MONITORING_PROPERTY_PREFIX)) @@ -164,9 +171,6 @@ public ClusterTopologyMonitorImpl( PropertyDefinition.CONNECT_TIMEOUT.set( this.monitoringProperties, String.valueOf(defaultConnectionTimeoutMs)); } - - this.monitorExecutor.submit(this); - this.monitorExecutor.shutdown(); // No more tasks are accepted by the pool. } @Override @@ -480,7 +484,8 @@ protected List openAnyConnectionAndUpdateTopology() { } else { final String nodeId = this.getNodeId(this.monitoringConnection.get()); if (!StringUtils.isNullOrEmpty(nodeId)) { - this.writerHostSpec.set(this.createHost(nodeId, true, 0, null)); + this.writerHostSpec.set(this.createHost(nodeId, true, 0, null, + this.getClusterInstanceTemplate(nodeId, this.monitoringConnection.get()))); LOGGER.finest( Messages.get( "ClusterTopologyMonitorImpl.writerMonitoringConnection", @@ -521,6 +526,10 @@ protected List openAnyConnectionAndUpdateTopology() { return hosts; } + protected HostSpec getClusterInstanceTemplate(String nodeId, Connection connection) { + return this.clusterInstanceTemplate; + } + protected String getNodeId(final Connection connection) { try { try (final Statement stmt = connection.createStatement(); @@ -643,7 +652,7 @@ protected String getWriterNodeId(final Connection connection) throws SQLExceptio } protected String getSuggestedWriterNodeId(final Connection connection) throws SQLException { - // Aurora topology query can detect a writer for itself so it doesn't need any suggested writer node ID. + // Aurora topology query can detect a writer for itself, so it doesn't need any suggested writer node ID. return null; // intentionally null } @@ -723,19 +732,20 @@ protected HostSpec createHost( // Calculate weight based on node lag in time and CPU utilization. 
final long weight = Math.round(nodeLag) * 100L + Math.round(cpuUtilization); - return createHost(hostName, isWriter, weight, lastUpdateTime); + return createHost(hostName, isWriter, weight, lastUpdateTime, this.clusterInstanceTemplate); } protected HostSpec createHost( String nodeName, final boolean isWriter, final long weight, - final Timestamp lastUpdateTime) { + final Timestamp lastUpdateTime, + final HostSpec clusterInstanceTemplate) { nodeName = nodeName == null ? "?" : nodeName; - final String endpoint = getHostEndpoint(nodeName); - final int port = this.clusterInstanceTemplate.isPortSpecified() - ? this.clusterInstanceTemplate.getPort() + final String endpoint = getHostEndpoint(nodeName, clusterInstanceTemplate); + final int port = clusterInstanceTemplate.isPortSpecified() + ? clusterInstanceTemplate.getPort() : this.initialHostSpec.getPort(); final HostSpec hostSpec = this.hostListProviderService.getHostSpecBuilder() @@ -751,8 +761,8 @@ protected HostSpec createHost( return hostSpec; } - protected String getHostEndpoint(final String nodeName) { - final String host = this.clusterInstanceTemplate.getHost(); + protected String getHostEndpoint(final String nodeName, final HostSpec clusterInstanceTemplate) { + final String host = clusterInstanceTemplate.getHost(); return host.replace("?", nodeName); } @@ -847,12 +857,12 @@ public void run() { if (this.monitor.nodeThreadsWriterConnection.get() == null) { // while writer connection isn't yet established this reader connection may update topology if (updateTopology) { - this.readerThreadFetchTopology(connection, writerHostSpec); + this.readerThreadFetchTopology(connection, this.writerHostSpec); } else if (this.monitor.nodeThreadsReaderConnection.get() == null) { if (this.monitor.nodeThreadsReaderConnection.compareAndSet(null, connection)) { // let's use this connection to update topology updateTopology = true; - this.readerThreadFetchTopology(connection, writerHostSpec); + this.readerThreadFetchTopology(connection, this.writerHostSpec); } } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/GlobalDbClusterTopologyMonitorImpl.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/GlobalDbClusterTopologyMonitorImpl.java new file mode 100644 index 000000000..ca63ac538 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/GlobalDbClusterTopologyMonitorImpl.java @@ -0,0 +1,109 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package software.amazon.jdbc.hostlistprovider.monitoring; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.time.Instant; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.logging.Logger; +import software.amazon.jdbc.HostListProviderService; +import software.amazon.jdbc.HostSpec; +import software.amazon.jdbc.PluginService; +import software.amazon.jdbc.util.CacheMap; +import software.amazon.jdbc.util.StringUtils; + +public class GlobalDbClusterTopologyMonitorImpl extends ClusterTopologyMonitorImpl { + + private static final Logger LOGGER = Logger.getLogger(GlobalDbClusterTopologyMonitorImpl.class.getName()); + + protected final Map<String, HostSpec> globalClusterInstanceTemplateByAwsRegion; + protected final String regionByNodeIdQuery; + + public GlobalDbClusterTopologyMonitorImpl(String clusterId, + CacheMap<String, List<HostSpec>> topologyMap, + HostSpec initialHostSpec, Properties properties, + PluginService pluginService, + HostListProviderService hostListProviderService, + HostSpec clusterInstanceTemplate, long refreshRateNano, + long highRefreshRateNano, long topologyCacheExpirationNano, String globalTopologyQuery, + String writerTopologyQuery, String nodeIdQuery, + Map<String, HostSpec> globalClusterInstanceTemplateByAwsRegion, + String regionByNodeIdQuery) { + + super(clusterId, topologyMap, initialHostSpec, properties, pluginService, hostListProviderService, + clusterInstanceTemplate, refreshRateNano, highRefreshRateNano, topologyCacheExpirationNano, globalTopologyQuery, + writerTopologyQuery, nodeIdQuery); + this.globalClusterInstanceTemplateByAwsRegion = globalClusterInstanceTemplateByAwsRegion; + this.regionByNodeIdQuery = regionByNodeIdQuery; + } + + @Override + protected HostSpec getClusterInstanceTemplate(String nodeId, Connection connection) { + try { + try (final PreparedStatement stmt = connection.prepareStatement(this.regionByNodeIdQuery)) { + stmt.setString(1, nodeId); + try (final ResultSet resultSet = stmt.executeQuery()) { + if (resultSet.next()) { + String awsRegion = resultSet.getString(1); + if (!StringUtils.isNullOrEmpty(awsRegion)) { + final HostSpec clusterInstanceTemplateForRegion + = this.globalClusterInstanceTemplateByAwsRegion.get(awsRegion); + if (clusterInstanceTemplateForRegion == null) { + throw new SQLException("Can't find cluster template for region " + awsRegion); + } + return clusterInstanceTemplateForRegion; + } + } + } + } + } catch (SQLException ex) { + throw new RuntimeException(ex); + } + return this.clusterInstanceTemplate; + } + + @Override + protected HostSpec createHost( + final ResultSet resultSet, + final String suggestedWriterNodeId) throws SQLException { + + // suggestedWriterNodeId is not used for Aurora clusters. Topology query can detect a writer for itself. + + // According to the topology query the result set + // should contain 4 columns: node ID, 1/0 (writer/reader), node lag in time (msec), AWS region. + String hostName = resultSet.getString(1); + final boolean isWriter = resultSet.getBoolean(2); + final float nodeLag = resultSet.getFloat(3); + final String awsRegion = resultSet.getString(4); + + // Calculate weight based on node lag in time.
+ final long weight = Math.round(nodeLag) * 100L; + + final HostSpec clusterInstanceTemplateForRegion = this.globalClusterInstanceTemplateByAwsRegion.get(awsRegion); + if (clusterInstanceTemplateForRegion == null) { + throw new SQLException("Can't find cluster template for region " + awsRegion); + } + + return createHost(hostName, isWriter, weight, Timestamp.from(Instant.now()), clusterInstanceTemplateForRegion); + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsHostListProvider.java index 398f9b605..ffee4e6ca 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsHostListProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsHostListProvider.java @@ -85,10 +85,6 @@ public MonitoringRdsHostListProvider( CLUSTER_TOPOLOGY_HIGH_REFRESH_RATE_MS.getLong(this.properties)); } - public static void clearCache() { - clearAll(); - } - public static void closeAllMonitors() { monitors.getEntries().values().forEach(monitor -> { try { @@ -98,7 +94,7 @@ public static void closeAllMonitors() { } }); monitors.clear(); - clearCache(); + clearAll(); } @Override @@ -133,11 +129,22 @@ protected List queryForTopology(final Connection conn) throws SQLExcep @Override protected void clusterIdChanged(final String oldClusterId) { + super.clusterIdChanged(oldClusterId); + + if (this.clusterId.equals(oldClusterId)) { + // clusterId is the same + return; + } + final ClusterTopologyMonitor existingMonitor = monitors.get(oldClusterId, MONITOR_EXPIRATION_NANO); if (existingMonitor != null) { - monitors.computeIfAbsent(this.clusterId, (key) -> existingMonitor, MONITOR_EXPIRATION_NANO); - assert monitors.get(this.clusterId, MONITOR_EXPIRATION_NANO) == existingMonitor; - existingMonitor.setClusterId(this.clusterId); + monitors.computeIfAbsent( + this.clusterId, + (key) -> { + existingMonitor.setClusterId(this.clusterId); + return existingMonitor; + }, + MONITOR_EXPIRATION_NANO); monitors.remove(oldClusterId); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MultiAzClusterTopologyMonitorImpl.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MultiAzClusterTopologyMonitorImpl.java index c313481e3..4c2e0f938 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MultiAzClusterTopologyMonitorImpl.java +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MultiAzClusterTopologyMonitorImpl.java @@ -119,6 +119,6 @@ protected HostSpec createHost( String hostId = resultSet.getString("id"); // "1034958454" final boolean isWriter = hostId.equals(suggestedWriterNodeId); - return createHost(instanceName, isWriter, 0, Timestamp.from(Instant.now())); + return createHost(instanceName, isWriter, 0, Timestamp.from(Instant.now()), this.clusterInstanceTemplate); } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/AuroraInitialConnectionStrategyPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/AuroraInitialConnectionStrategyPlugin.java index 47537f62e..e5d512520 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/AuroraInitialConnectionStrategyPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/AuroraInitialConnectionStrategyPlugin.java @@ -115,7 +115,7 @@ public Connection connect( return connectFunc.call(); } - if (type == 
RdsUrlType.RDS_WRITER_CLUSTER) { + if (type == RdsUrlType.RDS_WRITER_CLUSTER || type == RdsUrlType.RDS_GLOBAL_WRITER_CLUSTER) { Connection writerCandidateConn = this.getVerifiedWriterConnection(props, isInitialConnection, connectFunc); if (writerCandidateConn == null) { // Can't get writer connection. Continue with a normal workflow. @@ -160,7 +160,9 @@ private Connection getVerifiedWriterConnection( try { writerCandidate = this.getWriter(); - if (writerCandidate == null || this.rdsUtils.isRdsClusterDns(writerCandidate.getHost())) { + if (writerCandidate == null + || this.rdsUtils.isRdsClusterDns(writerCandidate.getHost()) + || this.rdsUtils.isGlobalDbWriterClusterDns(writerCandidate.getHost())) { // Writer is not found. It seems that topology is outdated. writerCandidateConn = connectFunc.call(); diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/RdsUrlType.java b/wrapper/src/main/java/software/amazon/jdbc/util/RdsUrlType.java index dff1e663a..a7e2a7da7 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/RdsUrlType.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/RdsUrlType.java @@ -24,6 +24,7 @@ public enum RdsUrlType { RDS_PROXY(true, false), RDS_INSTANCE(true, false), RDS_AURORA_LIMITLESS_DB_SHARD_GROUP(true, false), + RDS_GLOBAL_WRITER_CLUSTER(true, true), OTHER(false, false); private final boolean isRds; diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/RdsUtils.java b/wrapper/src/main/java/software/amazon/jdbc/util/RdsUtils.java index 90221cf19..52bdb3ce0 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/RdsUtils.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/RdsUtils.java @@ -166,6 +166,14 @@ public class RdsUtils { ".*(?-green-[0-9a-z]{6})\\..*", Pattern.CASE_INSENSITIVE); + // TODO: check GDB writer endpoint for China regions + private static final Pattern AURORA_GLOBAL_WRITER_DNS_PATTERN = + Pattern.compile( + "^(?<instance>.+)\\." + + "(?<dns>global-)?"
+ + "(?[a-zA-Z0-9]+\\.global\\.rds\\.amazonaws\\.com)$", + Pattern.CASE_INSENSITIVE); + private static final Map cachedPatterns = new ConcurrentHashMap<>(); private static final Map cachedDnsPatterns = new ConcurrentHashMap<>(); @@ -313,6 +321,11 @@ public String getRdsClusterHostUrl(final String host) { return null; } + public boolean isGlobalDbWriterClusterDns(final String host) { + final String dnsGroup = getDnsGroup(getPreparedHost(host)); + return dnsGroup != null && dnsGroup.equalsIgnoreCase("global-"); + } + public boolean isIPv4(final String ip) { return !StringUtils.isNullOrEmpty(ip) && IP_V4.matcher(ip).matches(); } @@ -333,6 +346,8 @@ public RdsUrlType identifyRdsType(final String host) { if (isIPv4(host) || isIPv6(host)) { return RdsUrlType.IP_ADDRESS; + } else if (isGlobalDbWriterClusterDns(host)) { + return RdsUrlType.RDS_GLOBAL_WRITER_CLUSTER; } else if (isWriterClusterDns(host)) { return RdsUrlType.RDS_WRITER_CLUSTER; } else if (isReaderClusterDns(host)) { @@ -416,7 +431,8 @@ private String getDnsGroup(final String host) { } return cachedDnsPatterns.computeIfAbsent(host, (k) -> { final Matcher matcher = cacheMatcher(k, - AURORA_DNS_PATTERN, AURORA_CHINA_DNS_PATTERN, AURORA_OLD_CHINA_DNS_PATTERN, AURORA_GOV_DNS_PATTERN); + AURORA_DNS_PATTERN, AURORA_CHINA_DNS_PATTERN, AURORA_OLD_CHINA_DNS_PATTERN, + AURORA_GOV_DNS_PATTERN, AURORA_GLOBAL_WRITER_DNS_PATTERN); return getRegexGroup(matcher, DNS_GROUP); }); } diff --git a/wrapper/src/test/java/integration/container/tests/PerformanceTest.java b/wrapper/src/test/java/integration/container/tests/PerformanceTest.java index 8df6c1669..4b3bba0f0 100644 --- a/wrapper/src/test/java/integration/container/tests/PerformanceTest.java +++ b/wrapper/src/test/java/integration/container/tests/PerformanceTest.java @@ -62,7 +62,6 @@ import software.amazon.jdbc.PropertyDefinition; import software.amazon.jdbc.hostlistprovider.AuroraHostListProvider; import software.amazon.jdbc.hostlistprovider.RdsHostListProvider; -import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringRdsHostListProvider; import software.amazon.jdbc.plugin.OpenedConnectionTracker; import software.amazon.jdbc.plugin.efm.MonitorThreadContainer; import software.amazon.jdbc.plugin.efm2.MonitorServiceImpl; @@ -148,7 +147,7 @@ public void test_FailureDetectionTime_EnhancedMonitoringEnabled(final String efm MonitorThreadContainer.releaseInstance(); MonitorServiceImpl.closeAllMonitors(); AuroraHostListProvider.clearAll(); - MonitoringRdsHostListProvider.clearCache(); + RdsHostListProvider.clearAll(); enhancedFailureMonitoringPerfDataList.clear(); @@ -230,7 +229,7 @@ public void test_FailureDetectionTime_FailoverAndEnhancedMonitoringEnabled(final MonitorThreadContainer.releaseInstance(); MonitorServiceImpl.closeAllMonitors(); AuroraHostListProvider.clearAll(); - MonitoringRdsHostListProvider.clearCache(); + RdsHostListProvider.clearAll(); failoverWithEfmPerfDataList.clear(); @@ -318,7 +317,7 @@ private void test_FailoverTime_SocketTimeout(final String plugins) throws IOExce MonitorThreadContainer.releaseInstance(); MonitorServiceImpl.closeAllMonitors(); AuroraHostListProvider.clearAll(); - MonitoringRdsHostListProvider.clearCache(); + RdsHostListProvider.clearAll(); failoverWithSocketTimeoutPerfDataList.clear(); diff --git a/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsHostListProviderTest.java b/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsHostListProviderTest.java index 745364ef5..909203289 100644 --- 
a/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsHostListProviderTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsHostListProviderTest.java @@ -331,15 +331,13 @@ void testTopologyCache_SuggestedClusterIdForRds() throws SQLException { RdsHostListProvider provider2 = Mockito.spy( getRdsHostListProvider(mockHostListProviderService, "jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/")); - provider2.init(); - - assertEquals(provider1.clusterId, provider2.clusterId); - assertTrue(provider1.isPrimaryClusterId); - assertTrue(provider2.isPrimaryClusterId); + doReturn(topologyClusterA).when(provider2).queryForTopology(any(Connection.class)); final List topologyProvider2 = provider2.refresh(mock(Connection.class)); assertEquals(topologyClusterA, topologyProvider2); + assertEquals(provider1.clusterId, provider2.clusterId); + assertEquals(1, RdsHostListProvider.topologyCache.size()); } @@ -381,8 +379,6 @@ void testTopologyCache_SuggestedClusterIdForInstance() throws SQLException { provider2.init(); assertEquals(provider1.clusterId, provider2.clusterId); - assertTrue(provider1.isPrimaryClusterId); - assertTrue(provider2.isPrimaryClusterId); final List topologyProvider2 = provider2.refresh(mock(Connection.class)); assertEquals(topologyClusterA, topologyProvider2); @@ -427,27 +423,22 @@ void testTopologyCache_AcceptSuggestion() throws SQLException { RdsHostListProvider provider2 = Mockito.spy( getRdsHostListProvider(mockHostListProviderService, "jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/")); - provider2.init(); doAnswer(a -> topologyClusterA).when(provider2).queryForTopology(any(Connection.class)); final List topologyProvider2 = provider2.refresh(mock(Connection.class)); assertEquals(topologyClusterA, topologyProvider2); - assertNotEquals(provider1.clusterId, provider2.clusterId); - assertFalse(provider1.isPrimaryClusterId); - assertTrue(provider2.isPrimaryClusterId); - assertEquals(2, RdsHostListProvider.topologyCache.size()); - assertEquals("cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com", - RdsHostListProvider.suggestedPrimaryClusterIdCache.get(provider1.clusterId)); + assertEquals(provider1.clusterId, provider2.clusterId); + assertEquals(1, RdsHostListProvider.topologyCache.size()); + assertEquals(provider1.clusterId, + RdsHostListProvider.clusterIdByHostAndPort.get("cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com")); // RdsHostListProvider.logCache(); topologyProvider1 = provider1.forceRefresh(mock(Connection.class)); assertEquals(topologyClusterA, topologyProvider1); assertEquals(provider1.clusterId, provider2.clusterId); - assertTrue(provider1.isPrimaryClusterId); - assertTrue(provider2.isPrimaryClusterId); // RdsHostListProvider.logCache(); } @@ -647,13 +638,10 @@ void testGetTopology_returnsLatestWriter() throws SQLException { @Test void testClusterUrlUsedAsDefaultClusterId() throws SQLException { - String readerClusterUrl = "mycluster.cluster-ro-XYZ.us-east-1.rds.amazonaws.com"; - String expectedClusterId = "mycluster.cluster-XYZ.us-east-1.rds.amazonaws.com:1234"; - String connectionString = "jdbc:someprotocol://" + readerClusterUrl + ":1234/test"; + String connectionString = "jdbc:someprotocol://mycluster.cluster-ro-XYZ.us-east-1.rds.amazonaws.com:1234/test"; RdsHostListProvider provider1 = Mockito.spy(getRdsHostListProvider( mockHostListProviderService, connectionString)); - assertEquals(expectedClusterId, provider1.getClusterId()); List mockTopology = Collections.singletonList(new HostSpecBuilder(new 
SimpleHostAvailabilityStrategy()).host("host").build()); @@ -665,7 +653,7 @@ void testClusterUrlUsedAsDefaultClusterId() throws SQLException { RdsHostListProvider provider2 = Mockito.spy(getRdsHostListProvider( mockHostListProviderService, connectionString)); - assertEquals(expectedClusterId, provider2.getClusterId()); + assertEquals(provider1.getClusterId(), provider2.getClusterId()); assertEquals(mockTopology, provider2.getCachedTopology()); verify(provider2, never()).queryForTopology(mockConnection); } diff --git a/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProviderTest.java b/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProviderTest.java index a99ead6c6..1ebfed91d 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProviderTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProviderTest.java @@ -314,8 +314,6 @@ void testTopologyCache_SuggestedClusterIdForRds() throws SQLException { provider2.init(); assertEquals(provider1.clusterId, provider2.clusterId); - assertTrue(provider1.isPrimaryClusterId); - assertTrue(provider2.isPrimaryClusterId); final List topologyProvider2 = provider2.refresh(Mockito.mock(Connection.class)); assertEquals(topologyClusterA, topologyProvider2); @@ -361,8 +359,6 @@ void testTopologyCache_SuggestedClusterIdForInstance() throws SQLException { provider2.init(); assertEquals(provider1.clusterId, provider2.clusterId); - assertTrue(provider1.isPrimaryClusterId); - assertTrue(provider2.isPrimaryClusterId); final List topologyProvider2 = provider2.refresh(Mockito.mock(Connection.class)); assertEquals(topologyClusterA, topologyProvider2); @@ -414,20 +410,17 @@ void testTopologyCache_AcceptSuggestion() throws SQLException { final List topologyProvider2 = provider2.refresh(Mockito.mock(Connection.class)); assertEquals(topologyClusterA, topologyProvider2); - assertNotEquals(provider1.clusterId, provider2.clusterId); - assertFalse(provider1.isPrimaryClusterId); - assertTrue(provider2.isPrimaryClusterId); - assertEquals(2, RdsMultiAzDbClusterListProvider.topologyCache.size()); - assertEquals("cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com", - RdsMultiAzDbClusterListProvider.suggestedPrimaryClusterIdCache.get(provider1.clusterId)); + assertEquals(provider1.clusterId, provider2.clusterId); + assertEquals(1, RdsMultiAzDbClusterListProvider.topologyCache.size()); + assertEquals(provider1.clusterId, + RdsMultiAzDbClusterListProvider.clusterIdByHostAndPort.get( + "cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com")); // RdsMultiAzDbClusterListProvider.logCache(); topologyProvider1 = provider1.forceRefresh(Mockito.mock(Connection.class)); assertEquals(topologyClusterA, topologyProvider1); assertEquals(provider1.clusterId, provider2.clusterId); - assertTrue(provider1.isPrimaryClusterId); - assertTrue(provider2.isPrimaryClusterId); // RdsMultiAzDbClusterListProvider.logCache(); } diff --git a/wrapper/src/test/java/software/amazon/jdbc/util/RdsUtilsTests.java b/wrapper/src/test/java/software/amazon/jdbc/util/RdsUtilsTests.java index 8c210340b..6a03b2ac0 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/util/RdsUtilsTests.java +++ b/wrapper/src/test/java/software/amazon/jdbc/util/RdsUtilsTests.java @@ -104,6 +104,9 @@ public class RdsUtilsTests { private static final String usIsoEastRegionLimitlessDbShardGroup = "database-test-name.shardgrp-XYZ.rds.us-iso-east-1.c2s.ic.gov"; + private static final String 
globalDbWriterCluster = + "global-cluster-test-name.global-XYZ.global.rds.amazonaws.com"; + @BeforeEach public void setupTests() { RdsUtils.clearCache(); @@ -397,6 +400,12 @@ public void testGetRdsRegion() { assertEquals(chinaExpectedHostPattern, target.getRdsRegion(oldChinaRegionLimitlessDbShardGroup)); } + @Test + public void testIsGlobalDbWriterClusterDns() { + assertFalse(target.isGlobalDbWriterClusterDns(usEastRegionCluster)); + assertTrue(target.isGlobalDbWriterClusterDns(globalDbWriterCluster)); + } + @Test public void testBrokenPathsHostPattern() { final String incorrectChinaHostPattern = "?.rds.cn-northwest-1.rds.amazonaws.com.cn";