diff --git a/source/constructs/lib/admin/glue-stack.ts b/source/constructs/lib/admin/glue-stack.ts
index 953eba84..2b1502a8 100644
--- a/source/constructs/lib/admin/glue-stack.ts
+++ b/source/constructs/lib/admin/glue-stack.ts
@@ -56,6 +56,14 @@ export class GlueStack extends Construct {
destinationKeyPrefix: 'job/script',
});
+ new S3Deployment.BucketDeployment(this, 'BatchCreateJdbcDatasource', {
+ memoryLimit: 512,
+ ephemeralStorageSize: Size.mebibytes(100),
+ sources: [S3Deployment.Source.asset('batch-create-jdbc-datasource/template')],
+ destinationBucket: props.bucket,
+ destinationKeyPrefix: 'batch-create-jdbc-datasource/template',
+ });
+
// When upgrading, files with template as the prefix will be deleted
// Therefore, the initial template file will no longer be deployed.
// new S3Deployment.BucketDeployment(this, 'DeploymentTemplate', {
From 1d4914a1aca04547b9f4bcdf5c1bc7201e4220f0 Mon Sep 17 00:00:00 2001
From: junzhong
Date: Tue, 23 Jan 2024 11:27:47 +0800
Subject: [PATCH 006/112] fix(cdk): add batch create jdbc datasource template
---
source/constructs/lib/admin/glue-stack.ts | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/source/constructs/lib/admin/glue-stack.ts b/source/constructs/lib/admin/glue-stack.ts
index 2b1502a8..3dfae1ac 100644
--- a/source/constructs/lib/admin/glue-stack.ts
+++ b/source/constructs/lib/admin/glue-stack.ts
@@ -58,7 +58,7 @@ export class GlueStack extends Construct {
new S3Deployment.BucketDeployment(this, 'BatchCreateJdbcDatasource', {
memoryLimit: 512,
- ephemeralStorageSize: Size.mebibytes(100),
+ ephemeralStorageSize: Size.mebibytes(512),
sources: [S3Deployment.Source.asset('batch-create-jdbc-datasource/template')],
destinationBucket: props.bucket,
destinationKeyPrefix: 'batch-create-jdbc-datasource/template',
From d13dbfe3de885a5a0c0f7711ea17b972cdd2879a Mon Sep 17 00:00:00 2001
From: junzhong
Date: Tue, 23 Jan 2024 11:35:33 +0800
Subject: [PATCH 007/112] fix(cdk): add batch create jdbc datasource template
---
source/constructs/lib/admin/glue-stack.ts | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/source/constructs/lib/admin/glue-stack.ts b/source/constructs/lib/admin/glue-stack.ts
index 3dfae1ac..56e0babd 100644
--- a/source/constructs/lib/admin/glue-stack.ts
+++ b/source/constructs/lib/admin/glue-stack.ts
@@ -59,7 +59,7 @@ export class GlueStack extends Construct {
new S3Deployment.BucketDeployment(this, 'BatchCreateJdbcDatasource', {
memoryLimit: 512,
ephemeralStorageSize: Size.mebibytes(512),
- sources: [S3Deployment.Source.asset('batch-create-jdbc-datasource/template')],
+ sources: [S3Deployment.Source.asset('config/batch-create-jdbc-datasource/template')],
destinationBucket: props.bucket,
destinationKeyPrefix: 'batch-create-jdbc-datasource/template',
});
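The three glue-stack patches above converge on a single BucketDeployment that ships the batch-create template into the admin bucket. For reference, a minimal sketch of the resulting construct, written here with the CDK's Python bindings (the actual stack is TypeScript, and the scope/bucket wiring below is assumed, not taken from these patches):

from aws_cdk import Size
from aws_cdk import aws_s3_deployment as s3deploy

def add_batch_template_deployment(scope, bucket) -> None:
    # Mirrors the final state after patches 006 and 007: 512 MiB of
    # ephemeral storage (raised from 100 MiB, presumably so the asset can
    # be staged and unpacked) and the corrected 'config/...' asset path.
    s3deploy.BucketDeployment(
        scope, "BatchCreateJdbcDatasource",
        memory_limit=512,
        ephemeral_storage_size=Size.mebibytes(512),
        sources=[s3deploy.Source.asset("config/batch-create-jdbc-datasource/template")],
        destination_bucket=bucket,
        destination_key_prefix="batch-create-jdbc-datasource/template",
    )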
From 3c3f2c36900e209631937f3b8bd8b608a67071ed Mon Sep 17 00:00:00 2001
From: junzhong
Date: Tue, 23 Jan 2024 13:39:05 +0800
Subject: [PATCH 008/112] fix(cdk): invoke function policy
---
source/constructs/lib/admin/delete-resources-stack.ts | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/source/constructs/lib/admin/delete-resources-stack.ts b/source/constructs/lib/admin/delete-resources-stack.ts
index d1bf3a46..7c9a4521 100644
--- a/source/constructs/lib/admin/delete-resources-stack.ts
+++ b/source/constructs/lib/admin/delete-resources-stack.ts
@@ -80,7 +80,7 @@ export class DeleteResourcesStack extends Construct {
],
resources: [
`arn:${Aws.PARTITION}:events:*:${Aws.ACCOUNT_ID}:rule/*`,
- `arn:${Aws.PARTITION}:lambda:${Aws.REGION}:${Aws.ACCOUNT_ID}:function:${SolutionInfo.SOLUTION_NAME}-RefreshAccount`,
+ `arn:${Aws.PARTITION}:lambda:${Aws.REGION}:${Aws.ACCOUNT_ID}:function:${SolutionInfo.SOLUTION_NAME}-Controller`,
],
});
deleteAdminResourcesRole.addToPolicy(noramlStatement);
From 872ce8dadd2db62c92ab9c1b6d12303d190165b3 Mon Sep 17 00:00:00 2001
From: cuihubin <530051970@qq.com>
Date: Wed, 24 Jan 2024 15:38:15 +0800
Subject: [PATCH 009/112] batch operation
---
source/constructs/api/common/constant.py | 5 +-
.../api/common/exception_handler.py | 3 +
source/constructs/api/data_source/crud.py | 2 +-
source/constructs/api/data_source/main.py | 18 +-
source/constructs/api/data_source/service.py | 298 ++++++++++++------
5 files changed, 231 insertions(+), 95 deletions(-)
diff --git a/source/constructs/api/common/constant.py b/source/constructs/api/common/constant.py
index c3574119..c70a9c8c 100644
--- a/source/constructs/api/common/constant.py
+++ b/source/constructs/api/common/constant.py
@@ -96,8 +96,11 @@ def __setattr__(self, name, value):
const.PUBLIC = 'Public'
const.PRIVATE = 'Private'
const.ZERO = 0
-const.BATCH_CREATE_LIMIT = 100
+const.BATCH_CREATE_LIMIT = 1000
const.BATCH_SHEET = "OriginTemplate"
+const.CONNECTION_DESC_MAX_LEN = 10
+const.BATCH_CREATE_TEMPLATE_PATH = 'batch-create-jdbc-datasource/template/batch-create-jdbc-datasource.xlsx'
+const.BATCH_CREATE_REPORT_PATH = 'batch-create-jdbc-datasource/report'
const.UNSTRUCTURED_FILES = {
"document": ["doc", "docx", "pdf", "ppt", "pptx", "xls", "xlsx", "odp"],
diff --git a/source/constructs/api/common/exception_handler.py b/source/constructs/api/common/exception_handler.py
index 9902d5d7..a4fdedc7 100644
--- a/source/constructs/api/common/exception_handler.py
+++ b/source/constructs/api/common/exception_handler.py
@@ -46,3 +46,6 @@ def __init__(self,
self.code = code
self.message = message
self.ref = ref
+
+ def __msg__(self):
+ return self.message
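The new __msg__ accessor lets callers read a BizException's message without reaching into its fields; the batch worker added later in this same patch uses it to report per-row failures. A minimal sketch of that consumption pattern:

try:
    add_jdbc_conn(jdbc)  # may raise BizException on validation/creation errors
except BizException as be:
    failure_reason = be.__msg__()  # recorded in the Excel report column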
diff --git a/source/constructs/api/data_source/crud.py b/source/constructs/api/data_source/crud.py
index 4eb220e1..c85253df 100644
--- a/source/constructs/api/data_source/crud.py
+++ b/source/constructs/api/data_source/crud.py
@@ -988,7 +988,7 @@ def get_enable_account_list():
def update_schema_by_account(provider_id, account_id, instance, region, schema):
session = get_session()
jdbc_instance_source = session.query(JDBCInstanceSource).filter(JDBCInstanceSource.account_provider_id == provider_id,
- JDBCInstanceSource.region == region,
+ JDBCInstanceSource.region == region,
JDBCInstanceSource.account_id == account_id,
JDBCInstanceSource.instance_id == instance).first()
if not jdbc_instance_source:
diff --git a/source/constructs/api/data_source/main.py b/source/constructs/api/data_source/main.py
index 3049c469..6b19d21c 100644
--- a/source/constructs/api/data_source/main.py
+++ b/source/constructs/api/data_source/main.py
@@ -352,7 +352,6 @@ def test_jdbc_conn(jdbc_conn_param: schemas.JDBCInstanceSourceBase):
def get_data_location_list():
return service.list_data_location()
-
@router.get("/query-regions-by-provider", response_model=BaseResponse)
@inject_session
def query_regions_by_provider(provider_id: str):
@@ -379,7 +378,6 @@ def list_buckets(account: schemas.AccountInfo):
def query_connection_detail(account: schemas.JDBCInstanceSourceBase):
return service.query_connection_detail(account)
-
@router.post("/jdbc-databases", response_model=BaseResponse[list[str]])
@inject_session
def list_jdbc_databases(source: schemas.JdbcSource):
@@ -390,7 +388,17 @@ def list_jdbc_databases(source: schemas.JdbcSource):
def batch_create(files: List[UploadFile] = File(...)):
return service.batch_create(files[0])
-# @router.post("/snapshop", response_model=BaseResponse)
+@router.post("/query-batch-status", response_model=BaseResponse)
+@inject_session
+def query_batch_status(batch: str):
+ return service.query_batch_status(batch)
+
+@router.post("/download-batch-file", response_model=BaseResponse)
+@inject_session
+def download_batch_file(filename: str):
+ return service.download_batch_file(filename)
+
+# @router.post("/batch-sync", response_model=BaseResponse)
# @inject_session
-# def get_schema_by_snapshot(provider_id: int, account_id: str, instance: str, region: str):
-# return service.get_schema_by_snapshot(provider_id, account_id, instance, region)
+# def batch_sync(connection_list: []):
+# pass
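Both new routes take a single scalar parameter, which FastAPI binds from the query string even on POST. A hypothetical client-side sketch (the '/data-source' prefix, base URL, and the example batch id are assumptions, not taken from this patch):

import requests

BASE = "http://localhost:8000/data-source"

# Poll the status of a batch using the id returned by /batch-create.
status = requests.post(f"{BASE}/query-batch-status",
                       params={"batch": "report_1706150000"}).json()

# Ask for a presigned download URL; "template" fetches the blank template,
# any other value is treated as a report file name.
url = requests.post(f"{BASE}/download-batch-file",
                    params={"filename": "template"}).json()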
diff --git a/source/constructs/api/data_source/service.py b/source/constructs/api/data_source/service.py
index 69397ed5..ef6918dc 100644
--- a/source/constructs/api/data_source/service.py
+++ b/source/constructs/api/data_source/service.py
@@ -3,6 +3,7 @@
from io import BytesIO
import json
import os
+import random
import re
import time
import traceback
@@ -11,7 +12,6 @@
import boto3
from fastapi import File, UploadFile
import openpyxl
-import pandas as pd
import pymysql
from botocore.exceptions import ClientError
@@ -72,8 +72,9 @@
r'jdbc:oracle:thin://@[\w.-]+:\d+/([\w-]+)',
r'jdbc:oracle:thin://@[\w.-]+:\d+:\w+',
r'jdbc:sqlserver://[\w.-]+:\d+;databaseName=([\w-]+)',
- r'jdbc:sqlserver://[\w.-]+:\d+;database=([\w-]+)'
- ]
+ r'jdbc:sqlserver://[\w.-]+:\d+;database=([\w-]+)']
+
+__s3_client = boto3.client('s3')
def build_s3_targets(bucket, credentials, region, is_init):
s3 = boto3.client('s3',
@@ -402,8 +403,23 @@ def sync_jdbc_connection(jdbc: JDBCInstanceSourceBase):
logger.debug(f"conn_response type is:{type(conn_response)}")
logger.debug(f"conn_response is:{conn_response}")
+ if conn_response.get('ConnectionProperties'):
+ username = conn_response.get('ConnectionProperties', {}).get('USERNAME')
+ password = conn_response.get('ConnectionProperties', {}).get('PASSWORD')
+ secret = conn_response.get('ConnectionProperties', {}).get("SECRET_ID")
+ url = conn_response.get('ConnectionProperties', {}).get('JDBC_CONNECTION_URL')
+ jdbc_instance = JDBCInstanceSource(jdbc, jdbc_connection_url=url, master_username=username, password=password, secret=secret)
+ # jdbc_instance.jdbc_connection_url = url
# condition_check(ec2_client, credentials, source.glue_state, conn_response['PhysicalConnectionRequirements'])
- sync(glue_client, lakeformation_client, credentials, crawler_role_arn, jdbc, conn_response['ConnectionProperties']['JDBC_CONNECTION_URL'], source.jdbc_connection_schema)
+ sync(glue_client,
+ lakeformation_client,
+ credentials,
+ crawler_role_arn,
+ jdbc_instance,
+ source.jdbc_connection_schema)
+ else:
+ raise BizException(MessageEnum.BIZ_UNKNOWN_ERR.get_code(),
+ MessageEnum.BIZ_UNKNOWN_ERR.get_msg())
def condition_check(ec2_client, credentials, state, connection: dict):
@@ -492,14 +508,15 @@ def condition_check(ec2_client, credentials, state, connection: dict):
MessageEnum.SOURCE_AVAILABILITY_ZONE_NOT_EXISTS.get_msg())
-def sync(glue, lakeformation, credentials, crawler_role_arn, jdbc: JDBCInstanceSourceBase, url: str, schemas: str):
+def sync(glue, lakeformation, credentials, crawler_role_arn, jdbc: JDBCInstanceSource, schemas: str):
jdbc_targets = []
_, glue_database_name, crawler_name = __gen_resources_name(jdbc)
state, glue_connection_name = crud.get_jdbc_connection_glue_info(jdbc.account_provider_id, jdbc.account_id, jdbc.region, jdbc.instance_id)
if state == ConnectionState.CRAWLING.value:
raise BizException(MessageEnum.SOURCE_CONNECTION_CRAWLING.get_code(),
MessageEnum.SOURCE_CONNECTION_CRAWLING.get_msg())
- db_names = get_db_names(url, schemas)
+ jdbc_source = JdbcSource(connection_url=jdbc.jdbc_connection_url, username=jdbc.master_username, password=jdbc.password, secret_id=jdbc.secret)
+ db_names = get_db_names_4_jdbc(jdbc_source, schemas)
try:
for db_name in db_names:
trimmed_db_name = db_name.strip()
@@ -1366,13 +1383,7 @@ def refresh_third_data_source(provider_id: int, accounts: list[str], type: str):
raise BizException(MessageEnum.SOURCE_REFRESH_FAILED.get_code(),
MessageEnum.SOURCE_REFRESH_FAILED.get_msg())
try:
- # if type == DataSourceType.jdbc.value:
jdbc_detector.detect(provider_id, accounts)
- # elif type == DataSourceType.all.value:
- # s3_detector.detect(accounts)
- # rds_detector.detect(accounts)
- # glue_database_detector.detect(accounts)
- # jdbc_detector.detect(accounts)
except Exception as e:
logger.error(traceback.format_exc())
raise BizException(MessageEnum.SOURCE_CONNECTION_FAILED.get_code(), str(e))
@@ -1645,11 +1656,12 @@ def import_glue_database(glueDataBase: SourceGlueDatabaseBase):
crud.import_glue_database(glueDataBase, response)
def update_jdbc_conn(jdbc_conn: JDBCInstanceSource):
- get_db_names(jdbc_conn.jdbc_connection_url, jdbc_conn.jdbc_connection_schema)
+ jdbc_source = JdbcSource(connection_url=jdbc_conn.jdbc_connection_url, username=jdbc_conn.master_username, password=jdbc_conn.password, secret_id=jdbc_conn.secret)
+ dbnames = get_db_names_4_jdbc(jdbc_source, jdbc_conn.jdbc_connection_schema)
account_id, region = __get_admin_info(jdbc_conn)
res: JDBCInstanceSourceFullInfo = crud.get_jdbc_instance_source_glue(jdbc_conn.account_provider_id, jdbc_conn.account_id, jdbc_conn.region, jdbc_conn.instance_id)
check_connection(res, jdbc_conn, account_id, region)
- update_connection(res, jdbc_conn, account_id, region)
+ update_connection(res, jdbc_conn, account_id, region, dbnames)
def check_connection(res: JDBCInstanceSourceFullInfo, jdbc_instance: JDBCInstanceSource, assume_account, assume_role):
if not res:
@@ -1675,10 +1687,10 @@ def check_connection(res: JDBCInstanceSourceFullInfo, jdbc_instance: JDBCInstanc
else:
pass
-def update_connection(res: JDBCInstanceSourceFullInfo, jdbc_instance: JDBCInstanceSourceUpdate, assume_account, assume_role):
- # logger.info(f"source.glue_connection is: {source.glue_connection}")
+def update_connection(res: JDBCInstanceSourceFullInfo, jdbc_instance: JDBCInstanceSourceUpdate, assume_account, assume_region, db_names):
+ jdbc_targets = __gen_jdbc_targets_from_db_names(res.glue_connection, db_names)
connectionProperties_dict = gen_conn_properties(jdbc_instance)
- response = __glue(account=assume_account, region=assume_role).update_connection(
+ __glue(account=assume_account, region=assume_region).update_connection(
CatalogId=assume_account,
Name=res.glue_connection,
ConnectionInput={
@@ -1695,6 +1707,18 @@ def update_connection(res: JDBCInstanceSourceFullInfo, jdbc_instance: JDBCInstan
}
}
)
+ crawler_role_arn = __gen_role_arn(account_id=assume_account,
+ region=assume_region,
+ role_name='GlueDetectionJobRole')
+ # Update Crawler
+ __update_crawler(res.account_provider_id,
+ res.account_id,
+ res.instance_id,
+ res.region,
+ jdbc_targets,
+ res.glue_crawler,
+ res.glue_database,
+ crawler_role_arn)
crud.update_jdbc_connection_full(jdbc_instance)
def __validate_jdbc_url(url: str):
@@ -1703,8 +1727,10 @@ def __validate_jdbc_url(url: str):
return True
def add_jdbc_conn(jdbcConn: JDBCInstanceSource):
+ logger.info(f"creating jdbc connection for {jdbcConn.instance_id}")
jdbc_targets = []
- get_db_names(jdbcConn.jdbc_connection_url, jdbcConn.jdbc_connection_schema)
+ create_connection_response = {}
+ # get_db_names(jdbcConn.jdbc_connection_url, jdbcConn.jdbc_connection_schema)
account_id, region = __get_admin_info(jdbcConn)
crawler_role_arn = __gen_role_arn(account_id=account_id,
region=region,
@@ -1720,7 +1746,7 @@ def add_jdbc_conn(jdbcConn: JDBCInstanceSource):
availability_zone = ec2_client.describe_subnets(SubnetIds=[jdbcConn.network_subnet_id])['Subnets'][0]['AvailabilityZone']
try:
connectionProperties_dict = gen_conn_properties(jdbcConn)
- response = __glue(account=account_id, region=region).create_connection(
+ create_connection_response = __glue(account=account_id, region=region).create_connection(
CatalogId=account_id,
ConnectionInput={
'Name': glue_connection_name,
@@ -1741,19 +1767,21 @@ def add_jdbc_conn(jdbcConn: JDBCInstanceSource):
},
)
except ClientError as ce:
- logger.error(traceback.format_exc())
if ce.response['Error']['Code'] == 'InvalidInputException':
raise BizException(MessageEnum.SOURCE_JDBC_INPUT_INVALID.get_code(),
MessageEnum.SOURCE_JDBC_INPUT_INVALID.get_msg())
-
+ if ce.response['Error']['Code'] == 'AlreadyExistsException':
+ raise BizException(MessageEnum.SOURCE_JDBC_ALREADY_EXISTS.get_code(),
+ MessageEnum.SOURCE_JDBC_ALREADY_EXISTS.get_msg())
except Exception as e:
logger.error(traceback.format_exc())
- if response['ResponseMetadata']['HTTPStatusCode'] != 200:
+ if create_connection_response.get('ResponseMetadata', {}).get('HTTPStatusCode') != 200:
raise BizException(MessageEnum.SOURCE_JDBC_CREATE_FAIL.get_code(),
MessageEnum.SOURCE_JDBC_CREATE_FAIL.get_msg())
# Create Crawler
- db_names = get_db_names(jdbcConn.jdbc_connection_url, jdbcConn.jdbc_connection_schema)
+ jdbc_source = JdbcSource(connection_url=jdbcConn.jdbc_connection_url, username=jdbcConn.master_username, password=jdbcConn.password, secret_id=jdbcConn.secret)
+ db_names = get_db_names_4_jdbc(jdbc_source, jdbcConn.jdbc_connection_schema)
for db_name in db_names:
trimmed_db_name = db_name.strip()
if trimmed_db_name:
@@ -1762,7 +1790,7 @@ def add_jdbc_conn(jdbcConn: JDBCInstanceSource):
'Path': f"{trimmed_db_name}/%"
})
try:
- response = glue.create_crawler(
+ glue.create_crawler(
Name=crawler_name,
Role=crawler_role_arn,
DatabaseName=glue_database_name,
@@ -1817,9 +1845,6 @@ def add_jdbc_conn(jdbcConn: JDBCInstanceSource):
else:
raise BizException(MessageEnum.BIZ_UNKNOWN_ERR.get_code(),
MessageEnum.BIZ_UNKNOWN_ERR.get_msg())
- except Exception as e:
- raise BizException(MessageEnum.BIZ_UNKNOWN_ERR.get_code(),
- MessageEnum.BIZ_UNKNOWN_ERR.get_msg())
def gen_conn_properties(jdbcConn):
connectionProperties_dict = {}
@@ -1986,7 +2011,6 @@ def __create_jdbc_url(engine: str, host: str, port: str):
# Add S3 bucket, SQS queues access policies
def __update_access_policy_for_account():
s3_resource = boto3.session.Session().resource('s3')
- # for cn_region in const.CN_REGIONS:
# check if s3 bucket, sqs exists
bucket_name = admin_bucket_name
try:
@@ -2302,7 +2326,6 @@ def __list_rds_schema(account, region, credentials, instance_name, payload, rds_
logger.info(schema_path)
return schema_path
-
def __delete_data_source_by_account(account_id: str, region: str):
try:
crud.delete_s3_bucket_source_by_account(account_id=account_id, region=region)
@@ -2313,16 +2336,14 @@ def __delete_data_source_by_account(account_id: str, region: str):
except Exception:
logger.error(traceback.format_exc())
-
def __delete_account(account_id: str, region: str):
try:
crud.delete_account_by_region(account_id=account_id, region=region)
except Exception:
logger.error(traceback.format_exc())
-
def query_glue_connections(account: AccountInfo):
- res, list = []
+ res, list = [], []
account_id, region = __get_admin_info(account)
next_token = ""
@@ -2340,7 +2361,7 @@ def query_glue_connections(account: AccountInfo):
if not next_token:
break
jdbc_list = query_jdbc_connections_sub_info()
- jdbc_dict = {item[0]:f"{convert_provider_id_2_name(item[1])}-{item[2]}" for item in jdbc_list}
+ jdbc_dict = {item[0]: f"{convert_provider_id_2_name(item[1])}-{item[2]}" for item in jdbc_list}
for item in list:
if not item['Name'].startswith(const.SOLUTION_NAME):
if item['Name'] in jdbc_dict:
@@ -2370,35 +2391,36 @@ def query_glue_databases(account: AdminAccountInfo):
def query_account_network(account: AccountInfo):
account_id, region = __get_admin_info(account)
- logger.info(f'accont_id is:{account_id},region is {region}')
ec2_client, __ = __ec2(account=account_id, region=region)
vpcs = query_all_vpc(ec2_client)
- # vpcs = [vpc['VpcId'] for vpc in query_all_vpc(ec2_client)]
vpc_list = [{"vpcId": vpc.get('VpcId'), "name": gen_resource_name(vpc)} for vpc in vpcs]
- # vpc_list = [{"vpcId": vpc['VpcId'], "name": gen_resource_name(vpc)} for vpc in vpcs]
if account.account_provider_id != Provider.AWS_CLOUD.value:
res = __query_third_account_network(vpc_list, ec2_client)
- logger.info(f"query_third_account_network res is {res}")
return res
else:
return __query_aws_account_network(vpc_list, ec2_client)
+# async def add_conn_jdbc_async(jdbcConn: JDBCInstanceSource):
+# key = f"{jdbcConn.account_provider_id}/{jdbcConn.account_id}/{jdbcConn.region}"
+# try:
+# add_jdbc_conn(jdbcConn)
+# return (key, "SUCCESSED", "")
+# except Exception as e:
+# return (key, "FAILED", str(e))
+
def __query_third_account_network(vpc_list, ec2_client: any):
try:
-
response = ec2_client.describe_security_groups(Filters=[
{'Name': 'vpc-id', 'Values': [vpc["vpcId"] for vpc in vpc_list]},
{'Name': 'group-name', 'Values': [const.SECURITY_GROUP_JDBC]}
])
vpc_ids = [item['VpcId'] for item in response['SecurityGroups']]
subnets = ec2_client.describe_subnets(Filters=[{'Name': 'vpc-id', 'Values': [vpc_ids[0]]}])['Subnets']
- # private_subnet = list(filter(lambda x: not x["MapPublicIpOnLaunch"], subnets))
selected_subnet = subnets
subnets_str_from_env = os.getenv('SubnetIds', '')
if subnets_str_from_env:
subnets_from_env = subnets_str_from_env.split(',')
selected_subnet = [item for item in subnets if item.get('SubnetId') in subnets_from_env]
- # target_subnet = private_subnet[0] if private_subnet else subnets[0]
target_subnets = [{'subnetId': subnet["SubnetId"], 'arn': subnet["SubnetArn"], "subnetName": gen_resource_name(subnet)} for subnet in selected_subnet]
vpc_info = ec2_client.describe_vpcs(VpcIds=[vpc_ids[0]])['Vpcs'][0]
return {"vpcs": [{'vpcId': vpc_info['VpcId'],
@@ -2407,15 +2429,6 @@ def __query_third_account_network(vpc_list, ec2_client: any):
'securityGroups': [{'securityGroupId': response['SecurityGroups'][0]['GroupId'],
'securityGroupName': response['SecurityGroups'][0]['GroupName']}]}]
}
- # return {"vpcs": [{'vpcId': vpc_info['VpcId'],
- # 'vpcName': [obj for obj in vpc_info['Tags'] if obj["Key"] == "Name"][0]["Value"],
- # 'subnets': [{'subnetId': target_subnet['SubnetId'],
- # 'arn': target_subnet['SubnetArn'],
- # "subnetName": gen_resource_name(target_subnet)
- # }],
- # 'securityGroups': [{'securityGroupId': response['SecurityGroups'][0]['GroupId'],
- # 'securityGroupName': response['SecurityGroups'][0]['GroupName']}]}]
- # }
except ClientError as ce:
logger.error(traceback.format_exc())
if ce.response['Error']['Code'] == 'InvalidGroup.NotFound':
@@ -2483,7 +2496,6 @@ def list_data_location():
res = sorted(res, key=lambda x: x.account_count, reverse=True)
return res
-
def query_regions_by_provider(provider_id: int):
return crud.query_regions_by_provider(provider_id)
@@ -2511,6 +2523,25 @@ def query_full_provider_resource_infos():
def list_providers():
return crud.query_provider_list()
+def get_db_names_4_jdbc(jdbc: JdbcSource, schemas: str):
+ if not __validate_jdbc_url(jdbc.connection_url):
+ raise BizException(MessageEnum.SOURCE_JDBC_URL_FORMAT_ERROR.get_code(),
+ MessageEnum.SOURCE_JDBC_URL_FORMAT_ERROR.get_msg())
+ # list schemas
+ db_names = set()
+ if jdbc.connection_url.startswith('jdbc:mysql'):
+ schemas = list_jdbc_databases(jdbc)
+ return set(schemas)
+ else:
+ schema = get_schema_from_url(jdbc.connection_url)
+ if schema:
+ db_names.add(schema)
+ if schemas:
+ db_names.update(schemas.splitlines())
+ if not db_names:
+ raise BizException(MessageEnum.SOURCE_JDBC_JDBC_NO_DATABASE.get_code(),
+ MessageEnum.SOURCE_JDBC_JDBC_NO_DATABASE.get_msg())
+ return db_names
def get_db_names(url: str, schemas: str):
if not __validate_jdbc_url(url):
@@ -2528,7 +2559,6 @@ def get_db_names(url: str, schemas: str):
MessageEnum.SOURCE_JDBC_JDBC_NO_DATABASE.get_msg())
return db_names
-
def get_schema_from_url(url):
for pattern in _jdbc_url_patterns:
match = re.match(pattern, url)
@@ -2631,12 +2661,11 @@ def list_jdbc_databases(source: JdbcSource) -> list[str]:
logger.info(databases)
return databases
-
def batch_create(file: UploadFile = File(...)):
time_str = time.time()
- # batch_id=f"batch_create_jdbc_{time_str}"
jdbc_from_excel_set = set()
created_jdbc_list = []
+ account_set = set()
# Check if the file is an Excel file
if not file.filename.endswith('.xlsx'):
raise BizException(MessageEnum.SOURCE_BATCH_CREATE_FORMAT_ERR.get_code(),
@@ -2665,52 +2694,124 @@ def batch_create(file: UploadFile = File(...)):
__add_error_msg(sheet, max_column, row_index, f"The value of {header[1]} must be 0 or 1")
elif not __validate_jdbc_url(str(row[3].value)):
__add_error_msg(sheet, max_column, row_index, f"The value of {header[3]} must be in the format jdbc:protocol://host:port")
- elif f"{row[0].value}/{row[7].value}/{row[8].value}/{row[9].value}" in jdbc_from_excel_set:
+ elif not str(row[3].value).startswith('jdbc:mysql') and not row[4].value:
+ __add_error_msg(sheet, max_column, row_index, f"For non-MySQL data sources, {header[4]} cannot be null")
+ elif len(str(row[2].value)) > const.CONNECTION_DESC_MAX_LEN:
+ __add_error_msg(sheet, max_column, row_index, f"The value of {header[2]} must not exceed {const.CONNECTION_DESC_MAX_LEN}")
+ elif f"{row[9].value}/{row[7].value}/{row[8].value}/{row[0].value}" in jdbc_from_excel_set:
__add_error_msg(sheet, max_column, row_index, f"The value of {header[0]}, {header[7]}, {header[8]}, {header[9]} already exists in a preceding row")
elif f"{row[9].value}/{row[7].value}/{row[8].value}" not in accounts_list:
__add_error_msg(sheet, max_column, row_index, "The account does not exist!")
else:
- jdbc_from_excel_set.add(f"{row[0].value}/{row[7].value}/{row[8].value}/{row[9].value}")
+ jdbc_from_excel_set.add(f"{row[9].value}/{row[7].value}/{row[8].value}/{row[0].value}")
+ account_set.add(f"{row[9].value}/{row[7].value}/{row[8].value}")
created_jdbc_list.append(__gen_created_jdbc(row))
- batch_create_jdbc(created_jdbc_list)
- # TODO:write into excel
- # TODO:upload to S3
- for row_num, row in enumerate(sheet.iter_rows(values_only=True, min_row=3)):
- print(f"{row}")
- return time_str
+ # Query network info
+ if account_set:
+ account_info = list(account_set)[0].split("/")
+ network = query_account_network(AccountInfo(account_provider_id=account_info[0], account_id=account_info[1], region=account_info[2])) \
+ .get('vpcs', [])[0]
+ vpc_id = network.get('vpcId')
+ subnets = [subnet.get('subnetId') for subnet in network.get('subnets')]
+ security_group_id = network.get('securityGroups', [])[0].get('securityGroupId')
+ created_jdbc_list = map_network_jdbc(created_jdbc_list, vpc_id, subnets, security_group_id)
+ batch_result = asyncio.run(batch_add_conn_jdbc(created_jdbc_list))
+ result = {f"{item[0]}/{item[1]}/{item[2]}/{item[3]}": f"{item[4]}/{item[5]}" for item in batch_result}
+ for row_index, row in enumerate(sheet.iter_rows(min_row=3), start=2):
+ if row[10].value:
+ continue
+ v = result.get(f"{row[9].value}/{row[7].value}/{row[8].value}/{row[0].value}")
+ if v:
+ if v.split('/', 1)[0] == "SUCCESSED":
+ __add_success_msg(sheet, max_column, row_index)
+ else:
+ __add_error_msg(sheet, max_column, row_index, v.split('/', 1)[1])
+ else:
+ raise BizException(MessageEnum.SOURCE_BATCH_SHEET_NOT_FOUND.get_code(),
+ MessageEnum.SOURCE_BATCH_SHEET_NOT_FOUND.get_msg())
+ # Write into excel
+ excel_bytes = BytesIO()
+ workbook.save(excel_bytes)
+ excel_bytes.seek(0)
+ # Upload to S3
+ batch_create_ds = f"{const.BATCH_CREATE_REPORT_PATH}/report_{time_str}.xlsx"
+ __s3_client.upload_fileobj(excel_bytes, admin_bucket_name, batch_create_ds)
+ logger.info(f"batch_create cost: {time.time() - time_str}s")
+ return f'report_{time_str}'
+
+def map_network_jdbc(created_jdbc_list: list[JDBCInstanceSource], vpc_id, subnets, security_group_id):
+ res = []
+ for item in created_jdbc_list:
+ item.network_sg_id = security_group_id
+ item.network_subnet_id = random.choice(subnets)
+ res.append(item)
+ return res
+def query_batch_status(filename: str):
+ file_key = f"{const.BATCH_CREATE_REPORT_PATH}/{filename}.xlsx"
+ response = __s3_client.list_objects_v2(Bucket=admin_bucket_name, Prefix=const.BATCH_CREATE_REPORT_PATH)
+ for obj in response.get('Contents', []):
+ if obj['Key'] == file_key:
+ response = __s3_client.get_object(Bucket=admin_bucket_name, Key=file_key)
+ excel_bytes = response['Body'].read()
+ workbook = openpyxl.load_workbook(BytesIO(excel_bytes))
+ try:
+ sheet = workbook[const.BATCH_SHEET]
+ except KeyError:
+ raise BizException(MessageEnum.SOURCE_BATCH_SHEET_NOT_FOUND.get_code(),
+ MessageEnum.SOURCE_BATCH_SHEET_NOT_FOUND.get_msg())
+ for row in sheet.iter_rows(values_only=True, min_row=3):
+ if row[10] == "FAILED":
+ return 1
+ return 2
+ return 0
+
+def download_batch_file(filename: str):
+ key = const.BATCH_CREATE_TEMPLATE_PATH if filename == "template" else f'{const.BATCH_CREATE_REPORT_PATH}/{filename}.xlsx'
+ url = __s3_client.generate_presigned_url(
+ ClientMethod="get_object",
+ Params={'Bucket': admin_bucket_name, 'Key': key},
+ ExpiresIn=60
+ )
+ return url
def __add_error_msg(sheet, max_column, row_index, msg):
sheet.cell(row=row_index + 1, column=max_column + 1, value="FAILED")
sheet.cell(row=row_index + 1, column=max_column + 2, value=msg)
- # print(f"$$$$$$$$$ content is : {content}")
- # df = pd.read_excel(BytesIO(content), engine='openpyxl')
- # print(f"$$$$$$$$$ lines is : {df.shape[0]}")
- # df = pd.read_excel(file)
- # if df.shape[0] > const.BATCH_CREATE_LIMIT + 2:
- # raise BizException(MessageEnum.SOURCE_BATCH_CREATE_LIMIT_ERR.get_code(),
- # MessageEnum.SOURCE_BATCH_CREATE_LIMIT_ERR.get_msg())
- # print(f"$$$$$$$$${df.to_json(orient='records')}")
-
- # Further processing if needed
- # jdbc_list = df.to_json(orient='records')
- # asyncio.run(batch_create_jdbc(jdbc_list))
+def __add_success_msg(sheet, max_column, row_index):
+ sheet.cell(row=row_index + 1, column=max_column + 1, value="SUCCESSED")
def __gen_created_jdbc(row):
created_jdbc = JDBCInstanceSource()
- # TODO
+ created_jdbc.instance_id = row[0].value
+ created_jdbc.jdbc_enforce_ssl = "true" if row[1].value == 1 else "false"
+ created_jdbc.description = str(row[2].value)
+ created_jdbc.jdbc_connection_url = str(row[3].value)
+ created_jdbc.jdbc_connection_schema = str(row[4].value).replace(",", "\n") if row[4].value else const.EMPTY_STR
+ created_jdbc.master_username = str(row[5].value)
+ created_jdbc.password = str(row[6].value)
+ created_jdbc.account_id = str(row[7].value)
+ created_jdbc.region = str(row[8].value)
+ created_jdbc.account_provider_id = row[9].value
+ created_jdbc.creation_time = ""
+ created_jdbc.custom_jdbc_cert = ""
+ created_jdbc.custom_jdbc_cert_string = ""
+ created_jdbc.jdbc_driver_class_name = ""
+ created_jdbc.jdbc_driver_jar_uri = ""
+ created_jdbc.last_updated_time = ""
+ created_jdbc.network_availability_zone = ""
+ created_jdbc.secret = ""
+ created_jdbc.skip_custom_jdbc_cert_validation = "false"
return created_jdbc
-
-async def batch_create_jdbc(jdbc_list):
- tasks = [add_jdbc_conn(jdbc) for jdbc in jdbc_list]
- await asyncio.gather(*tasks)
-
+async def batch_add_conn_jdbc(created_jdbc_list):
+ tasks = [asyncio.create_task(__add_jdbc_conn_batch(jdbc)) for jdbc in created_jdbc_list]
+ return await asyncio.gather(*tasks)
def get_schema_by_snapshot(provider_id: int, account_id: str, instance: str, region: str):
res = crud.get_schema_by_snapshot(provider_id, account_id, instance, region)
- return res[0][0].split('\n') if res else None, res[0][1] if res else None
+ return res[0][0].replace(',', '\n').split('\n') if res else None, res[0][1] if res else None
def get_schema_by_real_time(provider_id: int, account_id: str, instance: str, region: str, db_info: bool = False):
db, subnet_id = None, None
@@ -2731,7 +2832,8 @@ def get_schema_by_real_time(provider_id: int, account_id: str, instance: str, re
return db, subnet_id
def sync_schema_by_job(provider_id: int, account_id: str, instance: str, region: str, schema: str):
- jdbc_targets = []
+ jdbc = JDBCInstanceSourceBase(instance_id=instance, account_provider_id=provider_id, account_id=account_id, region=region)
+ account_id, region = __get_admin_info(jdbc)
# Query Info
info = crud.get_crawler_glueDB_by_instance(provider_id, account_id, instance, region)
if not info:
@@ -2740,20 +2842,33 @@ def sync_schema_by_job(provider_id: int, account_id: str, instance: str, region:
region=region,
role_name='GlueDetectionJobRole')
db_names = schema.split("\n")
+ jdbc_targets = __gen_jdbc_targets_from_db_names(info[0][2], db_names)
+ # Update Crawler
+ __update_crawler(provider_id, account_id, instance, region, jdbc_targets, info[0][0], info[0][1], crawler_role_arn)
+ # Update RDS
+ crud.update_schema_by_account(provider_id, account_id, instance, region, schema)
+
+def __gen_jdbc_targets_from_db_names(connection_name, db_names):
+ jdbc_targets = []
for db_name in db_names:
trimmed_db_name = db_name.strip()
if trimmed_db_name:
jdbc_targets.append({
- 'ConnectionName': info[0][2],
+ 'ConnectionName': connection_name,
'Path': f"{trimmed_db_name}/%"
})
- # Update Crawler
- assume_account, assume_region = __get_admin_info(JDBCInstanceSourceBase(account_provider_id=provider_id, account_id=account_id, instance_id=instance, region=region))
+ return jdbc_targets
+
+def __update_crawler(provider_id, account_id, instance, region, jdbc_targets, crawler_name, glue_database, crawler_role_arn):
+ assume_account, assume_region = __get_admin_info(JDBCInstanceSourceBase(account_provider_id=provider_id,
+ account_id=account_id,
+ instance_id=instance,
+ region=region))
try:
__get_glue_client(assume_account, assume_region).update_crawler(
- Name=info[0],
+ Name=crawler_name,
Role=crawler_role_arn,
- DatabaseName=info[1],
+ DatabaseName=glue_database,
Targets={
'JdbcTargets': jdbc_targets,
},
@@ -2766,10 +2881,17 @@ def sync_schema_by_job(provider_id: int, account_id: str, instance: str, region:
logger.error(traceback.format_exc())
raise BizException(MessageEnum.BIZ_UNKNOWN_ERR.get_code(),
MessageEnum.BIZ_UNKNOWN_ERR.get_msg())
- # Update RDS
- crud.update_schema_by_account(provider_id, account_id, instance, region, schema)
def __get_admin_info(jdbc):
account_id = jdbc.account_id if jdbc.account_provider_id == Provider.AWS_CLOUD.value else admin_account_id
region = jdbc.region if jdbc.account_provider_id == Provider.AWS_CLOUD.value else admin_region
return account_id, region
+
+async def __add_jdbc_conn_batch(jdbc: JDBCInstanceSource):
+ try:
+ add_jdbc_conn(jdbc)
+ return jdbc.account_provider_id, jdbc.account_id, jdbc.region, jdbc.instance_id, "SUCCESSED", None
+ except BizException as be:
+ return jdbc.account_provider_id, jdbc.account_id, jdbc.region, jdbc.instance_id, "FAILED", be.__msg__()
+ except Exception as e:
+ return jdbc.account_provider_id, jdbc.account_id, jdbc.region, jdbc.instance_id, "FAILED", str(e)
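A note on the concurrency model: batch_add_conn_jdbc gathers one coroutine per row, but add_jdbc_conn itself is synchronous boto3 code, so the awaited tasks still execute one at a time on the event loop. If genuine overlap is wanted, one option (a sketch only, assuming Python 3.9+ for asyncio.to_thread) is to push each blocking call onto a worker thread:

import asyncio

async def _create_one(jdbc):
    try:
        # add_jdbc_conn blocks on Glue/EC2 calls; run it off the event loop.
        await asyncio.to_thread(add_jdbc_conn, jdbc)
        return jdbc.account_provider_id, jdbc.account_id, jdbc.region, jdbc.instance_id, "SUCCESSED", None
    except BizException as be:
        return jdbc.account_provider_id, jdbc.account_id, jdbc.region, jdbc.instance_id, "FAILED", be.__msg__()
    except Exception as e:
        return jdbc.account_provider_id, jdbc.account_id, jdbc.region, jdbc.instance_id, "FAILED", str(e)

async def batch_add(jdbc_list):
    return await asyncio.gather(*(_create_one(j) for j in jdbc_list))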
From d54bbd5e973216a2b39538fca71e7b92815cfcb9 Mon Sep 17 00:00:00 2001
From: junzhong
Date: Thu, 25 Jan 2024 09:21:23 +0800
Subject: [PATCH 010/112] fix(cdk): config
---
source/constructs/api/common/constant.py | 8 ++++----
source/constructs/api/config/main.py | 6 ------
source/constructs/api/discovery_job/service.py | 4 ++--
3 files changed, 6 insertions(+), 12 deletions(-)
diff --git a/source/constructs/api/common/constant.py b/source/constructs/api/common/constant.py
index 551d0329..ebac0610 100644
--- a/source/constructs/api/common/constant.py
+++ b/source/constructs/api/common/constant.py
@@ -100,10 +100,10 @@ def __setattr__(self, name, value):
const.BATCH_SHEET = "OriginTemplate"
const.CONFIG_CONCURRENT_RUN_INSTANCE_NUMBER = 'ConcurrentRunInstanceNumber'
const.CONFIG_CONCURRENT_RUN_INSTANCE_NUMBER_DEFAULT_VALUE = 50
-const.CONFIG_JOB_NUMBER_S3 = 'JobNumberS3'
-const.CONFIG_JOB_NUMBER_S3_DEFAULT_VALUE = 10
-const.CONFIG_JOB_NUMBER_RDS = 'JobNumberRds'
-const.CONFIG_JOB_NUMBER_RDS_DEFAULT_VALUE = 3
+const.CONFIG_SUB_JOB_NUMBER_S3 = 'SubJobNumberS3'
+const.CONFIG_SUB_JOB_NUMBER_S3_DEFAULT_VALUE = 10
+const.CONFIG_SUB_JOB_NUMBER_RDS = 'SubJobNumberRds'
+const.CONFIG_SUB_JOB_NUMBER_RDS_DEFAULT_VALUE = 3
const.CONTROLLER_ACTION = 'Action'
const.CONTROLLER_ACTION_SCHEDULE_JOB = 'ScheduleJob'
const.CONTROLLER_ACTION_CHECK_RUNNING_RUN_DATABASES = 'CheckRunningRunDatabases'
diff --git a/source/constructs/api/config/main.py b/source/constructs/api/config/main.py
index 148229cb..4bd83c1d 100644
--- a/source/constructs/api/config/main.py
+++ b/source/constructs/api/config/main.py
@@ -23,9 +23,3 @@ def set_config(configs: list[schemas.ConfigBase]):
@inject_session
def list_subnets():
return service.list_subnets()
-
-
-@router.get("/run-database-ip-count", response_model=BaseResponse[int])
-@inject_session
-def get_run_database_ip_count(database_type: str):
- return discovery_job_service.get_run_database_ip_count(database_type)
diff --git a/source/constructs/api/discovery_job/service.py b/source/constructs/api/discovery_job/service.py
index 3d820ac0..38d7750e 100644
--- a/source/constructs/api/discovery_job/service.py
+++ b/source/constructs/api/discovery_job/service.py
@@ -219,8 +219,8 @@ def start_sample_job(job_id: int, table_name: str):
def __get_job_number(database_type: str) -> int:
if database_type in [DatabaseType.S3.value, DatabaseType.GLUE.value]:
- return int(config_service.get_config(const.CONFIG_JOB_NUMBER_S3, const.CONFIG_JOB_NUMBER_S3_DEFAULT_VALUE))
- return int(config_service.get_config(const.CONFIG_JOB_NUMBER_RDS, const.CONFIG_JOB_NUMBER_RDS_DEFAULT_VALUE))
+ return int(config_service.get_config(const.CONFIG_SUB_JOB_NUMBER_S3, const.CONFIG_SUB_JOB_NUMBER_S3_DEFAULT_VALUE))
+ return int(config_service.get_config(const.CONFIG_SUB_JOB_NUMBER_RDS, const.CONFIG_SUB_JOB_NUMBER_RDS_DEFAULT_VALUE))
def get_run_database_ip_count(database_type: str) -> int:
From e254f97ffa63e3471b5bd6f836578c2fdb8b1e3f Mon Sep 17 00:00:00 2001
From: cuihubin <530051970@qq.com>
Date: Thu, 25 Jan 2024 11:19:38 +0800
Subject: [PATCH 011/112] merge conflict
---
source/constructs/api/common/constant.py | 6 +++---
source/constructs/api/data_source/main.py | 8 ++++----
source/constructs/api/data_source/service.py | 2 +-
3 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/source/constructs/api/common/constant.py b/source/constructs/api/common/constant.py
index e6b894ec..dbe52908 100644
--- a/source/constructs/api/common/constant.py
+++ b/source/constructs/api/common/constant.py
@@ -98,11 +98,11 @@ def __setattr__(self, name, value):
const.ZERO = 0
const.BATCH_CREATE_LIMIT = 1000
const.BATCH_SHEET = "OriginTemplate"
-<<<<<<< HEAD
+
const.CONNECTION_DESC_MAX_LEN = 10
const.BATCH_CREATE_TEMPLATE_PATH = 'batch-create-jdbc-datasource/template/batch-create-jdbc-datasource.xlsx'
const.BATCH_CREATE_REPORT_PATH = 'batch-create-jdbc-datasource/report'
-=======
+
const.CONFIG_CONCURRENT_RUN_INSTANCE_NUMBER = 'ConcurrentRunInstanceNumber'
const.CONFIG_CONCURRENT_RUN_INSTANCE_NUMBER_DEFAULT_VALUE = 50
const.CONFIG_SUB_JOB_NUMBER_S3 = 'SubJobNumberS3'
@@ -114,7 +114,7 @@ def __setattr__(self, name, value):
const.CONTROLLER_ACTION_CHECK_RUNNING_RUN_DATABASES = 'CheckRunningRunDatabases'
const.CONTROLLER_ACTION_CHECK_PENDING_RUN_DATABASES = 'CheckPendingRunDatabases'
const.CONTROLLER_ACTION_REFRESH_ACCOUNT = 'RefreshAccount'
->>>>>>> 488eedeb27411ab78c59e3d7874eeda183fcf07d
+
const.UNSTRUCTURED_FILES = {
"document": ["doc", "docx", "pdf", "ppt", "pptx", "xls", "xlsx", "odp"],
diff --git a/source/constructs/api/data_source/main.py b/source/constructs/api/data_source/main.py
index 4d538501..48e142e0 100644
--- a/source/constructs/api/data_source/main.py
+++ b/source/constructs/api/data_source/main.py
@@ -398,7 +398,7 @@ def query_batch_status(batch: str):
def download_batch_file(filename: str):
return service.download_batch_file(filename)
-@router.post("/batch-sync-jdbc", response_model=BaseResponse)
-@inject_session
-def batch_sync_jdbc(connection_list: [schemas.JDBCInstanceSourceBase]):
- return service.batch_sync_jdbc(connection_list)
+# @router.post("/batch-sync-jdbc", response_model=BaseResponse)
+# @inject_session
+# def batch_sync_jdbc(connection_list: [schemas.JDBCInstanceSourceBase]):
+# return service.batch_sync_jdbc(connection_list)
diff --git a/source/constructs/api/data_source/service.py b/source/constructs/api/data_source/service.py
index b077cb9e..77d61603 100644
--- a/source/constructs/api/data_source/service.py
+++ b/source/constructs/api/data_source/service.py
@@ -31,7 +31,7 @@
from discovery_job.service import can_delete_database as can_delete_job_database
from discovery_job.service import delete_account as delete_job_by_account
from discovery_job.service import delete_database as delete_job_database
-from source.constructs.api.data_source.jdbc_schema import list_jdbc_databases
+from .jdbc_schema import list_jdbc_databases
from . import s3_detector, rds_detector, glue_database_detector, jdbc_detector, crud
from .schemas import (AccountInfo, AdminAccountInfo,
JDBCInstanceSource, JDBCInstanceSourceUpdate, JdbcSource,
From b9964847733c022310c8e230e9cb4992f3933f1d Mon Sep 17 00:00:00 2001
From: Magic Chen
Date: Thu, 25 Jan 2024 16:42:50 +0800
Subject: [PATCH 012/112] fix: fix upload status
---
.../src/pages/batch-operation/index.tsx | 54 +++++++++++--------
source/portal/src/ts/common.ts | 1 +
2 files changed, 34 insertions(+), 21 deletions(-)
diff --git a/source/portal/src/pages/batch-operation/index.tsx b/source/portal/src/pages/batch-operation/index.tsx
index 25a1c3f5..87f49a79 100644
--- a/source/portal/src/pages/batch-operation/index.tsx
+++ b/source/portal/src/pages/batch-operation/index.tsx
@@ -20,10 +20,11 @@ import Navigation from 'pages/left-menu/Navigation';
import { RouterEnum } from 'routers/routerEnum';
import { useTranslation } from 'react-i18next';
import HelpInfo from 'common/HelpInfo';
-import { buildDocLink } from 'ts/common';
+import { BATCH_SOURCE_ID, buildDocLink } from 'ts/common';
import axios from 'axios';
import { BASE_URL } from 'tools/apiRequest';
import { downloadBatchFiles, queryBatchStatus } from 'apis/data-source/api';
+import { alertMsg } from 'tools/tools';
enum BatchOperationStatus {
NotStarted = 'NotStarted',
@@ -61,19 +62,19 @@ const BatchOperationContent: React.FC = (
const response: any = await queryBatchStatus({
batch: fileId,
});
- const status = response.data; // 0: Inprogress, 1: Completed, 2: Error
+ const status = response.data; // 0: Inprogress, 1: Error, 2: Completed
if (status === 1 || status === 2) {
clearInterval(statusInterval);
}
if (status === 1) {
- updateStatus(BatchOperationStatus.Completed);
- } else if (status === 2) {
updateStatus(BatchOperationStatus.Error);
+ } else if (status === 2) {
+ updateStatus(BatchOperationStatus.Completed);
} else {
updateStatus(BatchOperationStatus.Inprogress);
}
} catch (error) {
- console.error('查询状态失败:', error);
+ console.error('error:', error);
clearInterval(statusInterval);
}
};
@@ -112,12 +113,19 @@ const BatchOperationContent: React.FC = (
},
}
);
- setLoadingUpload(false);
- const fileId = response.data.data;
- localStorage.setItem('batchFileId', fileId);
- updateStatus(BatchOperationStatus.Inprogress);
- statusInterval = setInterval(() => queryStatus(fileId), 5000);
console.log(response.data);
+ setLoadingUpload(false);
+ if (response.data.status === 'success') {
+ const fileId = response.data.data;
+ localStorage.setItem(BATCH_SOURCE_ID, fileId);
+ updateStatus(BatchOperationStatus.Inprogress);
+ statusInterval = setInterval(() => {
+ queryStatus(fileId);
+ }, 5000);
+ } else {
+ setUploadProgress(0);
+ alertMsg(response.data.message ?? '', 'error');
+ }
} catch (error) {
setLoadingUpload(false);
console.error(error);
@@ -125,10 +133,12 @@ const BatchOperationContent: React.FC = (
};
useEffect(() => {
- const fileId = localStorage.getItem('batchFileId');
+ const fileId = localStorage.getItem(BATCH_SOURCE_ID);
if (fileId) {
queryStatus(fileId);
- statusInterval = setInterval(() => queryStatus(fileId), 5000);
+ statusInterval = setInterval(() => {
+ queryStatus(fileId);
+ }, 5000);
}
return () => {
clearInterval(statusInterval);
@@ -235,13 +245,14 @@ const BatchOperation: React.FC = () => {
const downloadReport = async () => {
console.log('download report');
- const fileName = localStorage.getItem('batchFileId');
+ const fileName = localStorage.getItem(BATCH_SOURCE_ID);
if (fileName) {
- const response = await downloadBatchFiles({
- filename: 'batch_1705900337.8425026',
+ const response: any = await downloadBatchFiles({
+ // filename: 'batch_1705900337.8425026',
+ filename: fileName,
});
console.info('response:', response);
- // TODO: download file
+ window.open(response.data);
}
};
@@ -259,6 +270,9 @@ const BatchOperation: React.FC = () => {
{t('button.downloadReport')}
),
+ onDismiss: () => {
+ setFlashBar([]);
+ },
},
]);
}
@@ -276,6 +290,9 @@ const BatchOperation: React.FC = () => {
{t('button.downloadReport')}
),
+ onDismiss: () => {
+ setFlashBar([]);
+ },
},
]);
}
@@ -289,11 +306,6 @@ const BatchOperation: React.FC = () => {
content:
'Creating databases. Please do not close this window. It will take less than 15 minutes.',
id: 'info',
- action: (
-
- ),
},
]);
}
diff --git a/source/portal/src/ts/common.ts b/source/portal/src/ts/common.ts
index 9b312d5e..f5644df4 100644
--- a/source/portal/src/ts/common.ts
+++ b/source/portal/src/ts/common.ts
@@ -11,6 +11,7 @@ export const EN_DOC_LINK =
'https://awslabs.github.io/sensitive-data-protection-on-aws/en';
export const SDPS_DEBUG_MODE = 'SDPS_DEBUG_MODE';
+export const BATCH_SOURCE_ID = 'SDPS_BATCH_FILE_ID';
export interface ColumnList {
id: string;
label: string;
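The polling contract between this page and the backend's query_batch_status is: 0 = report not ready (still in progress), 1 = at least one row FAILED, 2 = all rows succeeded. Sketched as a tiny synchronous poller (illustrative only; the portal implements this in TypeScript with setInterval):

import time

def wait_for_batch(query_batch_status, batch_id, interval=5.0):
    while True:
        status = query_batch_status(batch_id)
        if status == 1:
            return "Error"      # download the report for per-row messages
        if status == 2:
            return "Completed"
        time.sleep(interval)    # status 0: keep polling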
From 27deadfe36a6337eedcb2a07fa4055f129ef95ec Mon Sep 17 00:00:00 2001
From: Magic Chen
Date: Thu, 25 Jan 2024 16:55:42 +0800
Subject: [PATCH 013/112] fix: fix download api
---
.../src/pages/batch-operation/index.tsx | 26 ++++++++++++++++---
1 file changed, 22 insertions(+), 4 deletions(-)
diff --git a/source/portal/src/pages/batch-operation/index.tsx b/source/portal/src/pages/batch-operation/index.tsx
index 87f49a79..fbda60b3 100644
--- a/source/portal/src/pages/batch-operation/index.tsx
+++ b/source/portal/src/pages/batch-operation/index.tsx
@@ -49,7 +49,7 @@ let statusInterval: any;
const BatchOperationContent: React.FC = (
props: BatchOperationContentProps
) => {
- const { t } = useTranslation();
+ const { t, i18n } = useTranslation();
const { updateStatus } = props;
const [uploadDisabled, setUploadDisabled] = useState(false);
const [files, setFiles] = useState([] as any);
@@ -132,6 +132,17 @@ const BatchOperationContent: React.FC = (
}
};
+ const downloadReport = async () => {
+ console.log('download template');
+ const fileName = `template_${i18n.language}`;
+ if (fileName) {
+ const response: any = await downloadBatchFiles({
+ filename: fileName,
+ });
+ window.open(response.data);
+ }
+ };
+
useEffect(() => {
const fileId = localStorage.getItem(BATCH_SOURCE_ID);
if (fileId) {
@@ -155,10 +166,17 @@ const BatchOperationContent: React.FC = (
}
>
-
-
+ {/* */}
+
+
From: Magic Chen
Date: Thu, 25 Jan 2024 16:59:26 +0800
Subject: [PATCH 014/112] fix: fix download template
---
source/portal/src/pages/batch-operation/index.tsx | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/source/portal/src/pages/batch-operation/index.tsx b/source/portal/src/pages/batch-operation/index.tsx
index fbda60b3..0edce79f 100644
--- a/source/portal/src/pages/batch-operation/index.tsx
+++ b/source/portal/src/pages/batch-operation/index.tsx
@@ -134,7 +134,7 @@ const BatchOperationContent: React.FC = (
const downloadReport = async () => {
console.log('download template');
- const fileName = `template_${i18n.language}`;
+ const fileName = `template-${i18n.language}`;
if (fileName) {
const response: any = await downloadBatchFiles({
filename: fileName,
From 5c1bd145ba87f93e97f6192d3e56d724f2902384 Mon Sep 17 00:00:00 2001
From: Magic Chen
Date: Thu, 25 Jan 2024 17:54:07 +0800
Subject: [PATCH 015/112] fix: batch upload files
---
.../src/pages/batch-operation/index.tsx | 37 +++++++++++++------
1 file changed, 25 insertions(+), 12 deletions(-)
diff --git a/source/portal/src/pages/batch-operation/index.tsx b/source/portal/src/pages/batch-operation/index.tsx
index 0edce79f..47cacf22 100644
--- a/source/portal/src/pages/batch-operation/index.tsx
+++ b/source/portal/src/pages/batch-operation/index.tsx
@@ -8,7 +8,6 @@ import {
FlashbarProps,
FormField,
Header,
- Icon,
ProgressBar,
SpaceBetween,
StatusIndicator,
@@ -36,6 +35,14 @@ interface BatchOperationContentProps {
updateStatus: (status: BatchOperationStatus) => void;
}
+const startDownload = (url: string) => {
+ const link = document.createElement('a');
+ link.href = url;
+ document.body.appendChild(link);
+ link.click();
+ document.body.removeChild(link);
+};
+
const AddAccountHeader: React.FC = () => {
const { t } = useTranslation();
return (
@@ -56,13 +63,14 @@ const BatchOperationContent: React.FC = (
const [errors, setErrors] = useState([] as any);
const [uploadProgress, setUploadProgress] = useState(0);
const [loadingUpload, setLoadingUpload] = useState(false);
+ const [loadingDownload, setLoadingDownload] = useState(false);
const queryStatus = async (fileId: string) => {
try {
- const response: any = await queryBatchStatus({
+ const status: any = await queryBatchStatus({
batch: fileId,
});
- const status = response.data; // 0: Inprogress, 1: Error, 2: Completed
+ // 0: Inprogress, 1: Error, 2: Completed
if (status === 1 || status === 2) {
clearInterval(statusInterval);
}
@@ -134,12 +142,14 @@ const BatchOperationContent: React.FC = (
const downloadReport = async () => {
console.log('download template');
+ setLoadingDownload(true);
const fileName = `template-${i18n.language}`;
if (fileName) {
- const response: any = await downloadBatchFiles({
+ const url: any = await downloadBatchFiles({
filename: fileName,
});
- window.open(response.data);
+ setLoadingDownload(false);
+ startDownload(url);
}
};
@@ -173,7 +183,7 @@ const BatchOperationContent: React.FC = (
downloadReport();
}}
variant="link"
- download
+ loading={loadingDownload}
>
{t('datasource:batch.step1Download')}
@@ -260,17 +270,18 @@ const BatchOperation: React.FC = () => {
);
const [status, setStatus] = useState(BatchOperationStatus.NotStarted);
+ const [loadingDownload, setLoadingDownload] = useState(false);
const downloadReport = async () => {
console.log('download report');
+ setLoadingDownload(true);
const fileName = localStorage.getItem(BATCH_SOURCE_ID);
if (fileName) {
- const response: any = await downloadBatchFiles({
- // filename: 'batch_1705900337.8425026',
+ const url: any = await downloadBatchFiles({
filename: fileName,
});
- console.info('response:', response);
- window.open(response.data);
+ setLoadingDownload(false);
+ startDownload(url);
}
};
@@ -284,11 +295,12 @@ const BatchOperation: React.FC = () => {
content: 'Please download the report and check the result.',
id: 'success',
action: (
-
+
+ )}
+ >
);
};
@@ -108,8 +242,8 @@ const SystemSetting: React.FC = () => {
const breadcrumbItems = [
{ text: t('breadcrumb.home'), href: RouterEnum.Home.path },
{
- text: t('breadcrumb.dataSourceConnection'),
- href: RouterEnum.DataSourceConnection.path,
+ text: t('nav.systemSettings'),
+ href: '',
},
];
return (
diff --git a/source/portal/src/pages/system-settings/typs/config-typs.ts b/source/portal/src/pages/system-settings/typs/config-typs.ts
new file mode 100644
index 00000000..10abca8a
--- /dev/null
+++ b/source/portal/src/pages/system-settings/typs/config-typs.ts
@@ -0,0 +1,10 @@
+export interface ConfigItem {
+ config_key: string;
+ config_value: string;
+}
+
+export interface ConfigSubnet {
+ subnet_id: string;
+ name: string;
+ available_ip_address_count: number;
+}
From 8fc7a2a04ed619d0d7fc8726e56c6c885eea13eb Mon Sep 17 00:00:00 2001
From: Magic Chen
Date: Fri, 26 Jan 2024 15:35:04 +0800
Subject: [PATCH 021/112] fix: fix pending status color
---
source/portal/src/pages/glue-job/index.tsx | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/source/portal/src/pages/glue-job/index.tsx b/source/portal/src/pages/glue-job/index.tsx
index 9c899539..f37017f4 100644
--- a/source/portal/src/pages/glue-job/index.tsx
+++ b/source/portal/src/pages/glue-job/index.tsx
@@ -153,7 +153,7 @@ const GlueJobContent = () => {
if (jobRowData.state === 'Active (idle)') {
tempType = CLSAAIFIED_TYPE.SystemMark;
}
- if (jobRowData.state === 'Running') {
+ if (jobRowData.state === 'Running' || jobRowData.state === 'Pending') {
tempType = CLSAAIFIED_TYPE.System;
}
if (jobRowData.state === 'Stopped') {
From c81f85774b0f03deceb35cbba2ced32fef6eece3 Mon Sep 17 00:00:00 2001
From: Magic Chen
Date: Sat, 27 Jan 2024 21:45:10 +0800
Subject: [PATCH 022/112] chore: add mysql and other in add jdbc
---
.../portal/public/locales/en/datasource.json | 6 +-
.../portal/public/locales/zh/datasource.json | 6 +-
source/portal/src/index.scss | 43 +++--
.../componments/JDBCConnection.tsx | 154 +++++++++++++-----
.../componments/JDBCConnectionEdit.tsx | 30 ++--
source/portal/src/ts/common.ts | 7 +
6 files changed, 180 insertions(+), 66 deletions(-)
diff --git a/source/portal/public/locales/en/datasource.json b/source/portal/public/locales/en/datasource.json
index cb0fc6e6..c31d21ff 100644
--- a/source/portal/public/locales/en/datasource.json
+++ b/source/portal/public/locales/en/datasource.json
@@ -90,7 +90,11 @@
"chooseSubnet": "Choose one subnet",
"sg": "Security groups",
"sgDesc": "Choose one or more security groups to allow access to the data store in your VPC subnet. Security groups are associated to the ENI attached to your subnet. You must choose at least one security group with a self-referencing inbound rule for all TCP ports.",
- "chooseSG": "Choose one or more security groups"
+ "chooseSG": "Choose one or more security groups",
+ "mysql": "MySQL (Auto discovery)",
+ "other": "Others",
+ "otherError": "Other JDBC URL cannot start with 'jdbc:mysql://'",
+ "databaseError": "JDBC Database cannot be empty."
},
"batch": {
"name": "Batch Operation",
diff --git a/source/portal/public/locales/zh/datasource.json b/source/portal/public/locales/zh/datasource.json
index fe78a820..7ae6a3c9 100644
--- a/source/portal/public/locales/zh/datasource.json
+++ b/source/portal/public/locales/zh/datasource.json
@@ -90,7 +90,11 @@
"chooseSubnet": "选择一个子网",
"sg": "安全组",
"sgDesc": "选择一个或多个安全组以允许访问在你的 VPC 子网中的数据存储。安全组与你的子网关联的ENI相关联。你必须选择至少一个对所有 TCP 端口有自我引用入站规则的安全组。",
- "chooseSG": "选择一个或多个安全组"
+ "chooseSG": "选择一个或多个安全组",
+ "mysql": "MySQL(自动发现)",
+ "other": "其他",
+ "otherError": "其他 JDBC URL 不能以 'jdbc:mysql://' 开头",
+ "databaseError": "JDBC 数据库不能为空。"
},
"batch": {
"name": "批量操作",
diff --git a/source/portal/src/index.scss b/source/portal/src/index.scss
index 5f7ef13e..f25e65da 100644
--- a/source/portal/src/index.scss
+++ b/source/portal/src/index.scss
@@ -1,13 +1,15 @@
body {
margin: 0;
- font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen", "Ubuntu", "Cantarell", "Fira Sans",
- "Droid Sans", "Helvetica Neue", sans-serif;
+ font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen',
+ 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue',
+ sans-serif;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
}
code {
- font-family: source-code-pro, Menlo, Monaco, Consolas, "Courier New", monospace;
+ font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New',
+ monospace;
}
.hand-pointer {
@@ -280,7 +282,13 @@ code {
color: #3a3a3a;
text-shadow: 0 1px 0 rgba(255, 255, 255, 0.75);
background-color: #f7f7f7;
- background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#ffffff), to(#d2d2d2));
+ background-image: -webkit-gradient(
+ linear,
+ 0 0,
+ 0 100%,
+ from(#ffffff),
+ to(#d2d2d2)
+ );
background-image: -webkit-linear-gradient(top, #ffffff, #d2d2d2);
background-image: -moz-linear-gradient(top, #ffffff, #d2d2d2);
background-image: -ms-linear-gradient(top, #ffffff, #d2d2d2);
@@ -313,7 +321,7 @@ code {
.popover .arrow:after {
z-index: -1;
- content: "";
+ content: '';
}
.popover.top .arrow {
@@ -379,20 +387,20 @@ code {
}
.horizon-bar-chart {
- [class^="awsui_grid_"] {
+ [class^='awsui_grid_'] {
display: none !important;
}
- [class^="awsui_labels-left_"] {
+ [class^='awsui_labels-left_'] {
display: none !important;
}
- [class*="awsui_axis--emphasized_"] {
+ [class*='awsui_axis--emphasized_'] {
display: none !important;
}
- [class^="awsui_chart-container__vertical_"] {
- [class^="awsui_labels-bottom_"] {
+ [class^='awsui_chart-container__vertical_'] {
+ [class^='awsui_labels-bottom_'] {
display: none !important;
}
@@ -471,7 +479,7 @@ code {
.custom-badge {
background-color: #d1d5db;
color: #fff;
- font-family: "Open Sans", "Helvetica Neue", Roboto, Arial, sans-serif;
+ font-family: 'Open Sans', 'Helvetica Neue', Roboto, Arial, sans-serif;
font-size: 12px;
letter-spacing: 0.005em;
line-height: 22px;
@@ -506,3 +514,16 @@ code {
.add-jdbc-container {
padding: 20px;
}
+
+.jdbc-prefix {
+ // padding: 10px;
+ border: 2px solid #7d8998;
+ border-radius: 8px 0 0 8px;
+ position: relative;
+ z-index: 10;
+ font-size: 14px;
+ line-height: 22px;
+ margin-right: -8px;
+ background-color: #eee;
+ padding: 4px 8px 4px 12px;
+}
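
The `.jdbc-prefix` rule added above is what lets the UI pin a fixed `jdbc:mysql://` label flush against the URL input: the negative right margin tucks its rounded border under the adjacent field. A minimal usage sketch, assuming a flex wrapper and the Cloudscape `Input` component (the wrapper markup is illustrative, not from this patch):

```tsx
import React, { useState } from 'react';
import { Input } from '@cloudscape-design/components';

// Illustrative sketch: a fixed protocol label rendered flush against the input.
const JdbcUrlField: React.FC = () => {
  const [tmpJDBCUrl, setTmpJDBCUrl] = useState('');
  return (
    <div style={{ display: 'flex' }}>
      <span className="jdbc-prefix">jdbc:mysql://</span>
      <div style={{ flex: 1 }}>
        <Input
          placeholder="host:port"
          value={tmpJDBCUrl}
          onChange={({ detail }) => setTmpJDBCUrl(detail.value)}
        />
      </div>
    </div>
  );
};

export default JdbcUrlField;
```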
diff --git a/source/portal/src/pages/data-source-connection/componments/JDBCConnection.tsx b/source/portal/src/pages/data-source-connection/componments/JDBCConnection.tsx
index fcbea762..98f58d71 100644
--- a/source/portal/src/pages/data-source-connection/componments/JDBCConnection.tsx
+++ b/source/portal/src/pages/data-source-connection/componments/JDBCConnection.tsx
@@ -25,8 +25,8 @@ import {
import { useEffect, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { alertMsg } from 'tools/tools';
-import { i18ns } from '../types/s3_selector_config';
import { DropdownStatusProps } from '@cloudscape-design/components/internal/components/dropdown-status';
+import { checkJDBCIsMySQL } from 'ts/common';
interface JDBCConnectionProps {
providerId: number;
@@ -102,6 +102,11 @@ const JDBCConnection: React.FC<JDBCConnectionProps> = (
const [secretItem, setSecretItem] = useState(null);
const [loadingJdbcDatabase, setLoadingJdbcDatabase] = useState(false);
+ const [jdbcConnType, setJdbcConnType] = useState('mysql');
+ const [tmpJDBCUrl, setTmpJDBCUrl] = useState('');
+ const [otherJDBCUrlError, setOtherJDBCUrlError] = useState(false);
+ const [jdbcDatabaseEmptyError, setJdbcDatabaseEmptyError] = useState(false);
+
useEffect(() => {
if (credentialType === 'secret_manager') {
loadAccountSecrets();
@@ -341,6 +346,14 @@ const JDBCConnection: React.FC<JDBCConnectionProps> = (
};
const addJdbcConnection = async () => {
+ if (jdbcConnType === 'other' && checkJDBCIsMySQL(tmpJDBCUrl)) {
+ setOtherJDBCUrlError(true);
+ return;
+ }
+ if (!jdbcConnectionData?.new?.jdbc_connection_schema?.trim()) {
+ setJdbcDatabaseEmptyError(true);
+ return;
+ }
setLoadingImport(true);
if (jdbcConnectionData.createType === 'import') {
try {
@@ -390,6 +403,14 @@ const JDBCConnection: React.FC<JDBCConnectionProps> = (
setJdbcConnectionData({ ...jdbcConnectionData, new: temp });
};
+ useEffect(() => {
+ let jdbcURLStr = tmpJDBCUrl;
+ if (jdbcConnType === 'mysql') {
+ jdbcURLStr = 'jdbc:mysql://' + tmpJDBCUrl;
+ }
+ changeJDBCUrl(jdbcURLStr);
+ }, [tmpJDBCUrl]);
+
const changeDatabase = (detail: any) => {
// console.log(detail)
let temp = jdbcConnectionData.new;
@@ -461,39 +482,39 @@ const JDBCConnection: React.FC<JDBCConnectionProps> = (
setBuckets(res);
};
- const changeJDBCcertificate = (detail: any) => {
- let temp = jdbcConnectionData.new;
- temp = { ...temp, custom_jdbc_cert: detail.resource.uri };
- setJdbcConnectionData({ ...jdbcConnectionData, new: temp });
- };
-
- const changeSkipCerValid = (detail: any) => {
- // console.log("skip!!!",detail)
- let temp = jdbcConnectionData.new;
- temp = {
- ...temp,
- skip_custom_jdbc_cert_validation: detail ? 'true' : 'false',
- };
- setJdbcConnectionData({ ...jdbcConnectionData, new: temp });
- };
-
- const changeJDBCCertString = (detail: any) => {
- let temp = jdbcConnectionData.new;
- temp = { ...temp, custom_jdbc_cert_string: detail };
- setJdbcConnectionData({ ...jdbcConnectionData, new: temp });
- };
-
- const changeDriverClassName = (detail: any) => {
- let temp = jdbcConnectionData.new;
- temp = { ...temp, jdbc_driver_class_name: detail };
- setJdbcConnectionData({ ...jdbcConnectionData, new: temp });
- };
-
- const changeDriverPath = (detail: any) => {
- let temp = jdbcConnectionData.new;
- temp = { ...temp, jdbc_driver_jar_uri: detail.resource.uri };
- setJdbcConnectionData({ ...jdbcConnectionData, new: temp });
- };
+ // const changeJDBCcertificate = (detail: any) => {
+ // let temp = jdbcConnectionData.new;
+ // temp = { ...temp, custom_jdbc_cert: detail.resource.uri };
+ // setJdbcConnectionData({ ...jdbcConnectionData, new: temp });
+ // };
+
+ // const changeSkipCerValid = (detail: any) => {
+ // // console.log("skip!!!",detail)
+ // let temp = jdbcConnectionData.new;
+ // temp = {
+ // ...temp,
+ // skip_custom_jdbc_cert_validation: detail ? 'true' : 'false',
+ // };
+ // setJdbcConnectionData({ ...jdbcConnectionData, new: temp });
+ // };
+
+ // const changeJDBCCertString = (detail: any) => {
+ // let temp = jdbcConnectionData.new;
+ // temp = { ...temp, custom_jdbc_cert_string: detail };
+ // setJdbcConnectionData({ ...jdbcConnectionData, new: temp });
+ // };
+
+ // const changeDriverClassName = (detail: any) => {
+ // let temp = jdbcConnectionData.new;
+ // temp = { ...temp, jdbc_driver_class_name: detail };
+ // setJdbcConnectionData({ ...jdbcConnectionData, new: temp });
+ // };
+
+ // const changeDriverPath = (detail: any) => {
+ // let temp = jdbcConnectionData.new;
+ // temp = { ...temp, jdbc_driver_jar_uri: detail.resource.uri };
+ // setJdbcConnectionData({ ...jdbcConnectionData, new: temp });
+ // };
const changeUserName = (detail: any) => {
let temp = jdbcConnectionData.new;
@@ -515,6 +536,11 @@ const JDBCConnection: React.FC<JDBCConnectionProps> = (
};
const findDatabase = async () => {
+ if (jdbcConnType === 'other' && checkJDBCIsMySQL(tmpJDBCUrl)) {
+ setOtherJDBCUrlError(true);
+ return;
+ }
+
setLoadingImport(true);
setLoadingJdbcDatabase(true);
const requestParam = {
@@ -734,18 +760,55 @@ const JDBCConnection: React.FC<JDBCConnectionProps> = (
value={jdbcConnectionData.new.description}
/>
+
+            <FormField>
+              <Tiles
+                onChange={({ detail }) => {
+                  setOtherJDBCUrlError(false);
+                  setTmpJDBCUrl('');
+                  changeDatabase('');
+                  setJdbcConnType(detail.value);
+                }}
+                value={jdbcConnType}
+                items={[
+                  {
+                    label: t('datasource:jdbc.mysql'),
+                    value: 'mysql',
+                  },
+                  { label: t('datasource:jdbc.other'), value: 'other' },
+                ]}
+              />
+            </FormField>
+
             <>
-              <Input
-                onChange={(e) => changeJDBCUrl(e.detail.value)}
-                placeholder="jdbc:protocol://host:port"
-                value={jdbcConnectionData.new.jdbc_connection_url}
-              />
+              <div className="flex">
+                {jdbcConnType === 'mysql' && (
+                  <div className="jdbc-prefix">jdbc:mysql://</div>
+                )}
+                <div className="flex-1">
+                  <Input
+                    onChange={(e) => {
+                      setOtherJDBCUrlError(false);
+                      setTmpJDBCUrl(e.detail.value);
+                    }}
+                    placeholder={
+                      jdbcConnType === 'mysql'
+                        ? 'host:port'
+                        : 'jdbc:protocol://host:port'
+                    }
+                    value={tmpJDBCUrl}
+                  />
+                </div>
+              </div>
              {/* ... */}
@@ ... @@ const JDBCConnection: React.FC<JDBCConnectionProps> = (
                onChange={({ detail }) => {
+ changeDatabase('');
changeUserName(detail.value);
}}
/>
@@ -829,6 +893,7 @@ const JDBCConnection: React.FC<JDBCConnectionProps> = (
type="password"
value={jdbcConnectionData.new.password}
onChange={({ detail }) => {
+ changeDatabase('');
changePassword(detail.value);
}}
/>
@@ -844,6 +909,7 @@ const JDBCConnection: React.FC<JDBCConnectionProps> = (
props.providerId !== 1 && (
                  <Button
                    onClick={() => {
+ setJdbcDatabaseEmptyError(false);
findDatabase();
}}
iconName="search"
@@ -853,9 +919,17 @@ const JDBCConnection: React.FC<JDBCConnectionProps> = (
)
}
+ errorText={
+ jdbcDatabaseEmptyError
+ ? t('datasource:jdbc.databaseError')
+ : ''
+ }
>
diff --git a/source/portal/src/pages/data-source-connection/componments/JDBCConnectionEdit.tsx b/source/portal/src/pages/data-source-connection/componments/JDBCConnectionEdit.tsx
--- a/source/portal/src/pages/data-source-connection/componments/JDBCConnectionEdit.tsx
+++ b/source/portal/src/pages/data-source-connection/componments/JDBCConnectionEdit.tsx
+
+            <FormField>
+              <Tiles
+                onChange={({ detail }) => {
+                  setOtherJDBCUrlError(false);
+                  setTmpJDBCUrl('');
+                  changeDatabase('');
+                  setJdbcConnType(detail.value);
+                }}
+                value={jdbcConnType}
+                items={[
+                  {
+                    disabled: true,
+                    label: t('datasource:jdbc.mysql'),
+                    value: 'mysql',
+                  },
+                  {
+                    disabled: true,
+                    label: t('datasource:jdbc.other'),
+                    value: 'other',
+                  },
+                ]}
+              />
+            </FormField>
+
<>
{
- findDatabase();
- }}
- iconName="search"
- disabled={props.providerId === 1 || loadingJdbcDatabase}
- >
- {t('datasource:jdbc.findDatabase')}
-
- )
+ errorText={
+ otherJDBCUrlError ? t('datasource:jdbc.otherError') : ''
}
>
- changeJDBCUrl(e.detail.value)}
- placeholder="jdbc:protocol://host:port"
- value={jdbcConnectionData.jdbc_connection_url}
- />
-
-
-
+
@@ ... @@ const JDBCConnectionEdit: React.FC = (
                onChange={({ detail }) => {
+ changeDatabase('');
changeUserName(detail.value);
}}
/>
@@ -740,12 +796,46 @@ const JDBCConnectionEdit: React.FC = (
type="password"
value={jdbcConnectionData.password}
onChange={({ detail }) => {
+ changeDatabase('');
changePassword(detail.value);
}}
/>
>
)}
+
+            <FormField
+              label={t('datasource:jdbc.jdbcDatabase')}
+              description={t('datasource:jdbc.jdbcDatabaseDesc')}
+              constraintText={t('datasource:jdbc.jdbcDatabaseConstraint')}
+              secondaryControl={
+                props.providerId !== 1 && (
+                  <Button
+                    onClick={() => {
+                      setJdbcDatabaseEmptyError(false);
+                      findDatabase();
+                    }}
+                    iconName="search"
+                    disabled={props.providerId === 1 || loadingJdbcDatabase}
+                  >
+                    {t('datasource:jdbc.findDatabase')}
+                  </Button>
+                )
+              }
+              errorText={
+                jdbcDatabaseEmptyError
+                  ? t('datasource:jdbc.databaseError')
+                  : ''
+              }
+            >
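
Both connection forms gate on `checkJDBCIsMySQL` from `ts/common`, whose body is not part of this series. A plausible minimal implementation, assuming the helper only tests the URL scheme (the repo's actual version may normalize differently):

```ts
// Hypothetical sketch of the helper imported from 'ts/common'.
// Assumption: it only checks whether a JDBC URL uses the MySQL scheme.
export const checkJDBCIsMySQL = (url: string): boolean =>
  url.trim().toLowerCase().startsWith('jdbc:mysql://');

// Mirrors the guard in addJdbcConnection/findDatabase: when the user picked
// the 'other' type, a MySQL-style URL is rejected with jdbc.otherError.
export const isInvalidOtherUrl = (connType: string, url: string): boolean =>
  connType === 'other' && checkJDBCIsMySQL(url);
```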
Date: Sun, 28 Jan 2024 09:23:03 +0800
Subject: [PATCH 024/112] chore: add batch i18n
---
.../portal/public/locales/en/datasource.json | 18 +++++++-
.../portal/public/locales/zh/datasource.json | 18 +++++++-
.../src/pages/batch-operation/index.tsx | 41 ++++++++++---------
3 files changed, 55 insertions(+), 22 deletions(-)
diff --git a/source/portal/public/locales/en/datasource.json b/source/portal/public/locales/en/datasource.json
index c31d21ff..4d13346a 100644
--- a/source/portal/public/locales/en/datasource.json
+++ b/source/portal/public/locales/en/datasource.json
@@ -107,6 +107,22 @@
"step2Desc": "Fill the information in the template",
"step2Tips1": "Making sure no duplicates",
"step3Title": "Step 3: Upload the template with filled information",
- "uploadTitle": "Fill in the template and upload"
+ "uploadTitle": "Fill in the template and upload",
+ "fileExtensionError": "Uploaded file must have an xlsx extension.",
+ "chooseFiles": "Choose files",
+ "chooseFile": "Choose file",
+ "dropFilesUpload": "Drop files to upload",
+ "dropFileUpload": "Drop file to upload",
+ "removeFile": "Remove file ",
+ "showFewer": "Show fewer files",
+ "showMore": "Show more files",
+ "error": "Error",
+ "only": ".xlsx files only",
+ "successTitle": "Successfully create data sources",
+ "successDesc": "Please download the report and check the result.",
+ "failedTitle": "Failed create data sources in batch",
+ "failedDesc": "Please download the report and fix the data to upload again to retry.",
+ "inProgress": "In progress",
+ "inProgressDesc": "Creating databases, Please do not close this window. It will takes less than 15 minutes."
}
}
diff --git a/source/portal/public/locales/zh/datasource.json b/source/portal/public/locales/zh/datasource.json
index 7ae6a3c9..36fbdd36 100644
--- a/source/portal/public/locales/zh/datasource.json
+++ b/source/portal/public/locales/zh/datasource.json
@@ -107,6 +107,22 @@
"step2Desc": "填写模板中的信息",
"step2Tips1": "确保没有重复项",
"step3Title": "第 3 步:上传填写信息的模板",
- "uploadTitle": "填写模板并上传"
+ "uploadTitle": "填写模板并上传",
+ "fileExtensionError": "上传的文件必须是 xlsx 格式。",
+ "chooseFiles": "选择文件",
+ "chooseFile": "选择文件",
+ "dropFilesUpload": "拖放文件以上传",
+ "dropFileUpload": "拖放文件以上传",
+ "removeFile": "移除文件",
+ "showFewer": "显示较少文件",
+ "showMore": "显示更多文件",
+ "error": "错误",
+ "only": "仅限 .xlsx 文件",
+ "successTitle": "成功创建数据源",
+ "successDesc": "请下载报告并检查结果。",
+ "failedTitle": "批量创建数据源失败",
+ "failedDesc": "请下载报告并修正数据,然后重新上传以重试。",
+ "inProgress": "进行中",
+ "inProgressDesc": "正在创建数据库,请不要关闭此窗口。预计耗时不超过15分钟。"
}
}
diff --git a/source/portal/src/pages/batch-operation/index.tsx b/source/portal/src/pages/batch-operation/index.tsx
index 47cacf22..f13241d7 100644
--- a/source/portal/src/pages/batch-operation/index.tsx
+++ b/source/portal/src/pages/batch-operation/index.tsx
@@ -93,7 +93,7 @@ const BatchOperationContent: React.FC = (
setErrors([]);
setUploadDisabled(false);
} else {
- setErrors(['Uploaded file must have an xlsx extension.']);
+ setErrors([t('datasource:batch.fileExtensionError')]);
setUploadDisabled(true);
}
setFiles(file);
@@ -206,23 +206,26 @@ const BatchOperationContent: React.FC = (
}
>
-
+
              <FileUpload
                onChange={({ detail }) => {
changeFile(detail.value);
}}
value={files}
i18nStrings={{
- uploadButtonText: (e) => (e ? 'Choose files' : 'Choose file'),
+ uploadButtonText: (e) =>
+ e
+ ? t('datasource:batch.chooseFiles')
+ : t('datasource:batch.chooseFile'),
dropzoneText: (e) =>
- e ? 'Drop files to upload' : 'Drop file to upload',
- removeFileAriaLabel: (e) => `Remove file ${e + 1}`,
- limitShowFewer: 'Show fewer files',
- limitShowMore: 'Show more files',
- errorIconAriaLabel: 'Error',
+ e
+ ? t('datasource:batch.dropFilesUpload')
+ : t('datasource:batch.dropFileUpload'),
+ removeFileAriaLabel: (e) =>
+ `${t('datasource:batch.removeFile')} ${e + 1}`,
+ limitShowFewer: t('datasource:batch.showFewer'),
+ limitShowMore: t('datasource:batch.showMore'),
+ errorIconAriaLabel: t('datasource:batch.error'),
}}
invalid
fileErrors={errors}
@@ -231,7 +234,7 @@ const BatchOperationContent: React.FC = (
showFileSize
showFileThumbnail
tokenLimit={1}
- constraintText=".xlsx files only"
+ constraintText={t('datasource:batch.only')}
/>
{uploadProgress > 0 && (
@@ -289,10 +292,10 @@ const BatchOperation: React.FC = () => {
if (status === BatchOperationStatus.Completed) {
setFlashBar([
{
- header: 'Successfully create data sources',
+ header: t('datasource:batch.successTitle'),
type: 'success',
dismissible: true,
- content: 'Please download the report and check the result.',
+ content: t('datasource:batch.successDesc'),
id: 'success',
action: (
@@ -309,11 +312,10 @@ const BatchOperation: React.FC = () => {
if (status === BatchOperationStatus.Error) {
setFlashBar([
{
- header: 'Failed create data sources in batch',
+ header: t('datasource:batch.failedTitle'),
type: 'error',
dismissible: true,
- content:
- 'Please download the report and fix the data to upload again to retry.',
+ content: t('datasource:batch.failedDesc'),
id: 'error',
action: (
@@ -331,11 +333,10 @@ const BatchOperation: React.FC = () => {
setFlashBar([
{
loading: true,
- header: 'In progress',
+ header: t('datasource:batch.inProgress'),
type: 'info',
dismissible: false,
- content:
- 'Creating databases, Please do not close this window. It will takes less than 15 minutes.',
+ content: t('datasource:batch.inProgressDesc'),
id: 'info',
},
]);
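
The pattern in this commit is mechanical: each hardcoded English string in the batch page moves into the `batch` namespace of `datasource.json` and is read back through `t('datasource:batch.<key>')`. A condensed sketch of the same idea for the extension check, assuming react-i18next's `useTranslation` hook:

```tsx
import { useTranslation } from 'react-i18next';

// Sketch: validation messages come from the locale files rather than literals,
// so the zh bundle is picked up automatically for Chinese users.
export const useXlsxValidation = () => {
  const { t } = useTranslation();
  return (fileName: string): string[] =>
    fileName.endsWith('.xlsx')
      ? []
      : [t('datasource:batch.fileExtensionError')];
};
```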
From e48d6df7d53182e95566a2991d8c224c9fd59339 Mon Sep 17 00:00:00 2001
From: Magic Chen
Date: Sun, 28 Jan 2024 09:31:33 +0800
Subject: [PATCH 025/112] fix: calculate ip
---
source/portal/public/locales/en/common.json | 2 +-
source/portal/public/locales/zh/common.json | 2 +-
source/portal/src/pages/system-settings/index.tsx | 4 +++-
3 files changed, 5 insertions(+), 3 deletions(-)
diff --git a/source/portal/public/locales/en/common.json b/source/portal/public/locales/en/common.json
index 087fe968..6d9d827a 100644
--- a/source/portal/public/locales/en/common.json
+++ b/source/portal/public/locales/en/common.json
@@ -441,7 +441,7 @@
"subnet": "Subnet ",
"subnetNameDesc": "Total number of left IPs in subnet",
"currentIPLeft": "Current IP left",
- "subnetDesc": "IP usage per subnet = 3 + (Number of sub-job runs can be used for 1 RDS scan * 2)",
+ "subnetDesc": "IP usage per subnet = (3 + (Number of sub-job runs can be used for 1 RDS scan * 2)) * Number of RDS instance detected concurrently",
"estimateResult": "Based on the above settings, for each job run it will consume {{ipCount}} IPs maximum per subnet.",
"estimateError": "The IP in discovery job can not be more than the IP left of subnets. Please adjust the settings.",
"estimateSuccess": "Config validate successfully.",
diff --git a/source/portal/public/locales/zh/common.json b/source/portal/public/locales/zh/common.json
index 211a7018..a732a381 100644
--- a/source/portal/public/locales/zh/common.json
+++ b/source/portal/public/locales/zh/common.json
@@ -440,7 +440,7 @@
"rdsSubJobRunNumberDesc": "1 次 RDS 扫描可运行多少次 Glue 作业",
"subnet": "子网 ",
"subnetNameDesc": "子网中剩余IP总数",
- "subnetDesc": "每个子网的 IP 使用量 = 3 + (可用于 1 次 RDS 扫描的子作业运行数量 * 2)",
+ "subnetDesc": "每个子网的 IP 使用量 = (3 + (可用于 1 次 RDS 扫描的子作业运行数量 * 2)) * 敏感发现作业中同时检测到的 RDS 实例数",
"estimateResult": "根据上述设置,对于每个作业运行,每个子网最多将消耗 {{ipCount}} 个 IP。",
"estimateError": "发现作业中的 IP 不能超过子网剩余的 IP。请调整设置。",
"estimateSuccess": "配置验证成功。",
diff --git a/source/portal/src/pages/system-settings/index.tsx b/source/portal/src/pages/system-settings/index.tsx
index 6d58e418..da94a24f 100644
--- a/source/portal/src/pages/system-settings/index.tsx
+++ b/source/portal/src/pages/system-settings/index.tsx
@@ -99,7 +99,9 @@ const SystemSettingContent = () => {
};
const calculateEstimateRestIPs = () => {
- return 3 + parseInt(subJobNumberRDS) * 2;
+ return (
+ (3 + parseInt(subJobNumberRDS) * 2) * parseInt(concurrentRunJobNumber)
+ );
};
const estimateIPs = () => {
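
The corrected estimate now multiplies by the RDS concurrency setting instead of ignoring it. A quick worked example with illustrative values:

```ts
// Illustrative numbers, not defaults from the solution.
const subJobNumberRDS = 5; // sub-job runs usable for one RDS scan
const concurrentRunJobNumber = 10; // RDS instances scanned concurrently

// (3 + 5 * 2) * 10 = 130 IPs per subnet, versus 13 under the old formula
// that dropped the concurrency factor.
const estimatedIPs = (3 + subJobNumberRDS * 2) * concurrentRunJobNumber;
console.log(estimatedIPs); // 130
```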
From fe7a3d40a262f4012820b5f48ce66f539dd6ad6b Mon Sep 17 00:00:00 2001
From: Magic Chen
Date: Sun, 28 Jan 2024 09:40:44 +0800
Subject: [PATCH 026/112] fix: show pending in job detail
---
source/portal/public/locales/en/common.json | 1 +
source/portal/public/locales/zh/common.json | 1 +
source/portal/src/pages/common-badge/index.tsx | 3 +++
source/portal/src/pages/common-badge/style.scss | 4 ++++
.../src/pages/common-badge/types/badge_type.ts | 1 +
source/portal/src/pages/glue-job/index.tsx | 14 +++++++++++++-
6 files changed, 23 insertions(+), 1 deletion(-)
diff --git a/source/portal/public/locales/en/common.json b/source/portal/public/locales/en/common.json
index 6d9d827a..d2964144 100644
--- a/source/portal/public/locales/en/common.json
+++ b/source/portal/public/locales/en/common.json
@@ -398,6 +398,7 @@
"changeEnableError": "Change enable error",
"selectCategoryLabel": "Please select category/label",
"pending": "Pending",
+ "PENDING": "PENDING",
"SUCCEEDED": "SUCCEEDED",
"RUNNING": "RUNNING",
"FAILED": "FAILED",
diff --git a/source/portal/public/locales/zh/common.json b/source/portal/public/locales/zh/common.json
index a732a381..2a8a681d 100644
--- a/source/portal/public/locales/zh/common.json
+++ b/source/portal/public/locales/zh/common.json
@@ -398,6 +398,7 @@
"changeEnableError": "更改启用错误",
"selectCategoryLabel": "请选择类别/标签",
"pending": "等待",
+ "PENDING": "等待",
"SUCCEEDED": "成功",
"RUNNING": "运行中",
"FAILED": "失败",
diff --git a/source/portal/src/pages/common-badge/index.tsx b/source/portal/src/pages/common-badge/index.tsx
index b66bee05..af5e7476 100644
--- a/source/portal/src/pages/common-badge/index.tsx
+++ b/source/portal/src/pages/common-badge/index.tsx
@@ -59,6 +59,9 @@ const CommonBadge: React.FC = (props: CommonBadgeProps) => {
'failed-badge':
badgeLabel === CLSAAIFIED_TYPE.Failed ||
labelType === CLSAAIFIED_TYPE.Failed,
+ 'pending-badge':
+ badgeLabel === CLSAAIFIED_TYPE.Pending ||
+ labelType === CLSAAIFIED_TYPE.Pending,
});
let iconName: any = 'status-pending';
diff --git a/source/portal/src/pages/common-badge/style.scss b/source/portal/src/pages/common-badge/style.scss
index 11462b57..e84f4e4b 100644
--- a/source/portal/src/pages/common-badge/style.scss
+++ b/source/portal/src/pages/common-badge/style.scss
@@ -74,3 +74,7 @@
.failed-badge {
color: #d91515;
}
+
+.pending-badge {
+ color: #cccccc;
+}
diff --git a/source/portal/src/pages/common-badge/types/badge_type.ts b/source/portal/src/pages/common-badge/types/badge_type.ts
index 1bdc35e2..250f160b 100644
--- a/source/portal/src/pages/common-badge/types/badge_type.ts
+++ b/source/portal/src/pages/common-badge/types/badge_type.ts
@@ -19,6 +19,7 @@ export const CLSAAIFIED_TYPE = {
Completed: 'Completed',
Stopped: 'Stopped',
Crawling: 'Crawling',
+ Pending: 'Pending',
};
export const PRIVARY_TYPE_DATA = {
diff --git a/source/portal/src/pages/glue-job/index.tsx b/source/portal/src/pages/glue-job/index.tsx
index f37017f4..32b28f27 100644
--- a/source/portal/src/pages/glue-job/index.tsx
+++ b/source/portal/src/pages/glue-job/index.tsx
@@ -153,7 +153,7 @@ const GlueJobContent = () => {
if (jobRowData.state === 'Active (idle)') {
tempType = CLSAAIFIED_TYPE.SystemMark;
}
- if (jobRowData.state === 'Running' || jobRowData.state === 'Pending') {
+ if (jobRowData.state === 'Running') {
tempType = CLSAAIFIED_TYPE.System;
}
if (jobRowData.state === 'Stopped') {
@@ -165,6 +165,9 @@ const GlueJobContent = () => {
if (jobRowData.state === 'Failed') {
tempType = CLSAAIFIED_TYPE.Failed;
}
+ if (jobRowData.state === 'Pending') {
+ tempType = CLSAAIFIED_TYPE.Pending;
+ }
return (
      <CommonBadge ... />
@@ ... @@ const GlueJobContent = () => {
const getProcessData = (processData: any) => {
const totalJobCount =
+ processData.pending_count +
processData.success_count +
processData.running_count +
processData.fail_count +
@@ -246,6 +250,14 @@ const GlueJobContent = () => {
processData.stopped_count +
processData.not_existed_count;
const tmpColumnChartData: ColumnChartData[] = [
+ {
+ title: t('PENDING'),
+ type: 'bar',
+ valueFormatter: (e: any) =>
+ `${processData.pending_count} (${(100 * e).toFixed(0)}%)`,
+ data: [{ x: '', y: processData.pending_count / totalJobCount }],
+ color: '#CCCCCC',
+ },
{
title: t('SUCCEEDED'),
type: 'bar',
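
Since each bar's y value is a share of `totalJobCount`, the new `pending_count` has to join the denominator as well; otherwise the remaining bars would sum past 100%. A trimmed sketch of the normalization (only four of the six states, with hypothetical counts):

```ts
// Trimmed to four states for brevity; the real total also adds
// stopped_count and not_existed_count.
interface ProcessData {
  pending_count: number;
  success_count: number;
  running_count: number;
  fail_count: number;
}

const toShares = (d: ProcessData) => {
  const total =
    d.pending_count + d.success_count + d.running_count + d.fail_count;
  return {
    pending: d.pending_count / total,
    succeeded: d.success_count / total,
    running: d.running_count / total,
    failed: d.fail_count / total,
  };
};

// toShares({ pending_count: 2, success_count: 6, running_count: 1, fail_count: 1 })
// => { pending: 0.2, succeeded: 0.6, running: 0.1, failed: 0.1 }
```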
From a10020641abad0c9ff602dbe3cfe02245ec80bdf Mon Sep 17 00:00:00 2001
From: Magic Chen
Date: Sun, 28 Jan 2024 10:02:28 +0800
Subject: [PATCH 027/112] chore: add tips when user close tips
---
.../portal/public/locales/en/datasource.json | 3 +-
.../portal/public/locales/zh/datasource.json | 3 +-
.../src/pages/batch-operation/index.tsx | 62 ++++++++++++++++---
.../src/pages/system-settings/index.tsx | 4 +-
4 files changed, 59 insertions(+), 13 deletions(-)
diff --git a/source/portal/public/locales/en/datasource.json b/source/portal/public/locales/en/datasource.json
index 4d13346a..11de7e66 100644
--- a/source/portal/public/locales/en/datasource.json
+++ b/source/portal/public/locales/en/datasource.json
@@ -123,6 +123,7 @@
"failedTitle": "Failed create data sources in batch",
"failedDesc": "Please download the report and fix the data to upload again to retry.",
"inProgress": "In progress",
- "inProgressDesc": "Creating databases, Please do not close this window. It will takes less than 15 minutes."
+ "inProgressDesc": "Creating databases, Please do not close this window. It will takes less than 15 minutes.",
+ "dismissAlert": "Please make sure that you have downloaded the batch import report. Once this window is closed, the report will not be available for download again."
}
}
diff --git a/source/portal/public/locales/zh/datasource.json b/source/portal/public/locales/zh/datasource.json
index 36fbdd36..4cc71a02 100644
--- a/source/portal/public/locales/zh/datasource.json
+++ b/source/portal/public/locales/zh/datasource.json
@@ -123,6 +123,7 @@
"failedTitle": "批量创建数据源失败",
"failedDesc": "请下载报告并修正数据,然后重新上传以重试。",
"inProgress": "进行中",
- "inProgressDesc": "正在创建数据库,请不要关闭此窗口。预计耗时不超过15分钟。"
+ "inProgressDesc": "正在创建数据库,请不要关闭此窗口。预计耗时不超过15分钟。",
+ "dismissAlert": "请确保已经下载了批量导入报告,关闭此窗口后,报告将不可再次下载。"
}
}
diff --git a/source/portal/src/pages/batch-operation/index.tsx b/source/portal/src/pages/batch-operation/index.tsx
index f13241d7..8d56a33d 100644
--- a/source/portal/src/pages/batch-operation/index.tsx
+++ b/source/portal/src/pages/batch-operation/index.tsx
@@ -1,5 +1,6 @@
import {
AppLayout,
+ Box,
Button,
Container,
ContentLayout,
@@ -8,6 +9,7 @@ import {
FlashbarProps,
FormField,
Header,
+ Modal,
ProgressBar,
SpaceBetween,
StatusIndicator,
@@ -89,12 +91,14 @@ const BatchOperationContent: React.FC = (
const changeFile = (file: any) => {
setUploadProgress(0);
- if (file[0].name.endsWith('.xlsx') === true) {
- setErrors([]);
- setUploadDisabled(false);
- } else {
- setErrors([t('datasource:batch.fileExtensionError')]);
- setUploadDisabled(true);
+ if (file && file.length > 0) {
+ if (file[0].name.endsWith('.xlsx') === true) {
+ setErrors([]);
+ setUploadDisabled(false);
+ } else {
+ setErrors([t('datasource:batch.fileExtensionError')]);
+ setUploadDisabled(true);
+ }
}
setFiles(file);
};
@@ -274,6 +278,7 @@ const BatchOperation: React.FC = () => {
const [status, setStatus] = useState(BatchOperationStatus.NotStarted);
const [loadingDownload, setLoadingDownload] = useState(false);
+ const [showConfirm, setShowConfirm] = useState(false);
const downloadReport = async () => {
console.log('download report');
@@ -288,6 +293,16 @@ const BatchOperation: React.FC = () => {
}
};
+ const confirmDismissNotification = () => {
+ localStorage.removeItem(BATCH_SOURCE_ID);
+ setFlashBar([]);
+ setShowConfirm(false);
+ };
+
+ const onDismissNotification = () => {
+ setShowConfirm(true);
+ };
+
useEffect(() => {
if (status === BatchOperationStatus.Completed) {
setFlashBar([
@@ -303,8 +318,7 @@ const BatchOperation: React.FC = () => {
),
onDismiss: () => {
- localStorage.removeItem(BATCH_SOURCE_ID);
- setFlashBar([]);
+ onDismissNotification();
},
},
]);
@@ -323,8 +337,7 @@ const BatchOperation: React.FC = () => {
),
onDismiss: () => {
- localStorage.removeItem(BATCH_SOURCE_ID);
- setFlashBar([]);
+ onDismissNotification();
},
},
]);
@@ -376,6 +389,35 @@ const BatchOperation: React.FC = () => {
},
]}
/>
+        <Modal
+          onDismiss={() => setShowConfirm(false)}
+          visible={showConfirm}
+          footer={
+            <Box float="right">
+              <SpaceBetween direction="horizontal" size="xs">
+                <Button
+                  variant="link"
+                  onClick={() => {
+                    setShowConfirm(false);
+                  }}
+                >
+                  {t('button.cancel')}
+                </Button>
+                <Button
+                  variant="primary"
+                  onClick={() => {
+                    confirmDismissNotification();
+                  }}
+                >
+                  {t('confirm')}
+                </Button>
+              </SpaceBetween>
+            </Box>
+          }
+          header={t('confirm')}
+        >
+          {t('datasource:batch.dismissAlert')}
+        </Modal>
}
headerSelector="#header"
diff --git a/source/portal/src/pages/system-settings/index.tsx b/source/portal/src/pages/system-settings/index.tsx
index da94a24f..6703747e 100644
--- a/source/portal/src/pages/system-settings/index.tsx
+++ b/source/portal/src/pages/system-settings/index.tsx
@@ -166,6 +166,8 @@ const SystemSettingContent = () => {
value={concurrentRunJobNumber}
placeholder="100"
onChange={(e) => {
+ setIpError(false);
+ setIpSuccess(false);
setConcurrentRunJobNumber(e.detail.value);
}}
/>
@@ -190,7 +192,7 @@ const SystemSettingContent = () => {
{subnetItems.map((item) => {
return (
-
+
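
The dismissal is now a two-step flow: clicking the flashbar's close icon only opens a confirmation modal, and the destructive cleanup runs solely on confirm. A reduced sketch of the state transitions, with the assumed `BATCH_SOURCE_ID` storage key from the page:

```ts
// Reduced sketch of the two-step dismissal; Cloudscape components stubbed out.
const BATCH_SOURCE_ID = 'BATCH_SOURCE_ID'; // storage key name is an assumption

let showConfirm = false;
let flashBar: { id: string }[] = [{ id: 'success' }];

// Step 1: the flashbar onDismiss only opens the modal.
const onDismissNotification = () => {
  showConfirm = true;
};

// Step 2: cleanup happens only after explicit confirmation.
const confirmDismissNotification = () => {
  localStorage.removeItem(BATCH_SOURCE_ID); // forget the finished batch run
  flashBar = []; // drop the notification (and its report link)
  showConfirm = false;
};
```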
Date: Mon, 29 Jan 2024 14:45:26 +0800
Subject: [PATCH 028/112] fix(fe): update subnet Desc
---
source/portal/public/locales/zh/common.json | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/source/portal/public/locales/zh/common.json b/source/portal/public/locales/zh/common.json
index 2a8a681d..eeadefa5 100644
--- a/source/portal/public/locales/zh/common.json
+++ b/source/portal/public/locales/zh/common.json
@@ -435,13 +435,13 @@
"title": "设置",
"desc": "敏感数据发现作业的总体设置",
"rdsDataSourceDiscovery": "RDS数据源数据发现",
- "rdsDetectedConcurrency": "敏感发现作业中同时检测到的 RDS 实例数",
+ "rdsDetectedConcurrency": "敏感发现作业中同时检测的 RDS 实例数",
"rdsDetectedConcurrencyDesc": "将同时扫描多少个 RDS 实例",
- "rdsSubJobRunNumber": "1 次 RDS 扫描可使用的子作业运行次数",
+ "rdsSubJobRunNumber": "扫描1个RDS实例使用的子任务个数",
"rdsSubJobRunNumberDesc": "1 次 RDS 扫描可运行多少次 Glue 作业",
"subnet": "子网 ",
"subnetNameDesc": "子网中剩余IP总数",
- "subnetDesc": "每个子网的 IP 使用量 = (3 + (可用于 1 次 RDS 扫描的子作业运行数量 * 2)) * 敏感发现作业中同时检测到的 RDS 实例数",
+ "subnetDesc": "每个子网的 IP 使用量 = (3 + (可用于 1 次 RDS 扫描的子作业运行数量 * 2)) * 敏感发现作业中同时检测的 RDS 实例数",
"estimateResult": "根据上述设置,对于每个作业运行,每个子网最多将消耗 {{ipCount}} 个 IP。",
"estimateError": "发现作业中的 IP 不能超过子网剩余的 IP。请调整设置。",
"estimateSuccess": "配置验证成功。",
From f2f67b65c98639858bdb51126acd7a1aa60d5c26 Mon Sep 17 00:00:00 2001
From: Magic Chen
Date: Mon, 29 Jan 2024 18:08:32 +0800
Subject: [PATCH 029/112] fix: load database do not load page
---
.../data-source-connection/componments/JDBCConnectionEdit.tsx | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/source/portal/src/pages/data-source-connection/componments/JDBCConnectionEdit.tsx b/source/portal/src/pages/data-source-connection/componments/JDBCConnectionEdit.tsx
index 078fd5ec..c331971a 100644
--- a/source/portal/src/pages/data-source-connection/componments/JDBCConnectionEdit.tsx
+++ b/source/portal/src/pages/data-source-connection/componments/JDBCConnectionEdit.tsx
@@ -496,7 +496,6 @@ const JDBCConnectionEdit: React.FC = (
setOtherJDBCUrlError(true);
return;
}
- setIsLoading(true);
setLoadingJdbcDatabase(true);
const requestParam = {
connection_url: jdbcConnectionData.jdbc_connection_url,
@@ -510,7 +509,6 @@ const JDBCConnectionEdit: React.FC = (
} catch (error) {
alertMsg(error + '', 'error');
}
- setIsLoading(false);
setLoadingJdbcDatabase(false);
};
@@ -812,6 +810,7 @@ const JDBCConnectionEdit: React.FC = (
secondaryControl={
props.providerId !== 1 && (
                  <Button
                    onClick={() => {
setJdbcDatabaseEmptyError(false);
findDatabase();
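
The fix scopes the spinner to the database lookup instead of the whole page, so the form stays mounted while `findDatabase` runs. A minimal sketch of the same idea as a hook, with `listJdbcDatabases` standing in for the solution's API call:

```tsx
import { useState } from 'react';

// Sketch: a field-level loading flag that only disables the
// Find Database button, instead of a page-level isLoading flag.
export const useJdbcDatabases = (
  listJdbcDatabases: () => Promise<string[]>
) => {
  const [loadingJdbcDatabase, setLoading] = useState(false);
  const [databases, setDatabases] = useState<string[]>([]);

  const findDatabase = async () => {
    setLoading(true);
    try {
      setDatabases(await listJdbcDatabases());
    } finally {
      setLoading(false); // runs on success and on error alike
    }
  };

  return { loadingJdbcDatabase, databases, findDatabase };
};
```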
From 44ecf27bfe6f5895da614a4c170c90ebeb34de55 Mon Sep 17 00:00:00 2001
From: cuihubin <530051970@qq.com>
Date: Wed, 31 Jan 2024 17:51:38 +0800
Subject: [PATCH 030/112] delete crawler and glue db when delete connection
---
source/constructs/api/data_source/crud.py | 8 +-------
source/constructs/api/data_source/service.py | 14 +++++++++++---
source/portal/public/locales/zh/datasource.json | 2 +-
.../componments/DataSourceList.tsx | 4 ++--
4 files changed, 15 insertions(+), 13 deletions(-)
diff --git a/source/constructs/api/data_source/crud.py b/source/constructs/api/data_source/crud.py
index 6bdf965a..152ae89b 100644
--- a/source/constructs/api/data_source/crud.py
+++ b/source/constructs/api/data_source/crud.py
@@ -859,18 +859,12 @@ def copy_properties(jdbc_instance_target: JDBCInstanceSource, jdbc_instance_orig
jdbc_instance_target.jdbc_driver_class_name = jdbc_instance_origin.jdbc_driver_class_name
jdbc_instance_target.jdbc_driver_jar_uri = jdbc_instance_origin.jdbc_driver_jar_uri
jdbc_instance_target.detection_history_id = 0
- # jdbc_instance_target.instance_class = jdbc_instance_origin.instance_class
- # jdbc_instance_target.instance_status = jdbc_instance_origin.instance_status
jdbc_instance_target.account_provider_id = jdbc_instance_origin.account_provider_id
jdbc_instance_target.account_id = jdbc_instance_origin.account_id
jdbc_instance_target.region = jdbc_instance_origin.region
- # jdbc_instance_target.data_source_id = jdbc_instance_origin.data_source_id
- # jdbc_instance_target.detection_history_id = jdbc_instance_origin.detection_history_id
jdbc_instance_target.glue_database = jdbc_instance_origin.glue_database
- # jdbc_instance_target.glue_crawler = jdbc_instance_origin.glue_crawler
+ jdbc_instance_target.glue_crawler = jdbc_instance_origin.glue_crawler
jdbc_instance_target.glue_connection = jdbc_instance_origin.glue_connection
- # jdbc_instance_target.glue_vpc_endpoint = jdbc_instance_origin.glue_vpc_endpoint
- # jdbc_instance_target.glue_state = jdbc_instance_origin.glue_state
jdbc_instance_target.create_type = jdbc_instance_origin.create_type
return jdbc_instance_target
diff --git a/source/constructs/api/data_source/service.py b/source/constructs/api/data_source/service.py
index ce9a7823..184608cc 100644
--- a/source/constructs/api/data_source/service.py
+++ b/source/constructs/api/data_source/service.py
@@ -396,7 +396,14 @@ def sync_jdbc_connection(jdbc: JDBCInstanceSourceBase):
password = conn_response.get('ConnectionProperties', {}).get('PASSWORD')
secret = conn_response.get('ConnectionProperties', {}).get("SECRET_ID"),
url = conn_response.get('ConnectionProperties', {}).get('JDBC_CONNECTION_URL'),
- jdbc_instance = JDBCInstanceSource(jdbc, jdbc_connection_url=url, master_username=username, password=password, secret=secret)
+ jdbc_instance = JDBCInstanceSource(instance_id=jdbc.instance_id,
+ account_provider_id=jdbc.account_provider_id,
+ account_id=jdbc.account_id,
+ region=jdbc.region,
+ jdbc_connection_url=url,
+ master_username=username,
+ password=password,
+ secret=secret)
# jdbc_instance.jdbc_connection_url = url
# condition_check(ec2_client, credentials, source.glue_state, conn_response['PhysicalConnectionRequirements'])
sync(glue_client,
@@ -693,6 +700,7 @@ def before_delete_jdbc_connection(provider_id, account, region, instance_id, dat
MessageEnum.DISCOVERY_JOB_CAN_NOT_DELETE_DATABASE.get_msg())
else:
logger.info(f"delete jdbc connection: {account},{region},{database_type},{jdbc_instance.instance_id}")
+ return jdbc_instance.glue_crawler
def gen_assume_account(provider_id, account, region):
account = account if provider_id == Provider.AWS_CLOUD.value else admin_account_id
@@ -1721,7 +1729,6 @@ def __validate_jdbc_url(url: str):
return True
def add_jdbc_conn(jdbcConn: JDBCInstanceSource):
- print(f"create {jdbcConn.instance_id}!!!")
jdbc_targets = []
create_connection_response = {}
# get_db_names(jdbcConn.jdbc_connection_url, jdbcConn.jdbc_connection_schema)
@@ -1772,7 +1779,8 @@ def add_jdbc_conn(jdbcConn: JDBCInstanceSource):
if create_connection_response.get('ResponseMetadata', {}).get('HTTPStatusCode') != 200:
raise BizException(MessageEnum.SOURCE_JDBC_CREATE_FAIL.get_code(),
MessageEnum.SOURCE_JDBC_CREATE_FAIL.get_msg())
-
+ # Create Glue database
+ glue.create_database(DatabaseInput={'Name': glue_database_name})
# Create Crawler
jdbc_source = JdbcSource(connection_url=jdbcConn.jdbc_connection_url, username=jdbcConn.master_username, password=jdbcConn.password, secret_id=jdbcConn.secret)
db_names = get_db_names_4_jdbc(jdbc_source, jdbcConn.jdbc_connection_schema)
diff --git a/source/portal/public/locales/zh/datasource.json b/source/portal/public/locales/zh/datasource.json
index fe78a820..4fbfcb8c 100644
--- a/source/portal/public/locales/zh/datasource.json
+++ b/source/portal/public/locales/zh/datasource.json
@@ -98,7 +98,7 @@
"tab": "批量创建数据源",
"step1Title": "第 1 步:下载模板",
"step1Desc": "按照模板中的说明填写信息",
- "step1Download": "下载模板'BatchCreateDataSourceTemplate.xlsx'",
+ "step1Download": "下载模板",
"step2Title": "第 2 步:按照提示填写模板",
"step2Desc": "填写模板中的信息",
"step2Tips1": "确保没有重复项",
diff --git a/source/portal/src/pages/data-source-connection/componments/DataSourceList.tsx b/source/portal/src/pages/data-source-connection/componments/DataSourceList.tsx
index 9bc6970c..6bd57d3c 100644
--- a/source/portal/src/pages/data-source-connection/componments/DataSourceList.tsx
+++ b/source/portal/src/pages/data-source-connection/componments/DataSourceList.tsx
@@ -225,7 +225,7 @@ const DataSourceList: React.FC = memo((props: any) => {
},
{
text: t('button.deleteDataSource'),
- id: 'disconnect_dc',
+ id: 'deleteDataSource',
disabled:
tagType === DATA_TYPE_ENUM.rds || selectedItems.length === 0,
},
@@ -615,7 +615,7 @@ const DataSourceList: React.FC = memo((props: any) => {
await hideDataSourceJDBC(requestParam);
}
showHideSpinner(false);
- alertMsg(t('disconnectSuccess'), 'success');
+ alertMsg(t('deleteSuccess'), 'success');
setSelectedItems([]);
getPageData();
return;
From 1e68ee25c591d4d26565d3c0d02e9ef0b867ff85 Mon Sep 17 00:00:00 2001
From: junzhong
Date: Thu, 1 Feb 2024 09:14:11 +0800
Subject: [PATCH 031/112] fix(cdk): add glue:TagResource policy
---
source/constructs/lib/admin/api-stack.ts | 1 +
source/constructs/lib/admin/glue-stack.ts | 7 +++++--
source/constructs/lib/agent/AgentRole-stack.ts | 2 ++
3 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/source/constructs/lib/admin/api-stack.ts b/source/constructs/lib/admin/api-stack.ts
index dfa4d8a4..02800bbb 100644
--- a/source/constructs/lib/admin/api-stack.ts
+++ b/source/constructs/lib/admin/api-stack.ts
@@ -176,6 +176,7 @@ export class ApiStack extends Construct {
'glue:GetPartition',
'glue:GetPartitions',
'glue:BatchGetPartition',
+ 'glue:TagResource',
's3:PutObject',
's3:DeleteObject',
's3:GetObject',
diff --git a/source/constructs/lib/admin/glue-stack.ts b/source/constructs/lib/admin/glue-stack.ts
index 56e0babd..06e31078 100644
--- a/source/constructs/lib/admin/glue-stack.ts
+++ b/source/constructs/lib/admin/glue-stack.ts
@@ -213,9 +213,12 @@ export class GlueStack extends Construct {
}));
const noramlStatement = new PolicyStatement({
effect: Effect.ALLOW,
- actions: ['glue:GetTable',
+ actions: [
+ 'glue:GetTable',
'glue:BatchCreatePartition',
- 'glue:CreatePartition'],
+ 'glue:CreatePartition',
+ 'glue:TagResource',
+ ],
resources: [`arn:${Aws.PARTITION}:glue:*:${Aws.ACCOUNT_ID}:table/${SolutionInfo.SOLUTION_GLUE_DATABASE}/*`,
`arn:${Aws.PARTITION}:glue:*:${Aws.ACCOUNT_ID}:database/${SolutionInfo.SOLUTION_GLUE_DATABASE}`,
`arn:${Aws.PARTITION}:glue:*:${Aws.ACCOUNT_ID}:catalog`],
diff --git a/source/constructs/lib/agent/AgentRole-stack.ts b/source/constructs/lib/agent/AgentRole-stack.ts
index ce32d5bc..39a8b4ae 100755
--- a/source/constructs/lib/agent/AgentRole-stack.ts
+++ b/source/constructs/lib/agent/AgentRole-stack.ts
@@ -61,6 +61,7 @@ export class AgentRoleStack extends Construct {
'glue:GetConnection',
'glue:GetConnections',
'glue:UpdateConnection',
+ 'glue:TagResource',
],
resources: ['*'],
}),
@@ -233,6 +234,7 @@ export class AgentRoleStack extends Construct {
'glue:BatchUpdatePartition',
'glue:BatchGetPartition',
'glue:BatchGetCustomEntityTypes',
+ 'glue:TagResource',
'ec2:DescribeVpcEndpoints',
'ec2:DescribeRouteTables',
'ec2:CreateNetworkInterface',
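
Glue attaches tags at creation time, so any role that may issue tagged create calls also needs `glue:TagResource`; that is why the action lands in all three policies above. A minimal CDK sketch of such a statement (the action pairing and ARN scope are illustrative):

```ts
import { Aws } from 'aws-cdk-lib';
import { Effect, PolicyStatement } from 'aws-cdk-lib/aws-iam';

// Illustrative statement: creating a tagged crawler needs glue:TagResource
// next to the create permission, or the call fails with AccessDenied.
const glueTaggingStatement = new PolicyStatement({
  effect: Effect.ALLOW,
  actions: ['glue:CreateCrawler', 'glue:TagResource'],
  resources: [`arn:${Aws.PARTITION}:glue:*:${Aws.ACCOUNT_ID}:crawler/*`],
});
```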
From 53143e401cc5039f982c68065a1a021a74447e8a Mon Sep 17 00:00:00 2001
From: cuihubin <530051970@qq.com>
Date: Thu, 1 Feb 2024 10:48:57 +0800
Subject: [PATCH 032/112] update jdbc create logic
---
source/constructs/api/catalog/service.py | 5 +-
source/constructs/api/data_source/service.py | 4 +-
.../portal/public/locales/en/datasource.json | 4 +-
.../portal/public/locales/zh/datasource.json | 2 +
.../componments/JDBCConnection.tsx | 50 ++++++++++++----
.../componments/JDBCConnectionEdit.tsx | 57 +++++++++++++------
6 files changed, 87 insertions(+), 35 deletions(-)
diff --git a/source/constructs/api/catalog/service.py b/source/constructs/api/catalog/service.py
index 296f4246..59bbc5b2 100644
--- a/source/constructs/api/catalog/service.py
+++ b/source/constructs/api/catalog/service.py
@@ -430,7 +430,7 @@ def sync_crawler_result(
if delete_glue_table_names:
logger.info("batch delete glue tables" + json.dumps(delete_glue_table_names))
glue_client.batch_delete_table(DatabaseName=glue_database_name,
- TablesToDelete=delete_glue_table_names)
+ TablesToDelete=delete_glue_table_names)
except Exception as err:
logger.exception("batch delete glue tables error" + str(err))
if logger.isEnabledFor(logging.DEBUG):
@@ -761,7 +761,6 @@ def __convert_identifiers_to_dict(identifiers: str):
result_dict[i[0]] = i[1]
return result_dict
-
def sync_job_detection_result(
account_id: str,
region: str,
@@ -848,7 +847,7 @@ def sync_job_detection_result(
not overwrite and catalog_column.manual_tag != const.MANUAL)):
column_dict = {
"id": catalog_column.id,
- "identifier": json.dumps(identifier_dict),
+ "identifier": json.dumps(identifier_dict, ensure_ascii=False),
"column_value_example": column_sample_data,
"column_path": column_path,
"privacy": column_privacy,
diff --git a/source/constructs/api/data_source/service.py b/source/constructs/api/data_source/service.py
index 184608cc..01718f15 100644
--- a/source/constructs/api/data_source/service.py
+++ b/source/constructs/api/data_source/service.py
@@ -400,10 +400,10 @@ def sync_jdbc_connection(jdbc: JDBCInstanceSourceBase):
account_provider_id=jdbc.account_provider_id,
account_id=jdbc.account_id,
region=jdbc.region,
- jdbc_connection_url=url,
+ jdbc_connection_url=url[0],
master_username=username,
password=password,
- secret=secret)
+ secret=secret[0])
# jdbc_instance.jdbc_connection_url = url
# condition_check(ec2_client, credentials, source.glue_state, conn_response['PhysicalConnectionRequirements'])
sync(glue_client,
diff --git a/source/portal/public/locales/en/datasource.json b/source/portal/public/locales/en/datasource.json
index 11de7e66..30b0aaf5 100644
--- a/source/portal/public/locales/en/datasource.json
+++ b/source/portal/public/locales/en/datasource.json
@@ -58,7 +58,7 @@
"certValidationDesc": "By default your custom certificate is validated before use. Turn on this option to skip validation of the certificate algorithm and key length during connection.",
"skipValidation": "Skip certificate validation",
"findDatabase": "Find Database",
- "customJDBCCertString": "Custom JDBC certificate string",
+ "customJDBCCertString": "Custom JDBC certificate string",
"customJDBCCertStringDesc": "Enter your database specific custom certificate info.",
"customJDBCCertConstraint": "For Oracle Database this maps to SSL_SERVER_CERT_DN, and for SQL Server it maps to hostNameInCertificate.",
"description": "Description - optional",
@@ -80,6 +80,8 @@
"selectSecret": "Please select secret",
"username": "Username",
"password": "Password",
+ "inputUsername": "Please input username",
+ "inputPassword": "Please input password",
"networkOption": "Network options",
"networkDesc": "If your Amazon Glue job needs to jdbc resource which existed in other vpc or other cloud provider environment, you must provide additional VPC-specific configuration information.",
"vpc": "VPC",
diff --git a/source/portal/public/locales/zh/datasource.json b/source/portal/public/locales/zh/datasource.json
index 60b2c31d..9287250f 100644
--- a/source/portal/public/locales/zh/datasource.json
+++ b/source/portal/public/locales/zh/datasource.json
@@ -80,6 +80,8 @@
"selectSecret": "请选择密钥",
"username": "用户名",
"password": "密码",
+ "inputUsername": "请输入用户名",
+ "inputPassword": "请输入密码",
"networkOption": "网络选项",
"networkDesc": "如果你的 AWS Glue 工作需要连接到其他VPC或者其他云供应商环境中的 jdbc 资源,你需要提供额外的 VPC 特定的配置信息。",
"vpc": "VPC",
diff --git a/source/portal/src/pages/data-source-connection/componments/JDBCConnection.tsx b/source/portal/src/pages/data-source-connection/componments/JDBCConnection.tsx
index 98f58d71..a4b5a307 100644
--- a/source/portal/src/pages/data-source-connection/componments/JDBCConnection.tsx
+++ b/source/portal/src/pages/data-source-connection/componments/JDBCConnection.tsx
@@ -12,6 +12,7 @@ import {
Tiles,
Textarea,
Modal,
+ Grid,
} from '@cloudscape-design/components';
import {
listGlueConnection,
@@ -864,6 +865,9 @@ const JDBCConnection: React.FC<JDBCConnectionProps> = (
{credential === 'secret' && (
+
+              {props.providerId !== 1 && (
+                <div>
+                  <Button
+                    onClick={() => {
+                      setJdbcDatabaseEmptyError(false);
+                      findDatabase();
+                    }}
+                    iconName="search"
+                    loading={props.providerId === 1 || loadingJdbcDatabase}
+                  >
+                    {t('datasource:jdbc.findDatabase')}
+                  </Button>
+                </div>
+              )}
+
)}
{credential === 'password' && (
-            <>
+            <Grid>
              <Input
                onChange={({ detail }) => {
changeDatabase('');
@@ -890,6 +912,7 @@ const JDBCConnection: React.FC<JDBCConnectionProps> = (
                onChange={({ detail }) => {
@@ -898,15 +921,9 @@ const JDBCConnection: React.FC<JDBCConnectionProps> = (
}}
/>
-            </>
-          )}
-          */}
+              {props.providerId !== 1 && (
+                <div>
+                  <Button
+                    onClick={() => {
+                      setJdbcDatabaseEmptyError(false);
+                      findDatabase();
+                    }}
+                    iconName="search"
+                    disabled={props.providerId === 1 || loadingJdbcDatabase}
+                  >
+                    {t('datasource:jdbc.findDatabase')}
+                  </Button>
+                </div>
+              )}
+              {/* ... */}
+            </Grid>
+          )}
+
}
>
{credential === 'secret' && (
+
+ {props.providerId !== 1 && (
+
+ {
+ setJdbcDatabaseEmptyError(false);
+ findDatabase();
+ }}
+ iconName="search"
+ loading={props.providerId === 1 || loadingJdbcDatabase}
+ >
+ {t('datasource:jdbc.findDatabase')}
+
+
+ )}
+
)}
{credential === 'password' && (
- <>
+
{
changeDatabase('');
@@ -791,6 +813,7 @@ const JDBCConnectionEdit: React.FC = (
{
@@ -799,7 +822,21 @@ const JDBCConnectionEdit: React.FC = (
}}
/>
- >
+ {props.providerId !== 1 && (
+
+ {
+ setJdbcDatabaseEmptyError(false);
+ findDatabase();
+ }}
+ iconName="search"
+ loading={props.providerId === 1 || loadingJdbcDatabase}
+ >
+ {t('datasource:jdbc.findDatabase')}
+
+
+ )}
+
)}
= (
label={t('datasource:jdbc.jdbcDatabase')}
description={t('datasource:jdbc.jdbcDatabaseDesc')}
constraintText={t('datasource:jdbc.jdbcDatabaseConstraint')}
- secondaryControl={
- props.providerId !== 1 && (
- {
- setJdbcDatabaseEmptyError(false);
- findDatabase();
- }}
- iconName="search"
- disabled={props.providerId === 1 || loadingJdbcDatabase}
- >
- {t('datasource:jdbc.findDatabase')}
-
- )
- }
errorText={
jdbcDatabaseEmptyError
? t('datasource:jdbc.databaseError')
@@ -829,6 +851,7 @@ const JDBCConnectionEdit: React.FC = (
}
>