Adding Hadoop hadoop-hdfs-rbf into the openctest framework #32

Status: Open. Wants to merge 18 commits into main.
2 changes: 1 addition & 1 deletion core/add_project.sh
@@ -3,7 +3,7 @@
function setup_hadoop() {
[ ! -d "app/ctest-hadoop" ] && git clone https://github.com/xlab-uiuc/hadoop.git app/ctest-hadoop
cd app/ctest-hadoop
-git fetch && git checkout ctest-injection
+git fetch && git checkout trunk
home_dir=$PWD
cd $home_dir/hadoop-common-project/hadoop-common
mvn clean install -DskipTests
12 changes: 11 additions & 1 deletion core/ctest_const.py
@@ -9,18 +9,21 @@

HCOMMON = "hadoop-common"
HDFS = "hadoop-hdfs"
+HDFSRBF = "hadoop-hdfs-rbf"
HBASE = "hbase-server"
ZOOKEEPER = "zookeeper-server"
ALLUXIO = "alluxio-core"

-CTEST_HADOOP_DIR = os.path.join(APP_DIR, "ctest-hadoop")
+# CTEST_HADOOP_DIR = os.path.join(APP_DIR, "../../../hadoop")
+CTEST_HADOOP_DIR = os.path.join(CUR_DIR, "../../hadoop")
CTEST_HBASE_DIR = os.path.join(APP_DIR, "ctest-hbase")
CTEST_ZK_DIR = os.path.join(APP_DIR, "ctest-zookeeper")
CTEST_ALLUXIO_DIR = os.path.join(APP_DIR, "ctest-alluxio")

PROJECT_DIR = {
HCOMMON: CTEST_HADOOP_DIR,
HDFS: CTEST_HADOOP_DIR,
+HDFSRBF: CTEST_HADOOP_DIR,
HBASE: CTEST_HBASE_DIR,
ZOOKEEPER: CTEST_ZK_DIR,
ALLUXIO: CTEST_ALLUXIO_DIR,
@@ -31,6 +34,7 @@
MODULE_SUBDIR = {
HCOMMON: "hadoop-common-project/hadoop-common",
HDFS: "hadoop-hdfs-project/hadoop-hdfs",
+HDFSRBF: "hadoop-hdfs-project/hadoop-hdfs-rbf",
HBASE: "hbase-server",
ZOOKEEPER: "zookeeper-server",
ALLUXIO: "core",
@@ -46,6 +50,7 @@
SUREFIRE_DIR = {
HCOMMON: [os.path.join(CTEST_HADOOP_DIR, MODULE_SUBDIR[HCOMMON], SUREFIRE_SUBDIR)],
HDFS: [os.path.join(CTEST_HADOOP_DIR, MODULE_SUBDIR[HDFS], SUREFIRE_SUBDIR)],
+HDFSRBF: [os.path.join(CTEST_HADOOP_DIR, MODULE_SUBDIR[HDFSRBF], SUREFIRE_SUBDIR)],
HBASE: [os.path.join(CTEST_HBASE_DIR, MODULE_SUBDIR[HBASE], SUREFIRE_SUBDIR)],
ZOOKEEPER: [os.path.join(CTEST_ZK_DIR, MODULE_SUBDIR[ZOOKEEPER], SUREFIRE_SUBDIR)],
ALLUXIO: [
@@ -72,6 +77,7 @@
DEFAULT_CONF_FILE = {
HCOMMON: os.path.join(DEFAULT_CONF_DIR, HCOMMON + "-default.tsv"),
HDFS: os.path.join(DEFAULT_CONF_DIR, HDFS + "-default.tsv"),
+HDFSRBF: os.path.join(DEFAULT_CONF_DIR, HDFSRBF + "-default.tsv"),
HBASE: os.path.join(DEFAULT_CONF_DIR, HBASE + "-default.tsv"),
ALLUXIO: os.path.join(DEFAULT_CONF_DIR, ALLUXIO + "-default.tsv"),
ZOOKEEPER: os.path.join(DEFAULT_CONF_DIR, ZOOKEEPER + "-default.tsv")
@@ -87,6 +93,10 @@
os.path.join(CTEST_HADOOP_DIR, "hadoop-hdfs-project/hadoop-hdfs/target/classes/core-ctest.xml"),
os.path.join(CTEST_HADOOP_DIR, "hadoop-hdfs-project/hadoop-hdfs/target/classes/hdfs-ctest.xml")
],
+HDFSRBF: [
+os.path.join(CTEST_HADOOP_DIR, "hadoop-hdfs-project/hadoop-hdfs-rbf/target/classes/core-ctest.xml"),
+os.path.join(CTEST_HADOOP_DIR, "hadoop-hdfs-project/hadoop-hdfs-rbf/target/classes/hdfs-rbf-ctest.xml")
+],
HBASE: [
os.path.join(CTEST_HBASE_DIR, "hbase-server/target/classes/core-ctest.xml"),
os.path.join(CTEST_HBASE_DIR, "hbase-server/target/classes/hbase-ctest.xml")
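For reference, these dictionaries are what the rest of the framework uses to resolve a module name to concrete paths. The sketch below is a simplified stand-in for ctest_const.py, not code from the PR; APP_DIR's value and the Maven surefire layout are assumptions.

import os

# Simplified stand-ins for the ctest_const.py constants above.
CTEST_HADOOP_DIR = os.path.join("app", "ctest-hadoop")  # assumption
HDFSRBF = "hadoop-hdfs-rbf"
MODULE_SUBDIR = {HDFSRBF: "hadoop-hdfs-project/hadoop-hdfs-rbf"}
SUREFIRE_SUBDIR = "target/surefire-reports"  # assumption: standard Maven layout

def module_paths(project):
    # Resolve a registered project to its module dir and surefire-report dir.
    mod_dir = os.path.join(CTEST_HADOOP_DIR, MODULE_SUBDIR[project])
    return mod_dir, os.path.join(mod_dir, SUREFIRE_SUBDIR)

print(module_paths(HDFSRBF))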
66 changes: 66 additions & 0 deletions core/default_configs/hadoop-hdfs-rbf-default.tsv
@@ -0,0 +1,66 @@
dfs.federation.router.default.nameserviceId Nameservice identifier of the default subcluster to monitor.
dfs.federation.router.default.nameservice.enable true The default subcluster is enabled to read and write files.
dfs.federation.router.rpc.enable true If true, the RPC service to handle client requests in the router is enabled.
dfs.federation.router.rpc-address 0.0.0.0:8888 RPC address that handles all clients requests. The value of this property will take the form of router-host1:rpc-port.
dfs.federation.router.rpc-bind-host The actual address the RPC server will bind to. If this optional address is set, it overrides only the hostname portion of dfs.federation.router.rpc-address. This is useful for making the name node listen on all interfaces by setting it to 0.0.0.0.
dfs.federation.router.handler.count 10 The number of server threads for the router to handle RPC requests from clients.
dfs.federation.router.handler.queue.size 100 The size of the queue for the number of handlers to handle RPC client requests.
dfs.federation.router.reader.count 1 The number of readers for the router to handle RPC client requests.
dfs.federation.router.reader.queue.size 100 The size of the queue for the number of readers for the router to handle RPC client requests.
dfs.federation.router.connection.creator.queue-size 100 Size of async connection creator queue.
dfs.federation.router.connection.pool-size 1 Size of the pool of connections from the router to namenodes.
dfs.federation.router.connection.min-active-ratio 0.5f Minimum active ratio of connections from the router to namenodes.
dfs.federation.router.connection.clean.ms 10000 Time interval, in milliseconds, to check if the connection pool should remove unused connections.
dfs.federation.router.connection.pool.clean.ms 60000 Time interval, in milliseconds, to check if the connection manager should remove unused connection pools.
dfs.federation.router.metrics.enable true If the metrics in the router are enabled.
dfs.federation.router.dn-report.time-out 1000 Time out, in milliseconds for getDatanodeReport.
dfs.federation.router.dn-report.cache-expire 10s Expiration time in seconds for datanodereport.
dfs.federation.router.metrics.class org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCPerformanceMonitor Class to monitor the RPC system in the router. It must implement the RouterRpcMonitor interface.
dfs.federation.router.admin.enable true If true, the RPC admin service to handle client requests in the router is enabled.
dfs.federation.router.admin-address 0.0.0.0:8111 RPC address that handles the admin requests. The value of this property will take the form of router-host1:rpc-port.
dfs.federation.router.admin-bind-host The actual address the RPC admin server will bind to. If this optional address is set, it overrides only the hostname portion of dfs.federation.router.admin-address. This is useful for making the name node listen on all interfaces by setting it to 0.0.0.0.
dfs.federation.router.admin.handler.count 1 The number of server threads for the router to handle RPC requests from admin.
dfs.federation.router.http-address 0.0.0.0:50071 HTTP address that handles the web requests to the Router. The value of this property will take the form of router-host1:http-port.
dfs.federation.router.http-bind-host The actual address the HTTP server will bind to. If this optional address is set, it overrides only the hostname portion of dfs.federation.router.http-address. This is useful for making the name node listen on all interfaces by setting it to 0.0.0.0.
dfs.federation.router.https-address 0.0.0.0:50072 HTTPS address that handles the web requests to the Router. The value of this property will take the form of router-host1:https-port.
dfs.federation.router.https-bind-host The actual address the HTTPS server will bind to. If this optional address is set, it overrides only the hostname portion of dfs.federation.router.https-address. This is useful for making the name node listen on all interfaces by setting it to 0.0.0.0.
dfs.federation.router.http.enable true If the HTTP service to handle client requests in the router is enabled.
dfs.federation.router.file.resolver.client.class org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver Class to resolve files to subclusters. To enable multiple subclusters for a mount point, set to org.apache.hadoop.hdfs.server.federation.resolver.MultipleDestinationMountTableResolver.
dfs.federation.router.namenode.resolver.client.class org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver Class to resolve the namenode for a subcluster.
dfs.federation.router.store.enable true If true, the Router connects to the State Store.
dfs.federation.router.store.serializer org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl Class to serialize State Store records.
dfs.federation.router.store.driver.class org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl Class to implement the State Store. There are three implementation classes currently being supported: org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl, org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileSystemImpl and org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl. These implementation classes use the local file, filesystem and ZooKeeper as a backend respectively. By default it uses the ZooKeeper as the default State Store.
dfs.federation.router.store.connection.test 60000 How often to check for the connection to the State Store in milliseconds.
dfs.federation.router.cache.ttl 1m How often to refresh the State Store caches in milliseconds. This setting supports multiple time unit suffixes as described in dfs.heartbeat.interval. If no suffix is specified then milliseconds is assumed.
dfs.federation.router.store.membership.expiration 300000 Expiration time in milliseconds for a membership record.
dfs.federation.router.store.membership.expiration.deletion -1 Deletion time in milliseconds for a membership record. If an expired membership record exists beyond this time, it will be deleted. If this value is negative, the deletion is disabled.
dfs.federation.router.heartbeat.enable true If true, the Router heartbeats into the State Store.
dfs.federation.router.heartbeat.interval 5000 How often the Router should heartbeat into the State Store in milliseconds.
dfs.federation.router.heartbeat-state.interval 5s How often the Router should heartbeat its state into the State Store in milliseconds. This setting supports multiple time unit suffixes as described in dfs.federation.router.quota-cache.update.interval.
dfs.federation.router.namenode.heartbeat.enable If true, get namenode heartbeats and send into the State Store. If not explicitly specified takes the same value as for dfs.federation.router.heartbeat.enable.
dfs.federation.router.store.router.expiration 5m Expiration time in milliseconds for a router state record. This setting supports multiple time unit suffixes as described in dfs.federation.router.quota-cache.update.interval.
dfs.federation.router.store.router.expiration.deletion -1 Deletion time in milliseconds for a router state record. If an expired router state record exists beyond this time, it will be deleted. If this value is negative, the deletion is disabled.
dfs.federation.router.safemode.enable true
dfs.federation.router.safemode.extension 30s Time after startup that the Router is in safe mode. This setting supports multiple time unit suffixes as described in dfs.heartbeat.interval. If no suffix is specified then milliseconds is assumed.
dfs.federation.router.safemode.expiration 3m Time without being able to reach the State Store to enter safe mode. This setting supports multiple time unit suffixes as described in dfs.heartbeat.interval. If no suffix is specified then milliseconds is assumed.
dfs.federation.router.monitor.namenode The identifier of the namenodes to monitor and heartbeat.
dfs.federation.router.monitor.localnamenode.enable true If true, the Router should monitor the namenode in the local machine.
dfs.federation.router.mount-table.max-cache-size 10000 Maximum number of mount table cache entries to have. By default, remove cache entries if we have more than 10k.
dfs.federation.router.mount-table.cache.enable true Set to true to enable mount table cache (Path to Remote Location cache). Disabling the cache is recommended when a large amount of unique paths are queried.
dfs.federation.router.quota.enable false Set to true to enable quota system in Router. When it's enabled, setting or clearing sub-cluster's quota directly is not recommended since Router Admin server will override sub-cluster's quota with global quota.
dfs.federation.router.quota-cache.update.interval 60s Interval time for updating quota usage cache in Router. This property is used only if the value of dfs.federation.router.quota.enable is true. This setting supports multiple time unit suffixes as described in dfs.heartbeat.interval. If no suffix is specified then milliseconds is assumed.
dfs.federation.router.client.thread-size 32 Max threads size for the RouterClient to execute concurrent requests.
dfs.federation.router.client.retry.max.attempts 3 Max retry attempts for the RouterClient talking to the Router.
dfs.federation.router.client.reject.overload false Set to true to reject client requests when we run out of RPC client threads.
dfs.federation.router.client.allow-partial-listing true If the Router can return a partial list of files in a multi-destination mount point when one of the subclusters is unavailable. True may return a partial list of files if a subcluster is down. False will fail the request if one is unavailable.
dfs.federation.router.client.mount-status.time-out 1s Set a timeout for the Router when listing folders containing mount points. In this process, the Router checks the mount table and then it checks permissions in the subcluster. After the time out, we return the default values.
dfs.federation.router.connect.max.retries.on.timeouts 0 Maximum number of retries for the IPC Client when connecting to the subclusters. By default, it doesn't let the IPC retry and the Router handles it.
dfs.federation.router.connect.timeout 2s Time out for the IPC client connecting to the subclusters. This should be short as the Router has knowledge of the state of the Routers.
dfs.federation.router.keytab.file The keytab file used by router to login as its service principal. The principal name is configured with dfs.federation.router.kerberos.principal.
dfs.federation.router.kerberos.principal The Router service principal. This is typically set to router/[email protected]. Each Router will substitute _HOST with its own fully qualified hostname at startup. The _HOST placeholder allows using the same configuration setting on both Router in an HA setup.
dfs.federation.router.kerberos.principal.hostname Optional. The hostname for the Router containing this configuration file. Will be different for each machine. Defaults to current hostname.
dfs.federation.router.kerberos.internal.spnego.principal ${dfs.web.authentication.kerberos.principal} The server principal used by the Router for web UI SPNEGO authentication when Kerberos security is enabled. This is typically set to HTTP/[email protected] The SPNEGO server principal begins with the prefix HTTP/ by convention. If the value is '*', the web server will attempt to login with every principal specified in the keytab file dfs.web.authentication.kerberos.keytab.
dfs.federation.router.mount-table.cache.update false Set true to enable MountTableRefreshService. This service updates mount table cache immediately after adding, modifying or deleting the mount table entries. If this service is not enabled mount table cache are refreshed periodically by StateStoreCacheUpdateService
dfs.federation.router.mount-table.cache.update.timeout 1m This property defines how long to wait for all the admin servers to finish their mount table cache update. This setting supports multiple time unit suffixes as described in dfs.federation.router.safemode.extension.
dfs.federation.router.mount-table.cache.update.client.max.time 5m Remote router mount table cache is updated through RouterClient(RPC call). To improve performance, RouterClient connections are cached but it should not be kept in cache forever. This property defines the max time a connection can be cached. This setting supports multiple time unit suffixes as described in dfs.federation.router.safemode.extension.
dfs.federation.router.secret.manager.class org.apache.hadoop.hdfs.server.federation.router.security.token.ZKDelegationTokenSecretManagerImpl Class to implement state store to delegation tokens. Default implementation uses zookeeper as the backend to store delegation tokens.
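The default-config files are tab-separated rows of parameter name, default value (possibly empty, as for the bind-host parameters above), and description. A minimal reader sketch, assuming that three-column layout:

import csv

def load_defaults(path):
    # Map parameter name -> default value; missing defaults become "".
    defaults = {}
    with open(path, newline="") as f:
        for row in csv.reader(f, delimiter="\t"):
            if row:
                defaults[row[0]] = row[1] if len(row) > 1 else ""
    return defaults

# e.g. load_defaults("core/default_configs/hadoop-hdfs-rbf-default.tsv")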


4 changes: 2 additions & 2 deletions core/generate_ctest/inject.py
@@ -21,7 +21,7 @@ def inject_config(param_value_pairs):
for p, v in param_value_pairs.items():
file.write(p + "=" + v + "\n")
file.close()
-elif project in [HCOMMON, HDFS, HBASE]:
+elif project in [HCOMMON, HDFS, HDFSRBF, HBASE]:
conf = ET.Element("configuration")
for p, v in param_value_pairs.items():
prop = ET.SubElement(conf, "property")
@@ -46,7 +46,7 @@ def clean_conf_file(project):
file = open(inject_path, "w")
file.write("\n")
file.close()
-elif project in [HCOMMON, HDFS, HBASE]:
+elif project in [HCOMMON, HDFS, HDFSRBF, HBASE]:
conf = ET.Element("configuration")
for inject_path in INJECTION_PATH[project]:
file = open(inject_path, "wb")
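For the XML-configured projects, which now include hadoop-hdfs-rbf, inject_config serializes parameter/value pairs into the ctest XML resources, as the hunks above show. A self-contained sketch of that serialization; the output filename here is only illustrative:

import xml.etree.ElementTree as ET

def write_ctest_xml(param_value_pairs, inject_path):
    # Serialize each pair as a <property><name>...</name><value>...</value> entry.
    conf = ET.Element("configuration")
    for p, v in param_value_pairs.items():
        prop = ET.SubElement(conf, "property")
        ET.SubElement(prop, "name").text = p
        ET.SubElement(prop, "value").text = v
    ET.ElementTree(conf).write(inject_path, encoding="utf-8")

write_ctest_xml({"dfs.federation.router.handler.count": "20"},
                "hdfs-rbf-ctest.xml")  # illustrative output path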
6 changes: 3 additions & 3 deletions core/generate_ctest/program_input.py
@@ -4,11 +4,11 @@
# run mode
"run_mode": "generate_ctest", # string
# name of the project, i.e. hadoop-common, hadoop-hdfs, see constant.py
-"project": "hadoop-common", # string
+"project": "hadoop-hdfs-rbf", # string
# path to param -> tests json mapping
-"mapping_path": "../../data/ctest_mapping/opensource-hadoop-common.json", # string
+"mapping_path": "../../data/ctest_mapping/opensource-hadoop-hdfs-rbf.json", # string
# good values of params tests will be run against
-"param_value_tsv": "sample-hadoop-common.tsv", # string
+"param_value_tsv": "hadoop-hdfs-rbf.tsv", # string
# display the terminal output live, without saving any results
"display_mode": False, # bool
# whether to use mvn test or mvn surefire:test
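Pieced together, the generate_ctest input for the new module looks roughly like this; only the keys visible in the diff are shown, the rest of the dict is elided:

program_input = {
    "run_mode": "generate_ctest",  # string
    # name of the project, see ctest_const.py
    "project": "hadoop-hdfs-rbf",  # string
    # path to param -> tests json mapping
    "mapping_path": "../../data/ctest_mapping/opensource-hadoop-hdfs-rbf.json",
    # good values of params tests will be run against
    "param_value_tsv": "hadoop-hdfs-rbf.tsv",  # string
    "display_mode": False,  # bool
    # ... remaining options (mvn test vs surefire:test, etc.) unchanged
}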
2 changes: 1 addition & 1 deletion core/generate_ctest/run_test.py
@@ -59,5 +59,5 @@ def run_test_seperate(param, value, associated_tests):
os.chdir(CUR_DIR)
print(">>>>[ctest_core] chdir to {}".format(CUR_DIR))
print(">>>>[ctest_core] python-timed for running config pair: {}".format(duration))
-clean_conf_file(project)
+# clean_conf_file(project)
return tr
90 changes: 90 additions & 0 deletions core/generate_value/hadoop-common-generated-values.tsv
@@ -0,0 +1,90 @@
hadoop.http.filter.initializers SKIP SKIP
hadoop.security.group.mapping SKIP SKIP
hadoop.security.dns.log-slow-lookups.threshold.ms 500 2000
hadoop.security.groups.cache.secs 150 600
hadoop.security.groups.cache.warn.after.ms 2500 10000
hadoop.security.groups.cache.background.reload.threads 1 6
hadoop.security.group.mapping.ldap.connection.timeout.ms 30000 120000
hadoop.security.group.mapping.ldap.search.filter.user xdsuper samsuper
hadoop.security.group.mapping.ldap.search.filter.group xdgroup samgroup
hadoop.security.group.mapping.ldap.search.attr.member SKIP SKIP
hadoop.security.group.mapping.ldap.search.attr.group.name SKIP SKIP
hadoop.security.group.mapping.ldap.posix.attr.gid.name SKIP SKIP
hadoop.security.group.mapping.ldap.directory.search.timeout 5000 20000
hadoop.security.uid.cache.secs 7200 28800
hadoop.rpc.protection integrity privacy
hadoop.security.saslproperties.resolver.class SKIP SKIP
hadoop.security.sensitive-config-keys SKIP SKIP
hadoop.kerberos.kinit.command SKIP SKIP
hadoop.kerberos.min.seconds.before.relogin 30 120
io.file.buffer.size 2048 8192
io.bytes.per.checksum 256 1024
io.serializations org.apache.hadoop.io.serializer.WritableSerialization org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization
io.seqfile.local.dir /valid/file1 /valid/file2
io.map.index.interval 64 256
fs.defaultFS SKIP SKIP
fs.trash.interval 1 -1
fs.trash.checkpoint.interval 1 -1
fs.AbstractFileSystem.file.impl SKIP SKIP
fs.AbstractFileSystem.har.impl SKIP SKIP
fs.AbstractFileSystem.viewfs.impl SKIP SKIP
fs.viewfs.rename.strategy SAME_TARGET_URI_ACROSS_MOUNTPOINT SAME_FILESYSTEM_ACROSS_MOUNTPOINT
fs.ftp.host 127.0.0.1 SKIP
fs.df.interval 30000 120000
fs.s3a.impl SKIP SKIP
io.seqfile.compress.blocksize 500000 2000000
io.mapfile.bloom.error.rate 0.0025 0.01
hadoop.util.hash.type SKIP SKIP
ipc.client.idlethreshold 2000 8000
ipc.client.kill.max 1 20
ipc.client.connection.maxidletime 5000 20000
ipc.client.connect.max.retries 1 20
ipc.client.connect.retry.interval 500 2000
ipc.ping.interval 30000 120000
ipc.client.rpc-timeout.ms 1 -1
ipc.maximum.data.length 33554432 134217728
ipc.maximum.response.length 67108864 268435456
hadoop.rpc.socket.factory.class.default SKIP SKIP
net.topology.script.file.name /valid/file1 /valid/file2
net.topology.script.number.args 50 200
net.topology.table.file.name /valid/file1 /valid/file2
file.stream-buffer-size 2048 8192
file.bytes-per-checksum 256 1024
file.blocksize 33554432 134217728
file.replication 0 2
ftp.bytes-per-checksum 256 1024
ftp.client-write-packet-size 32768 131072
ftp.replication 1 6
tfile.fs.output.buffer.size 131072 524288
tfile.fs.input.buffer.size 131072 524288
hadoop.http.authentication.type SKIP SKIP
hadoop.http.authentication.signature.secret.file /valid/file1 /valid/file2
hadoop.http.authentication.kerberos.principal HTTP MUST
hadoop.http.authentication.kerberos.keytab /valid/file1 /valid/file2
hadoop.http.staticuser.user xdsuper samsuper
ha.zookeeper.session-timeout.ms 5000 20000
ha.zookeeper.parent-znode /valid/file1 /valid/file2
ha.zookeeper.acl SKIP SKIP
hadoop.ssl.keystores.factory.class SKIP SKIP
hadoop.ssl.server.conf /valid/file1 /valid/file2
hadoop.ssl.client.conf /valid/file1 /valid/file2
hadoop.ssl.enabled.protocols TLSv1 SSLv2Hello
fs.permissions.umask-mode 007 002
ha.health-monitor.rpc-timeout.ms 22500 90000
ha.failover-controller.new-active.rpc-timeout.ms 30000 120000
ha.failover-controller.graceful-fence.rpc-timeout.ms 2500 10000
ha.failover-controller.graceful-fence.connection.retries 0 2
ha.failover-controller.cli-check.rpc-timeout.ms 10000 40000
hadoop.user.group.static.mapping.overrides SKIP SKIP
hadoop.security.crypto.cipher.suite SKIP SKIP
hadoop.security.crypto.jce.provider SKIP SKIP
hadoop.security.crypto.buffer.size 4096 16384
hadoop.security.java.secure.random.algorithm SKIP SKIP
hadoop.security.random.device.file.path /valid/file1 /valid/file2
hadoop.security.kms.client.authentication.retry-count 0 2
hadoop.security.kms.client.encrypted.key.cache.size 250 1000
hadoop.security.kms.client.encrypted.key.cache.expiry 21600000 86400000
hadoop.security.kms.client.failover.sleep.base.millis 50 200
fs.client.htrace.sampler.classes SKIP SKIP
hadoop.caller.context.max.size 64 256
hadoop.caller.context.signature.max.size 20 80
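The generated-value TSVs appear to follow a simple convention: each parameter gets two candidate values, numeric defaults are roughly halved and doubled (e.g. hadoop.security.dns.log-slow-lookups.threshold.ms, default 1000, gets 500 and 2000), and parameters with no safely generatable value are marked SKIP. A hedged sketch of that rule for integer defaults; other types clearly get type-specific handling (booleans flipped, paths swapped for /valid/file1 and /valid/file2):

def candidate_values(default):
    # Halve and double integer defaults; anything else is SKIP here for
    # brevity (the real generation handles more types than this).
    try:
        n = int(default)
    except (TypeError, ValueError):
        return ("SKIP", "SKIP")
    return (str(n // 2), str(n * 2))

print(candidate_values("1000"))  # ('500', '2000')
print(candidate_values("org.apache.hadoop.io.serializer.WritableSerialization"))  # ('SKIP', 'SKIP')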
88 changes: 88 additions & 0 deletions core/generate_value/hadoop-hdfs-rbf-generated-values.tsv
@@ -0,0 +1,88 @@
dfs.federation.router.default.nameserviceId SKIP SKIP
dfs.federation.router.default.nameservice.enable false SKIP
dfs.federation.router.rpc.enable false SKIP
dfs.federation.router.rpc-address 0.0.0.0:3000 0.0.0.0:3001
dfs.federation.router.rpc-bind-host SKIP SKIP
dfs.federation.router.handler.count 1 20
dfs.federation.router.handler.queue.size 50 200
dfs.federation.router.reader.count 0 2
dfs.federation.router.reader.queue.size 50 200
dfs.federation.router.connection.creator.queue-size 50 200
dfs.federation.router.connection.pool-size 0 2
dfs.federation.router.connection.min-active-ratio 0.25 1.0
dfs.federation.router.connection.clean.ms 5000 20000
dfs.federation.router.enable.multiple.socket true SKIP
dfs.federation.router.max.concurrency.per.connection 0 2
dfs.federation.router.connection.pool.clean.ms 30000 120000
dfs.federation.router.metrics.enable false SKIP
dfs.federation.router.dn-report.time-out 500 2000
dfs.federation.router.dn-report.cache-expire 1s 20s
dfs.federation.router.enable.get.dn.usage false SKIP
dfs.federation.router.metrics.class SKIP SKIP
dfs.federation.router.admin.enable false SKIP
dfs.federation.router.admin-address 0.0.0.0:3000 0.0.0.0:3001
dfs.federation.router.admin-bind-host SKIP SKIP
dfs.federation.router.admin.handler.count 0 2
dfs.federation.router.admin.mount.check.enable true SKIP
dfs.federation.router.http-address 0.0.0.0:3000 0.0.0.0:3001
dfs.federation.router.http-bind-host SKIP SKIP
dfs.federation.router.https-address 0.0.0.0:3000 0.0.0.0:3001
dfs.federation.router.https-bind-host SKIP SKIP
dfs.federation.router.http.enable false SKIP
dfs.federation.router.fs-limits.max-component-length 1 -1
dfs.federation.router.file.resolver.client.class SKIP SKIP
dfs.federation.router.namenode.resolver.client.class SKIP SKIP
dfs.federation.router.store.enable false SKIP
dfs.federation.router.store.serializer SKIP SKIP
dfs.federation.router.store.driver.class SKIP SKIP
dfs.federation.router.store.connection.test 30000 120000
dfs.federation.router.cache.ttl 10m 2m
dfs.federation.router.store.membership.expiration 150000 600000
dfs.federation.router.store.membership.expiration.deletion 0 -2
dfs.federation.router.heartbeat.enable false SKIP
dfs.federation.router.heartbeat.interval 2500 10000
dfs.federation.router.health.monitor.timeout 1s 60s
dfs.federation.router.heartbeat-state.interval 1s 10s
dfs.federation.router.namenode.heartbeat.enable SKIP SKIP
dfs.federation.router.store.router.expiration 1m 10m
dfs.federation.router.store.router.expiration.deletion 0 -2
dfs.federation.router.safemode.enable false SKIP
dfs.federation.router.safemode.extension 1s 60s
dfs.federation.router.safemode.expiration 1m 6m
dfs.federation.router.monitor.namenode SKIP SKIP
dfs.federation.router.monitor.namenode.nameservice.resolution-enabled true SKIP
dfs.federation.router.monitor.namenode.nameservice.resolver.impl SKIP SKIP
dfs.federation.router.monitor.localnamenode.enable false SKIP
dfs.federation.router.mount-table.max-cache-size 5000 20000
dfs.federation.router.mount-table.cache.enable false SKIP
dfs.federation.router.quota.enable true SKIP
dfs.federation.router.quota-cache.update.interval 1s 120s
dfs.federation.router.client.thread-size 16 64
dfs.federation.router.client.retry.max.attempts 1 6
dfs.federation.router.client.reject.overload true SKIP
dfs.federation.router.client.allow-partial-listing false SKIP
dfs.federation.router.client.mount-status.time-out 10s 2s
dfs.federation.router.connect.max.retries.on.timeouts 1 -1
dfs.federation.router.connect.timeout 1s 4s
dfs.federation.router.keytab.file /valid/file1 /valid/file2
dfs.federation.router.kerberos.principal SKIP SKIP
dfs.federation.router.kerberos.principal.hostname 127.0.0.1 SKIP
dfs.federation.router.kerberos.internal.spnego.principal SKIP SKIP
dfs.federation.router.mount-table.cache.update true SKIP
dfs.federation.router.mount-table.cache.update.timeout 10m 2m
dfs.federation.router.mount-table.cache.update.client.max.time 1m 10m
dfs.federation.router.secret.manager.class SKIP SKIP
dfs.federation.router.top.num.token.realowners 1 20
dfs.federation.router.fairness.policy.controller.class SKIP SKIP
dfs.federation.router.fairness.handler.count.EXAMPLENAMESERVICE SKIP SKIP
dfs.federation.router.fairness.acquire.timeout 10s 2s
dfs.federation.router.federation.rename.bandwidth 1 20
dfs.federation.router.federation.rename.map 1 20
dfs.federation.router.federation.rename.delay 500 2000
dfs.federation.router.federation.rename.diff 1 -1
dfs.federation.router.federation.rename.option DISTCP SKIP
dfs.federation.router.federation.rename.force.close.open.file false SKIP
dfs.federation.router.federation.rename.trash SKIP SKIP
dfs.federation.router.observer.read.default true SKIP
dfs.federation.router.observer.read.overrides SKIP SKIP
dfs.federation.router.observer.federated.state.propagation.maxsize 1 10
5 changes: 5 additions & 0 deletions core/generate_value/value_generation.py
@@ -25,6 +25,9 @@ def read_tsv(module):
if module == "zookeeper-server":
assert len(params) == 32
return 32
+elif module == "hadoop-hdfs-rbf":
+assert len(params) == 88
+return 88
else:
assert len(params) == 90
return 90
@@ -105,6 +108,8 @@ def print_params(module):
f = open(module + output, "w")
if module == "zookeeper-server":
assert len(params) == 32
+elif module == "hadoop-hdfs-rbf":
+assert len(params) == 88
else:
assert len(params) >= 90
for param in params:
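The per-module asserts hard-code the expected parameter counts in two places. A table-driven alternative (a sketch, not what the PR implements) would keep them in one:

EXPECTED_PARAM_COUNT = {
    "zookeeper-server": 32,
    "hadoop-hdfs-rbf": 88,
}

def check_param_count(module, params):
    # Fall back to the 90-parameter expectation used by the else branches.
    expected = EXPECTED_PARAM_COUNT.get(module, 90)
    assert len(params) == expected, (module, len(params), expected)
    return expected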
2 changes: 2 additions & 0 deletions core/identify_param/add_project.sh
@@ -11,6 +11,8 @@ function setup_hadoop() {
mvn clean install -DskipTests
cd $home_dir/hadoop-hdfs-project/hadoop-hdfs
mvn package -DskipTests
cd $home_dir/hadoop-hdfs-project/hadoop-hdfs-rbf
mvn package -DskipTests
}

function setup_hbase() {
13 changes: 12 additions & 1 deletion core/identify_param/constant.py
@@ -3,21 +3,24 @@
CUR_DIR = os.path.dirname(os.path.realpath(__file__))
APP_DIR = os.path.join(CUR_DIR, "app")

-CTEST_HADOOP_DIR = os.path.join(APP_DIR, "ctest-hadoop")
+# CTEST_HADOOP_DIR = os.path.join(APP_DIR, "ctest-hadoop")
+CTEST_HADOOP_DIR = os.path.join(CUR_DIR, "../../../hadoop")
CTEST_HBASE_DIR = os.path.join(APP_DIR, "ctest-hbase")
CTEST_ZOOKEEPER_DIR = os.path.join(APP_DIR, "ctest-zookeeper")
CTEST_ALLUXIO_DIR = os.path.join(APP_DIR, "ctest-alluxio")

MODULE_PATH = {
"hadoop-common": CTEST_HADOOP_DIR,
"hadoop-hdfs": CTEST_HADOOP_DIR,
+"hadoop-hdfs-rbf": CTEST_HADOOP_DIR,
"hbase-server": CTEST_HBASE_DIR,
"alluxio-core": CTEST_ALLUXIO_DIR
}

SRC_SUBDIR = {
"hadoop-common": "hadoop-common-project/hadoop-common",
"hadoop-hdfs": "hadoop-hdfs-project/hadoop-hdfs",
+"hadoop-hdfs-rbf": "hadoop-hdfs-project/hadoop-hdfs-rbf",
"hbase-server": "hbase-server",
"zookeeper-server": "zookeeper-server",
"alluxio-core": "core"
@@ -26,6 +29,7 @@
MVN_TEST_PATH = {
"hadoop-common": os.path.join(CTEST_HADOOP_DIR, SRC_SUBDIR["hadoop-common"]),
"hadoop-hdfs": os.path.join(CTEST_HADOOP_DIR, SRC_SUBDIR["hadoop-hdfs"]),
+"hadoop-hdfs-rbf": os.path.join(CTEST_HADOOP_DIR, SRC_SUBDIR["hadoop-hdfs-rbf"]),
"hbase-server": os.path.join(CTEST_HBASE_DIR, SRC_SUBDIR["hbase-server"]),
"zookeeper-server": os.path.join(CTEST_ZOOKEEPER_DIR, SRC_SUBDIR["zookeeper-server"]),
"alluxio-core": os.path.join(CTEST_ALLUXIO_DIR, SRC_SUBDIR["alluxio-core"]),
@@ -34,6 +38,7 @@
LOCAL_CONF_PATH = {
"hadoop-common": "results/hadoop-common/conf_params.txt",
"hadoop-hdfs": "results/hadoop-hdfs/conf_params.txt",
+"hadoop-hdfs-rbf": "results/hadoop-hdfs-rbf/conf_params.txt",
"hbase-server": "results/hbase-server/conf_params.txt",
"zookeeper-server": "results/zookeeper-server/conf_params.txt",
"alluxio-core": "results/alluxio-core/conf_params.txt"
@@ -48,6 +53,9 @@
"hadoop-hdfs": [
os.path.join(CTEST_HADOOP_DIR, SRC_SUBDIR["hadoop-hdfs"], SUREFIRE_SUBDIR)
],
+"hadoop-hdfs-rbf": [
+os.path.join(CTEST_HADOOP_DIR, SRC_SUBDIR["hadoop-hdfs-rbf"], SUREFIRE_SUBDIR)
+],
"hbase-server": [
os.path.join(CTEST_HBASE_DIR, "hbase-server", SUREFIRE_SUBDIR)
],
@@ -75,6 +83,9 @@
"hadoop-hdfs": [
os.path.join("surefire-reports/hdfs/hadoop-hdfs", LOCAL_SUREFIRE_SUFFIX)
],
+"hadoop-hdfs-rbf": [
+os.path.join("surefire-reports/hdfs/hadoop-hdfs-rbf", LOCAL_SUREFIRE_SUFFIX)
+],
"hbase-server": [
os.path.join("surefire-reports/hbase/hbase-server", LOCAL_SUREFIRE_SUFFIX)
],
4 changes: 2 additions & 2 deletions core/identify_param/identify_param.sh
@@ -12,9 +12,9 @@ function main() {
usage
else
case $project in
-hadoop-common | hadoop-hdfs | hbase-server | zookeeper-server | alluxio-core) python3 runner.py $project; python3 collector.py $project ;;
+hadoop-common | hadoop-hdfs | hadoop-hdfs-rbf | hbase-server | zookeeper-server | alluxio-core) python3 runner.py $project; python3 collector.py $project ;;
-h | --help) usage ;;
-*) echo "Unexpected project: $project - only support hadoop-common, hadoop-hdfs, hbase-server, zookeeper-server and alluxio-core." ;;
+*) echo "Unexpected project: $project - only support hadoop-common, hadoop-hdfs, hadoop-hdfs-rbf, hbase-server, zookeeper-server and alluxio-core." ;;
esac
fi
}
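With this change, parameter identification for the new module is invoked the same way as for the existing ones, e.g. ./identify_param.sh hadoop-hdfs-rbf, which runs runner.py and then collector.py for hadoop-hdfs-rbf per the case arm above.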
3,421 changes: 3,421 additions & 0 deletions core/identify_param/results/hadoop-hdfs-rbf/conf_params.txt

Large diffs are not rendered by default.

30,587 changes: 30,587 additions & 0 deletions core/identify_param/results/hadoop-hdfs-rbf/param_unset_getter_map.json

Large diffs are not rendered by default.

137 changes: 137 additions & 0 deletions core/identify_param/results/hadoop-hdfs-rbf/test_method_list.json
@@ -0,0 +1,137 @@
[
"org.apache.hadoop.hdfs.server.federation.resolver.TestMountTableResolver#testTrailingSlashInInputPath",
"org.apache.hadoop.hdfs.server.federation.resolver.order.TestAvailableSpaceResolver#testResolverWithNoPreference",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMountTableResolver#testRemoveLeafNode",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMultipleDestinationResolver#testResolveSubdirectories",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMountTableResolver#testGetMounts",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMountTableResolver#testCacheCleaning",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMountTableResolver#testLocationCache",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMultipleDestinationResolver#testRandomEqualDistribution",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMultipleDestinationResolver#testLocalResolver",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMountTableResolver#testRefreshEntries",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMountTableResolver#testInvalidateCache",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMountTableResolver#testMountTableScalability",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMultipleDestinationResolver#testRandomResolver",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterNetworkTopologyServlet#testPrintTopologyJsonFormat",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMultipleDestinationResolver#testHashFirst",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterMountTableCacheRefreshSecure#testMountTableEntriesCacheUpdatedAfterRemoveAPICall",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterMountTableCacheRefreshSecure#testCachedRouterClientBehaviourAfterRouterStoped",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMountTableResolver#testGetMountPointOfConsecutiveSlashes",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMountTableResolver#testMuiltipleDestinations",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterNetworkTopologyServlet#testPrintTopologyNoDatanodesTextFormat",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterMountTableCacheRefreshSecure#testMountTableEntriesCacheUpdatedAfterAddAPICall",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMountTableResolver#testRemoveVirtualNode",
"org.apache.hadoop.hdfs.server.federation.resolver.TestInitializeMountTableResolver#testRouterDefaultNameservice",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterNetworkTopologyServlet#testPrintTopologyNoDatanodesJsonFormat",
"org.apache.hadoop.hdfs.server.federation.resolver.TestInitializeMountTableResolver#testDefaultNameserviceIsMissing",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMultipleDestinationResolver#testHashAll",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMultipleDestinationResolver#testSingleDestination",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMountTableResolver#testGetMountPoint",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMountTableResolver#testRemoveSubTree",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterMissingFolderMulti#testOneMissing",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMountTableResolver#testDestinationOfConsecutiveSlash",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMountTableResolver#testDisableLocalCache",
"org.apache.hadoop.hdfs.server.federation.resolver.TestInitializeMountTableResolver#testRouterDefaultNameserviceDisabled",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMountTableResolver#testSuccessiveSlashesInInputPath",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterMissingFolderMulti#testFileNotFound",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMountTableResolver#testDefaultNameServiceEnable",
"org.apache.hadoop.hdfs.server.federation.resolver.order.TestAvailableSpaceResolver#testResolverWithDefaultPreference",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterNetworkTopologyServlet#testPrintTopologyTextFormat",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMultipleDestinationResolver#testReadOnly",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMountTableResolver#testGetMountPoints",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterMountTableCacheRefreshSecure#testMountTableEntriesCacheUpdatedAfterUpdateAPICall",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterMissingFolderMulti#testSuccess",
"org.apache.hadoop.hdfs.server.federation.resolver.TestInitializeMountTableResolver#testDefaultNameserviceWithEmptyString",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMultipleDestinationResolver#testHashEqualDistribution",
"org.apache.hadoop.hdfs.server.federation.resolver.order.TestLocalResolver#testLocalResolver",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMultipleDestinationResolver#testExtractTempFileName",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMountTableResolver#testUpdate",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMountTableResolver#testGetMountsOfConsecutiveSlashes",
"org.apache.hadoop.hdfs.server.federation.resolver.TestMountTableResolver#testDestination",
"org.apache.hadoop.hdfs.server.federation.store.driver.TestStateStoreFile#testInsert",
"org.apache.hadoop.hdfs.server.federation.store.TestStateStoreMembershipState#testRegistrationQuorumAllExpired",
"org.apache.hadoop.hdfs.server.federation.store.TestStateStoreMountTable#testSynchronizeMountTable",
"org.apache.hadoop.hdfs.server.federation.store.driver.TestStateStoreFile#testMetrics",
"org.apache.hadoop.hdfs.server.federation.store.driver.TestStateStoreFile#testFetchErrors",
"org.apache.hadoop.hdfs.server.federation.store.driver.TestStateStoreZK#testGetNullRecord",
"org.apache.hadoop.hdfs.server.federation.resolver.TestNamenodeResolver#testStateStoreDisconnected",
"org.apache.hadoop.hdfs.server.federation.store.TestStateStoreMountTable#testAddMountTableEntry",
"org.apache.hadoop.hdfs.server.federation.store.driver.TestStateStoreFile#testUpdate",
"org.apache.hadoop.hdfs.server.federation.store.driver.TestStateStoreFileSystem#testMetrics",
"org.apache.hadoop.hdfs.server.federation.store.TestStateStoreRouterState#testStateStoreDisconnected",
"org.apache.hadoop.hdfs.server.federation.store.driver.TestStateStoreFileSystem#testUpdate",
"org.apache.hadoop.hdfs.server.federation.store.TestStateStoreRouterState#testUpdateRouterStatus",
"org.apache.hadoop.hdfs.server.federation.resolver.TestNamenodeResolver#testRegistrationNamenodeSelection",
"org.apache.hadoop.hdfs.server.federation.store.records.TestMembershipState#testGetterSetter",
"org.apache.hadoop.hdfs.server.federation.store.records.TestRouterState#testSerialization",
"org.apache.hadoop.hdfs.server.federation.store.records.TestMountTable#testReadOnly",
"org.apache.hadoop.hdfs.server.federation.store.driver.TestStateStoreFileSystem#testFetchErrors",
"org.apache.hadoop.hdfs.server.federation.store.driver.TestStateStoreFile#testDelete",
"org.apache.hadoop.hdfs.server.federation.store.TestStateStoreMountTable#testUpdateMountTableEntry",
"org.apache.hadoop.hdfs.server.federation.store.driver.TestStateStoreFileSystem#testDelete",
"org.apache.hadoop.hdfs.server.federation.store.records.TestMountTable#testSerialization",
"org.apache.hadoop.hdfs.server.federation.store.TestStateStoreMembershipState#testNamenodeStateOverride",
"org.apache.hadoop.hdfs.server.federation.store.TestStateStoreMembershipState#testRegistrationMajorityQuorum",
"org.apache.hadoop.hdfs.server.federation.store.records.TestMembershipState#testSerialization",
"org.apache.hadoop.hdfs.server.federation.store.TestStateStoreRouterState#testGetAllRouterStates",
"org.apache.hadoop.hdfs.server.federation.store.TestStateStoreMembershipState#testRegistrationQuorumExcludesExpired",
"org.apache.hadoop.hdfs.server.federation.store.driver.TestStateStoreZK#testDelete",
"org.apache.hadoop.hdfs.server.federation.store.records.TestMountTable#testValidation",
"org.apache.hadoop.hdfs.server.federation.store.TestStateStoreMembershipState#testStateStoreDisconnected",
"org.apache.hadoop.hdfs.server.federation.store.TestStateStoreMountTable#testStateStoreDisconnected",
"org.apache.hadoop.hdfs.server.federation.store.driver.TestStateStoreZK#testInsert",
"org.apache.hadoop.hdfs.server.federation.store.records.TestMountTable#testQuota",
"org.apache.hadoop.hdfs.server.federation.store.TestStateStoreMembershipState#testRegistrationExpiredAndDeletion",
"org.apache.hadoop.hdfs.server.federation.store.records.TestMountTable#testGetterSetter",
"org.apache.hadoop.hdfs.server.federation.store.driver.TestStateStoreZK#testUpdate",
"org.apache.hadoop.hdfs.server.federation.store.driver.TestStateStoreFileSystem#testInsert",
"org.apache.hadoop.hdfs.server.federation.resolver.TestNamenodeResolver#testCacheUpdateOnNamenodeStateUpdateWithIp",
"org.apache.hadoop.hdfs.server.federation.store.TestStateStoreMembershipState#testNamespaceInfoWithUnavailableNameNodeRegistration",
"org.apache.hadoop.hdfs.server.federation.store.TestStateStoreMembershipState#testRegistrationNoQuorum",
"org.apache.hadoop.hdfs.server.federation.store.records.TestMountTable#testOrder",
"org.apache.hadoop.hdfs.server.federation.store.driver.TestStateStoreZK#testFetchErrors",
"org.apache.hadoop.hdfs.server.federation.store.TestStateStoreMountTable#testRemoveMountTableEntry",
"org.apache.hadoop.hdfs.server.federation.store.records.TestMountTable#testFaultTolerant",
"org.apache.hadoop.hdfs.server.federation.resolver.TestNamenodeResolver#testRegistrationExpired",
"org.apache.hadoop.hdfs.server.federation.store.TestStateStoreDisabledNameservice#testDisableNameservice",
"org.apache.hadoop.hdfs.server.federation.store.records.TestRouterState#testGetterSetter",
"org.apache.hadoop.hdfs.server.federation.resolver.TestNamenodeResolver#testCacheUpdateOnNamenodeStateUpdate",
"org.apache.hadoop.hdfs.server.federation.store.TestStateStoreRouterState#testRouterStateExpiredAndDeletion",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testProxyGetFileInfoAcessException",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testProxyStoragePolicy",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testProxyGetBlocks",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testPreviousBlockNotNull",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testMkdirsWithCallerContext",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testSubclusterDown",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testGetContentSummaryEc",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testContentSummaryWithSnapshot",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testGetCachedDatanodeReport",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testGetCurrentTXIDandRollEdits",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testAllowDisallowSnapshots",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testProxyGetDatanodeStorageReport",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testProxyChownFiles",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testMkdirWithDisableNameService",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testProxyGetTransactionID",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testProxyTruncateFile",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testIsFileClosed",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testRecoverLease",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcSingleNS#testSaveNamespace",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testProxyGetMostRecentCheckpointTxId",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testProxyOpWithRemoteException",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testProxyGetPreferedBlockSize",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testProxyGetAndUnsetStoragePolicy",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testManageSnapshot",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testProxyListFiles",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testProxyGetStats",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testGetSnapshotListing",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testRenewLeaseForReplicaFile",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testNamenodeMetricsEnteringMaintenanceNodes",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testRpcService",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testSetBalancerBandwidth",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testProxyGetDatanodeReport",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testCacheAdmin",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testGetReplicatedBlockStats",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination#testGetServerDefaults",
"org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcSingleNS#testGetCurrentTXIDandRollEdits",
"org.apache.hadoop.hdfs.server.federation.router.TestFederationUtil#testInstanceCreation"
]
2 changes: 1 addition & 1 deletion core/identify_param/runner.py
Original file line number Diff line number Diff line change
@@ -65,7 +65,7 @@ def skipTrace(self, trace):
return True
if "sun.reflect" in trace:
return True
-if self.module == "hadoop-common" or self.module == "hadoop-hdfs" or self.module == "hbase-server":
+if self.module == "hadoop-common" or self.module == "hadoop-hdfs" or self.module == "hadoop-hdfs-rbf" or self.module == "hbase-server":
if "org.apache.hadoop.conf" in trace and "Test" not in trace:
return True
if "org.mockito" in trace:
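The chained equality checks keep growing as modules are added; a membership test reads better (a readability sketch, not part of the PR):

def uses_hadoop_conf(module):
    # Equivalent to the chained comparisons in skipTrace above.
    return module in {"hadoop-common", "hadoop-hdfs", "hadoop-hdfs-rbf", "hbase-server"}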
95 changes: 95 additions & 0 deletions core/patch/hdfs-rbf/interception.patch
@@ -0,0 +1,95 @@
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 530e18e4b83..9fc70dbeccc 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -509,6 +509,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
+ <version>3.0.0-M4</version>
<configuration>
<systemPropertyVariables>
<runningWithNative>${runningWithNative}</runningWithNative>
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 818ef37eb79..39461edaee9 100755
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -786,6 +786,7 @@ private void handleDeprecation() {
// Add default resources
addDefaultResource("core-default.xml");
addDefaultResource("core-site.xml");
+ addDefaultResource("core-ctest.xml"); //CTEST

// print deprecation warning if hadoop-site.xml is found in classpath
ClassLoader cL = Thread.currentThread().getContextClassLoader();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
index 65efaf9dfae..d3f8ab40ad3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
@@ -38,6 +38,7 @@
Configuration.addDefaultResource("hdfs-rbf-default.xml");
Configuration.addDefaultResource("hdfs-site.xml");
Configuration.addDefaultResource("hdfs-rbf-site.xml");
+ Configuration.addDefaultResource("hdfs-rbf-ctest.xml"); //CTEST
}

public HdfsConfiguration() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
index 0309535b91c..b0f7f1c4258 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
@@ -126,6 +126,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
+ <version>3.0.0-M4</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/core-ctest.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/core-ctest.xml
new file mode 100644
index 00000000000..db723656af7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/core-ctest.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration>
+
+</configuration>
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-ctest.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-ctest.xml
new file mode 100644
index 00000000000..db723656af7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-ctest.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration>
+
+</configuration>
\ No newline at end of file
diff --git a/pom.xml b/pom.xml
index f4e435c7493..e47571761ae 100644
--- a/pom.xml
+++ b/pom.xml
@@ -142,6 +142,13 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x
<build>
<pluginManagement>
<plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <reportFormat>plain</reportFormat>
+ </configuration>
+ </plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
--
2.25.1

111 changes: 111 additions & 0 deletions core/patch/hdfs-rbf/logging.patch
@@ -0,0 +1,111 @@
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 39461edaee9..6eea8b8b768 100755
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -1197,6 +1197,14 @@ private String substituteVars(String expr) {
+ MAX_SUBST + " " + expr);
}

+ private String getStackTrace() {
+ String stacktrace = " ";
+ for (StackTraceElement element : Thread.currentThread().getStackTrace()) {
+ stacktrace = stacktrace.concat(element.getClassName() + "\t");
+ }
+ return stacktrace;
+ }
+
/**
* Get the environment variable value if
* {@link #restrictSystemProps} does not block this.
@@ -1241,11 +1249,14 @@ String getProperty(String key) {
* or null if no such property exists.
*/
public String get(String name) {
+ String ctestParam = name; //CTEST
String[] names = handleDeprecation(deprecationContext.get(), name);
String result = null;
for(String n : names) {
+ ctestParam = n; //CTEST
result = substituteVars(getProps().getProperty(n));
}
+ LOG.warn("[CTEST][GET-PARAM] " + ctestParam); //CTEST
return result;
}

@@ -1333,11 +1344,14 @@ public String getTrimmed(String name, String defaultValue) {
* its replacing property and null if no such property exists.
*/
public String getRaw(String name) {
+ String ctestParam = name; //CTEST
String[] names = handleDeprecation(deprecationContext.get(), name);
String result = null;
for(String n : names) {
+ ctestParam = n; //CTEST
result = getProps().getProperty(n);
}
+ LOG.warn("[CTEST][GET-PARAM] " + ctestParam); //CTEST
return result;
}

@@ -1385,6 +1399,10 @@ public void set(String name, String value) {
set(name, value, null);
}

+ public void set(String name, String value, String source) {
+ set(name, value, source, true);
+ }
+
/**
* Set the <code>value</code> of the <code>name</code> property. If
* <code>name</code> is deprecated, it also sets the <code>value</code> to
@@ -1397,7 +1415,7 @@ public void set(String name, String value) {
* (For debugging).
* @throws IllegalArgumentException when the value or name is null.
*/
- public void set(String name, String value, String source) {
+ public void set(String name, String value, String source, boolean log_enabled) {
Preconditions.checkArgument(
name != null,
"Property name must not be null");
@@ -1409,6 +1427,7 @@ public void set(String name, String value, String source) {
if (deprecations.getDeprecatedKeyMap().isEmpty()) {
getProps();
}
+ if(log_enabled) LOG.warn("[CTEST][SET-PARAM] " + name + getStackTrace()); //CTEST
getOverlay().setProperty(name, value);
getProps().setProperty(name, value);
String newSource = (source == null ? "programmatically" : source);
@@ -1419,6 +1438,7 @@ public void set(String name, String value, String source) {
if(altNames != null) {
for(String n: altNames) {
if(!n.equals(name)) {
+ if(log_enabled) LOG.warn("[CTEST][SET-PARAM] " + n + getStackTrace()); //CTEST
getOverlay().setProperty(n, value);
getProps().setProperty(n, value);
putIntoUpdatingResource(n, new String[] {newSource});
@@ -1430,6 +1450,7 @@ public void set(String name, String value, String source) {
String[] names = handleDeprecation(deprecationContext.get(), name);
String altSource = "because " + name + " is deprecated";
for(String n : names) {
+ if(log_enabled) LOG.warn("[CTEST][SET-PARAM] " + n + getStackTrace()); //CTEST
getOverlay().setProperty(n, value);
getProps().setProperty(n, value);
putIntoUpdatingResource(n, new String[] {altSource});
@@ -1502,11 +1523,14 @@ private synchronized Properties getOverlay() {
* doesn't exist.
*/
public String get(String name, String defaultValue) {
+ String ctestParam = name; //CTEST
String[] names = handleDeprecation(deprecationContext.get(), name);
String result = null;
for(String n : names) {
+ ctestParam = n; //CTEST
result = substituteVars(getProps().getProperty(n, defaultValue));
}
+ LOG.warn("[CTEST][GET-PARAM] " + ctestParam); //CTEST
return result;
}

--
2.25.1
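The patched Configuration emits [CTEST][GET-PARAM] and [CTEST][SET-PARAM] markers that the framework later mines from test logs. A hedged sketch of scanning a log for those markers; the exact layout around the marker is an assumption:

import re

MARKER = re.compile(r"\[CTEST\]\[(GET|SET)-PARAM\] (\S+)")

def params_in_log(path):
    # Collect parameter names seen in GET/SET marker lines.
    gets, sets = set(), set()
    with open(path, errors="replace") as f:
        for line in f:
            m = MARKER.search(line)
            if m:
                (gets if m.group(1) == "GET" else sets).add(m.group(2))
    return gets, sets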

4 changes: 2 additions & 2 deletions core/run_ctest/inject.py
@@ -21,7 +21,7 @@ def inject_config(param_value_pairs):
for p, v in param_value_pairs.items():
file.write(p + "=" + v + "\n")
file.close()
-elif project in [HCOMMON, HDFS, HBASE]:
+elif project in [HCOMMON, HDFS, HDFSRBF, HBASE]:
conf = ET.Element("configuration")
for p, v in param_value_pairs.items():
prop = ET.SubElement(conf, "property")
@@ -46,7 +46,7 @@ def clean_conf_file(project):
file = open(inject_path, "w")
file.write("\n")
file.close()
-elif project in [HCOMMON, HDFS, HBASE]:
+elif project in [HCOMMON, HDFS, HDFSRBF, HBASE]:
conf = ET.Element("configuration")
for inject_path in INJECTION_PATH[project]:
file = open(inject_path, "wb")
2 changes: 1 addition & 1 deletion core/run_ctest/parse_input.py
@@ -39,7 +39,7 @@ def load_default_conf(path):

def parse_conf_file(path):
"""parse config file"""
-if project in [HCOMMON, HDFS, HBASE]:
+if project in [HCOMMON, HDFS, HDFSRBF, HBASE]:
return parse_conf_file_xml(path)
else:
# parsing for alluxio and zookeeper conf file format
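parse_conf_file_xml presumably walks the property elements of a Hadoop-style configuration file back into a dict; a minimal sketch under that assumption (the real implementation may differ):

import xml.etree.ElementTree as ET

def parse_conf_xml(path):
    # Collect <property> name/value pairs from a configuration file.
    pairs = {}
    for prop in ET.parse(path).getroot().iter("property"):
        name = prop.findtext("name")
        if name is not None:
            pairs[name] = prop.findtext("value", default="")
    return pairs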
6 changes: 3 additions & 3 deletions core/run_ctest/program_input.py
@@ -4,11 +4,11 @@
# run mode
"run_mode": "run_ctest", # string
# name of the project, i.e. hadoop-common, hadoop-hdfs
-"project": "hadoop-common", # string
+"project": "hadoop-hdfs-rbf", # string
# path to param -> tests json mapping
-"mapping_path": "../../data/ctest_mapping/opensource-hadoop-common.json", # string
+"mapping_path": "../../data/ctest_mapping/opensource-hadoop-hdfs-rbf.json", # string
# input directory hosting configuration files to be test, target-project-format specific
-"conf_file_dir": "sample-hadoop-common", # string
+"conf_file_dir": "hadoop-hdfs-rbf", # string
# display the terminal output live, without saving any results
"display_mode": False, # bool
# whether to use mvn test or mvn surefire:test
30,961 changes: 30,961 additions & 0 deletions data/ctest_mapping/opensource-hadoop-hdfs-rbf.json

Large diffs are not rendered by default.