diff --git a/Dockerfiles/arkime.Dockerfile b/Dockerfiles/arkime.Dockerfile
index bcaf77327..bdaf435b6 100644
--- a/Dockerfiles/arkime.Dockerfile
+++ b/Dockerfiles/arkime.Dockerfile
@@ -7,7 +7,7 @@ ENV DEBIAN_FRONTEND noninteractive
ENV ARKIME_VERSION "2.7.1"
ENV ARKIMEDIR "/data/moloch"
ENV ARKIME_URL "https://codeload.github.com/arkime/arkime/tar.gz/v${ARKIME_VERSION}"
-ENV ARKIME_LOCALELASTICSEARCH no
+ENV ARKIME_LOCALOPENSEARCH no
ENV ARKIME_INET yes
ADD moloch/scripts/bs4_remove_div.py /data/
@@ -78,7 +78,7 @@ RUN sed -i "s/buster main/buster main contrib non-free/g" /etc/apt/sources.list
mv -vf ./viewer/vueapp/src/components/users/Users.new ./viewer/vueapp/src/components/users/Users.vue && \
sed -i 's/v-if.*password.*"/v-if="false"/g' ./viewer/vueapp/src/components/settings/Settings.vue && \
rm -rf ./viewer/vueapp/src/components/upload && \
- sed -i "s/^\(ARKIME_LOCALELASTICSEARCH=\).*/\1"$ARKIME_LOCALELASTICSEARCH"/" ./release/Configure && \
+ sed -i "s/^\(ARKIME_LOCALOPENSEARCH=\).*/\1"$ARKIME_LOCALOPENSEARCH"/" ./release/Configure && \
sed -i "s/^\(ARKIME_INET=\).*/\1"$ARKIME_INET"/" ./release/Configure && \
./easybutton-build.sh --install && \
npm cache clean --force && \
@@ -106,8 +106,8 @@ ENV PUSER_PRIV_DROP true
ENV DEBIAN_FRONTEND noninteractive
ENV TERM xterm
-ARG ES_HOST=elasticsearch
-ARG ES_PORT=9200
+ARG ES_HOST=opensearch
+ARG OS_PORT=9200
ARG MALCOLM_USERNAME=admin
ARG ARKIME_INTERFACE=eth0
ARG ARKIME_ANALYZE_PCAP_THREADS=1
@@ -124,8 +124,8 @@ ARG MAXMIND_GEOIP_DB_LICENSE_KEY=""
# Declare env vars for each arg
ENV ES_HOST $ES_HOST
-ENV ES_PORT $ES_PORT
-ENV ARKIME_ELASTICSEARCH "http://"$ES_HOST":"$ES_PORT
+ENV OS_PORT $OS_PORT
+ENV ARKIME_OPENSEARCH "http://"$ES_HOST":"$OS_PORT
ENV ARKIME_INTERFACE $ARKIME_INTERFACE
ENV MALCOLM_USERNAME $MALCOLM_USERNAME
# this needs to be present, but is unused as nginx is going to handle auth for us
@@ -186,7 +186,7 @@ ADD shared/bin/docker-uid-gid-setup.sh /usr/local/bin/
ADD moloch/scripts /data/
ADD shared/bin/pcap_moloch_and_zeek_processor.py /data/
ADD shared/bin/pcap_utils.py /data/
-ADD shared/bin/elastic_search_status.sh /data/
+ADD shared/bin/opensearch_status.sh /data/
ADD moloch/etc $ARKIMEDIR/etc/
ADD moloch/wise/source.*.js $ARKIMEDIR/wiseService/
ADD moloch/supervisord.conf /etc/supervisord.conf
diff --git a/Dockerfiles/filebeat.Dockerfile b/Dockerfiles/filebeat.Dockerfile
index 898e115b4..e4bf8ee78 100644
--- a/Dockerfiles/filebeat.Dockerfile
+++ b/Dockerfiles/filebeat.Dockerfile
@@ -62,7 +62,7 @@ ADD shared/bin/docker-uid-gid-setup.sh /usr/local/bin/
ADD filebeat/filebeat.yml /usr/share/filebeat/filebeat.yml
ADD filebeat/filebeat-nginx.yml /usr/share/filebeat-nginx/filebeat-nginx.yml
ADD filebeat/scripts /data/
-ADD shared/bin/elastic_search_status.sh /data/
+ADD shared/bin/opensearch_status.sh /data/
ADD filebeat/supervisord.conf /etc/supervisord.conf
RUN mkdir -p /usr/share/filebeat-nginx/data && \
chown -R root:${PGROUP} /usr/share/filebeat-nginx && \
diff --git a/Dockerfiles/kibana-helper.Dockerfile b/Dockerfiles/kibana-helper.Dockerfile
index b53ee151d..3d65e83be 100644
--- a/Dockerfiles/kibana-helper.Dockerfile
+++ b/Dockerfiles/kibana-helper.Dockerfile
# Copyright (c) 2020 Battelle Energy Alliance, LLC. All rights reserved.
LABEL maintainer="malcolm.netsec@gmail.com"
@@ -23,8 +23,8 @@ ENV TERM xterm
ARG ARKIME_INDEX_PATTERN="sessions2-*"
ARG ARKIME_INDEX_PATTERN_ID="sessions2-*"
ARG ARKIME_INDEX_TIME_FIELD="firstPacket"
-ARG CREATE_ES_ARKIME_SESSION_INDEX="true"
-ARG ELASTICSEARCH_URL="http://elasticsearch:9200"
+ARG CREATE_OS_ARKIME_SESSION_INDEX="true"
+ARG OPENSEARCH_URL="http://opensearch:9200"
ARG ISM_SNAPSHOT_COMPRESSED=false
ARG ISM_SNAPSHOT_REPO=logs
ARG KIBANA_OFFLINE_REGION_MAPS_PORT="28991"
@@ -33,8 +33,8 @@ ARG KIBANA_URL="http://kibana:5601/kibana"
ENV ARKIME_INDEX_PATTERN $ARKIME_INDEX_PATTERN
ENV ARKIME_INDEX_PATTERN_ID $ARKIME_INDEX_PATTERN_ID
ENV ARKIME_INDEX_TIME_FIELD $ARKIME_INDEX_TIME_FIELD
-ENV CREATE_ES_ARKIME_SESSION_INDEX $CREATE_ES_ARKIME_SESSION_INDEX
-ENV ELASTICSEARCH_URL $ELASTICSEARCH_URL
+ENV CREATE_OS_ARKIME_SESSION_INDEX $CREATE_OS_ARKIME_SESSION_INDEX
+ENV OPENSEARCH_URL $OPENSEARCH_URL
ENV ISM_SNAPSHOT_COMPRESSED $ISM_SNAPSHOT_COMPRESSED
ENV ISM_SNAPSHOT_REPO $ISM_SNAPSHOT_REPO
ENV KIBANA_OFFLINE_REGION_MAPS_PORT $KIBANA_OFFLINE_REGION_MAPS_PORT
@@ -53,8 +53,8 @@ ADD kibana/scripts /data/
ADD kibana/supervisord.conf /etc/supervisord.conf
ADD kibana/zeek_template.json /data/zeek_template.json
ADD shared/bin/docker-uid-gid-setup.sh /usr/local/bin/
-ADD shared/bin/elastic_search_status.sh /data/
-ADD shared/bin/elastic_index_size_prune.py /data/
+ADD shared/bin/opensearch_status.sh /data/
+ADD shared/bin/opensearch_index_size_prune.py /data/
RUN apk --no-cache add bash python3 py3-pip curl procps psmisc npm shadow jq && \
npm install -g http-server && \
@@ -72,7 +72,7 @@ RUN apk --no-cache add bash python3 py3-pip curl procps psmisc npm shadow jq &&
chown -R ${PUSER}:${PGROUP} /opt/kibana/dashboards /opt/maps /data/init && \
chmod 755 /data/*.sh /data/*.py /data/init && \
chmod 400 /opt/maps/* && \
- (echo -e "*/2 * * * * /data/kibana-create-moloch-sessions-index.sh\n0 10 * * * /data/kibana_index_refresh.py --template zeek_template\n*/20 * * * * /data/elastic_index_size_prune.py" > ${SUPERCRONIC_CRONTAB})
+ (echo -e "*/2 * * * * /data/kibana-create-moloch-sessions-index.sh\n0 10 * * * /data/kibana_index_refresh.py --template zeek_template\n*/20 * * * * /data/opensearch_index_size_prune.py" > ${SUPERCRONIC_CRONTAB})
EXPOSE $KIBANA_OFFLINE_REGION_MAPS_PORT
diff --git a/Dockerfiles/kibana.Dockerfile b/Dockerfiles/kibana.Dockerfile
index 1c8b3df62..aebdabb87 100644
--- a/Dockerfiles/kibana.Dockerfile
+++ b/Dockerfiles/kibana.Dockerfile
@@ -82,14 +82,14 @@ ENV PUSER_PRIV_DROP true
ENV TERM xterm
-ARG ELASTICSEARCH_URL="http://elasticsearch:9200"
-ARG CREATE_ES_ARKIME_SESSION_INDEX="true"
+ARG OPENSEARCH_URL="http://opensearch:9200"
+ARG CREATE_OS_ARKIME_SESSION_INDEX="true"
ARG ARKIME_INDEX_PATTERN="sessions2-*"
ARG ARKIME_INDEX_PATTERN_ID="sessions2-*"
ARG ARKIME_INDEX_TIME_FIELD="firstPacket"
ARG KIBANA_DEFAULT_DASHBOARD="0ad3d7c2-3441-485e-9dfe-dbb22e84e576"
-ENV CREATE_ES_ARKIME_SESSION_INDEX $CREATE_ES_ARKIME_SESSION_INDEX
+ENV CREATE_OS_ARKIME_SESSION_INDEX $CREATE_OS_ARKIME_SESSION_INDEX
ENV ARKIME_INDEX_PATTERN $ARKIME_INDEX_PATTERN
ENV ARKIME_INDEX_PATTERN_ID $ARKIME_INDEX_PATTERN_ID
ENV ARKIME_INDEX_TIME_FIELD $ARKIME_INDEX_TIME_FIELD
@@ -97,7 +97,7 @@ ENV KIBANA_DEFAULT_DASHBOARD $KIBANA_DEFAULT_DASHBOARD
ENV KIBANA_OFFLINE_REGION_MAPS $KIBANA_OFFLINE_REGION_MAPS
ENV KIBANA_OFFLINE_REGION_MAPS_PORT $KIBANA_OFFLINE_REGION_MAPS_PORT
ENV PATH="/data:${PATH}"
-ENV ELASTICSEARCH_URL $ELASTICSEARCH_URL
+ENV OPENSEARCH_URL $OPENSEARCH_URL
ENV KIBANA_DEFAULT_DASHBOARD $KIBANA_DEFAULT_DASHBOARD
USER root
diff --git a/Dockerfiles/logstash.Dockerfile b/Dockerfiles/logstash.Dockerfile
index af6ab7229..95d929b8f 100644
--- a/Dockerfiles/logstash.Dockerfile
+++ b/Dockerfiles/logstash.Dockerfile
@@ -64,15 +64,15 @@ ENV TERM xterm
ARG LOGSTASH_ENRICHMENT_PIPELINE=enrichment
ARG LOGSTASH_PARSE_PIPELINE_ADDRESSES=zeek-parse
-ARG LOGSTASH_ELASTICSEARCH_PIPELINE_ADDRESS_INTERNAL=internal-es
-ARG LOGSTASH_ELASTICSEARCH_PIPELINE_ADDRESS_EXTERNAL=external-es
-ARG LOGSTASH_ELASTICSEARCH_OUTPUT_PIPELINE_ADDRESSES=internal-es,external-es
+ARG LOGSTASH_OPENSEARCH_PIPELINE_ADDRESS_INTERNAL=internal-es
+ARG LOGSTASH_OPENSEARCH_PIPELINE_ADDRESS_EXTERNAL=external-es
+ARG LOGSTASH_OPENSEARCH_OUTPUT_PIPELINE_ADDRESSES=internal-es,external-es
ENV LOGSTASH_ENRICHMENT_PIPELINE $LOGSTASH_ENRICHMENT_PIPELINE
ENV LOGSTASH_PARSE_PIPELINE_ADDRESSES $LOGSTASH_PARSE_PIPELINE_ADDRESSES
-ENV LOGSTASH_ELASTICSEARCH_PIPELINE_ADDRESS_INTERNAL $LOGSTASH_ELASTICSEARCH_PIPELINE_ADDRESS_INTERNAL
-ENV LOGSTASH_ELASTICSEARCH_PIPELINE_ADDRESS_EXTERNAL $LOGSTASH_ELASTICSEARCH_PIPELINE_ADDRESS_EXTERNAL
-ENV LOGSTASH_ELASTICSEARCH_OUTPUT_PIPELINE_ADDRESSES $LOGSTASH_ELASTICSEARCH_OUTPUT_PIPELINE_ADDRESSES
+ENV LOGSTASH_OPENSEARCH_PIPELINE_ADDRESS_INTERNAL $LOGSTASH_OPENSEARCH_PIPELINE_ADDRESS_INTERNAL
+ENV LOGSTASH_OPENSEARCH_PIPELINE_ADDRESS_EXTERNAL $LOGSTASH_OPENSEARCH_PIPELINE_ADDRESS_EXTERNAL
+ENV LOGSTASH_OPENSEARCH_OUTPUT_PIPELINE_ADDRESSES $LOGSTASH_OPENSEARCH_OUTPUT_PIPELINE_ADDRESSES
ENV JAVA_HOME=/usr/share/logstash/jdk
USER root
diff --git a/Dockerfiles/elasticsearch.Dockerfile b/Dockerfiles/opensearch.Dockerfile
similarity index 54%
rename from Dockerfiles/elasticsearch.Dockerfile
rename to Dockerfiles/opensearch.Dockerfile
index eb4ef82f0..26303c0e1 100644
--- a/Dockerfiles/elasticsearch.Dockerfile
+++ b/Dockerfiles/opensearch.Dockerfile
@@ -1,4 +1,4 @@
-FROM amazon/opendistro-for-elasticsearch:1.13.2
+FROM opensearchproject/opensearch:1.0.0
# Copyright (c) 2021 Battelle Energy Alliance, LLC. All rights reserved.
LABEL maintainer="malcolm.netsec@gmail.com"
@@ -7,16 +7,16 @@ LABEL org.opencontainers.image.url='https://github.com/idaholab/Malcolm'
LABEL org.opencontainers.image.documentation='https://github.com/idaholab/Malcolm/blob/master/README.md'
LABEL org.opencontainers.image.source='https://github.com/idaholab/Malcolm'
LABEL org.opencontainers.image.vendor='Idaho National Laboratory'
-LABEL org.opencontainers.image.title='malcolmnetsec/elasticsearch-od'
-LABEL org.opencontainers.image.description='Malcolm container providing Elasticsearch (the Apache-licensed Open Distro variant)'
+LABEL org.opencontainers.image.title='malcolmnetsec/opensearch'
+LABEL org.opencontainers.image.description='Malcolm container providing OpenSearch'
ARG DEFAULT_UID=1000
ARG DEFAULT_GID=1000
ENV DEFAULT_UID $DEFAULT_UID
ENV DEFAULT_GID $DEFAULT_GID
ENV PUID $DEFAULT_UID
-ENV PUSER "elasticsearch"
-ENV PGROUP "elasticsearch"
+ENV PUSER "opensearch"
+ENV PGROUP "opensearch"
ENV PUSER_PRIV_DROP true
ENV TERM xterm
@@ -24,28 +24,27 @@ ENV TERM xterm
ARG GITHUB_OAUTH_TOKEN=""
ARG DISABLE_INSTALL_DEMO_CONFIG=true
ENV DISABLE_INSTALL_DEMO_CONFIG $DISABLE_INSTALL_DEMO_CONFIG
-ENV JAVA_HOME=/usr/share/elasticsearch/jdk
+ENV JAVA_HOME=/usr/share/opensearch/jdk
+
+USER root
# Malcolm manages authentication and encryption via NGINX reverse proxy
-# https://opendistro.github.io/for-elasticsearch-docs/docs/security/configuration/disable/
-# https://opendistro.github.io/for-elasticsearch-docs/docs/install/docker/#customize-the-docker-image
-# https://github.com/opendistro-for-elasticsearch/opendistro-build/issues/613
RUN yum install -y openssl && \
- /usr/share/elasticsearch/bin/elasticsearch-plugin remove opendistro_security && \
- echo -e 'cluster.name: "docker-cluster"\nnetwork.host: 0.0.0.0' > /usr/share/elasticsearch/config/elasticsearch.yml && \
- chown -R $PUSER:$PGROUP /usr/share/elasticsearch/config/elasticsearch.yml && \
- sed -i "s/user=1000\b/user=%(ENV_PUID)s/g" /usr/share/elasticsearch/plugins/opendistro-performance-analyzer/pa_config/supervisord.conf && \
- sed -i "s/user=1000\b/user=%(ENV_PUID)s/g" /usr/share/elasticsearch/performance-analyzer-rca/pa_config/supervisord.conf && \
- sed -i '/[^#].*\/usr\/share\/elasticsearch\/bin\/elasticsearch.*/i /usr/local/bin/jdk-cacerts-auto-import.sh || true' /usr/local/bin/docker-entrypoint.sh
+ /usr/share/opensearch/bin/opensearch-plugin remove opensearch-security && \
+ echo -e 'cluster.name: "docker-cluster"\nnetwork.host: 0.0.0.0' > /usr/share/opensearch/config/opensearch.yml && \
+ chown -R $PUSER:$PGROUP /usr/share/opensearch/config/opensearch.yml && \
+ sed -i "s/user=1000\b/user=%(ENV_PUID)s/g" /usr/share/opensearch/plugins/opensearch-performance-analyzer/pa_config/supervisord.conf && \
+ sed -i "s/user=1000\b/user=%(ENV_PUID)s/g" /usr/share/opensearch/performance-analyzer-rca/pa_config/supervisord.conf && \
+ sed -i '/[^#].*\$OPENSEARCH_HOME\/bin\/opensearch.*/i /usr/local/bin/jdk-cacerts-auto-import.sh || true' /usr/share/opensearch/opensearch-docker-entrypoint.sh
+
+
# just used for initial keystore creation
ADD shared/bin/docker-uid-gid-setup.sh /usr/local/bin/
ADD shared/bin/jdk-cacerts-auto-import.sh /usr/local/bin/
-USER root
-
ENTRYPOINT ["/usr/local/bin/docker-uid-gid-setup.sh"]
-CMD ["/usr/local/bin/docker-entrypoint.sh"]
+CMD ["/usr/share/opensearch/opensearch-docker-entrypoint.sh"]
# to be populated at build-time:
ARG BUILD_DATE
diff --git a/Dockerfiles/pcap-monitor.Dockerfile b/Dockerfiles/pcap-monitor.Dockerfile
index 4ca3cdadd..87ad3e68f 100644
--- a/Dockerfiles/pcap-monitor.Dockerfile
+++ b/Dockerfiles/pcap-monitor.Dockerfile
@@ -24,14 +24,14 @@ ENV PUSER_PRIV_DROP false
ENV DEBIAN_FRONTEND noninteractive
ENV TERM xterm
-ARG ELASTICSEARCH_URL="http://elasticsearch:9200"
+ARG OPENSEARCH_URL="http://opensearch:9200"
ARG PCAP_PATH=/pcap
ARG PCAP_PIPELINE_DEBUG=false
ARG PCAP_PIPELINE_DEBUG_EXTRA=false
ARG PCAP_PIPELINE_IGNORE_PREEXISTING=false
ARG ZEEK_PATH=/zeek
-ENV ELASTICSEARCH_URL $ELASTICSEARCH_URL
+ENV OPENSEARCH_URL $OPENSEARCH_URL
ENV PCAP_PATH $PCAP_PATH
ENV PCAP_PIPELINE_DEBUG $PCAP_PIPELINE_DEBUG
ENV PCAP_PIPELINE_DEBUG_EXTRA $PCAP_PIPELINE_DEBUG_EXTRA
diff --git a/README.md b/README.md
index ec75d122a..6c787e766 100644
--- a/README.md
+++ b/README.md
@@ -141,7 +141,7 @@ You can then observe that the images have been retrieved by running `docker imag
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
malcolmnetsec/arkime 3.2.2 xxxxxxxxxxxx 39 hours ago 683MB
-malcolmnetsec/elasticsearch-od 3.2.2 xxxxxxxxxxxx 40 hours ago 690MB
+malcolmnetsec/opensearch 3.2.2 xxxxxxxxxxxx 40 hours ago 690MB
malcolmnetsec/file-monitor 3.2.2 xxxxxxxxxxxx 39 hours ago 470MB
malcolmnetsec/file-upload 3.2.2 xxxxxxxxxxxx 39 hours ago 199MB
malcolmnetsec/filebeat-oss 3.2.2 xxxxxxxxxxxx 39 hours ago 555MB
@@ -346,7 +346,7 @@ $ ./scripts/build.sh
Then, go take a walk or something since it will be a while. When you're done, you can run `docker images` and see you have fresh images for:
* `malcolmnetsec/arkime` (based on `debian:buster-slim`)
-* `malcolmnetsec/elasticsearch-od` (based on `amazon/opendistro-for-elasticsearch`)
+* `malcolmnetsec/opensearch` (based on `opensearchproject/opensearch`)
* `malcolmnetsec/filebeat-oss` (based on `docker.elastic.co/beats/filebeat-oss`)
* `malcolmnetsec/file-monitor` (based on `debian:buster-slim`)
* `malcolmnetsec/file-upload` (based on `debian:buster-slim`)
@@ -458,7 +458,7 @@ Although `install.py` will attempt to automate many of the following configurati
#### `docker-compose.yml` parameters
-Edit `docker-compose.yml` and search for the `ES_JAVA_OPTS` key. Edit the `-Xms4g -Xmx4g` values, replacing `4g` with a number that is half of your total system memory, or just under 32 gigabytes, whichever is less. So, for example, if I had 64 gigabytes of memory I would edit those values to be `-Xms31g -Xmx31g`. This indicates how much memory can be allocated to the Elasticsearch heaps. For a pleasant experience, I would suggest not using a value under 10 gigabytes. Similar values can be modified for Logstash with `LS_JAVA_OPTS`, where using 3 or 4 gigabytes is recommended.
+Edit `docker-compose.yml` and search for the `OPENSEARCH_JAVA_OPTS` key. Edit the `-Xms4g -Xmx4g` values, replacing `4g` with a number that is half of your total system memory, or just under 32 gigabytes, whichever is less. So, for example, if I had 64 gigabytes of memory I would edit those values to be `-Xms31g -Xmx31g`. This indicates how much memory can be allocated to the OpenSearch heap. For a pleasant experience, I would suggest not using a value under 10 gigabytes. Similar values can be modified for Logstash with `LS_JAVA_OPTS`, where using 3 or 4 gigabytes is recommended.
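+
+For example, on a host with 64 gigabytes of system memory, the corresponding entries in `docker-compose.yml` would end up looking something like the following sketch (surrounding service definitions elided):
+
+```
+OPENSEARCH_JAVA_OPTS : '-Xms31g -Xmx31g -Xss256k -Djava.security.egd=file:/dev/./urandom'
+LS_JAVA_OPTS : '-Xms3g -Xmx3g -Xss2m -Djava.security.egd=file:/dev/./urandom'
+```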
Various other environment variables inside of `docker-compose.yml` can be tweaked to control aspects of how Malcolm behaves, particularly with regards to processing PCAP files and Zeek logs. The environment variables of particular interest are located near the top of that file under **Commonly tweaked configuration options**, which include:
@@ -1825,7 +1825,7 @@ Pulling zeek ... done
user@host:~/Malcolm$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
malcolmnetsec/arkime 3.2.2 xxxxxxxxxxxx 39 hours ago 683MB
-malcolmnetsec/elasticsearch-od 3.2.2 xxxxxxxxxxxx 40 hours ago 690MB
+malcolmnetsec/opensearch 3.2.2 xxxxxxxxxxxx 40 hours ago 690MB
malcolmnetsec/file-monitor 3.2.2 xxxxxxxxxxxx 39 hours ago 470MB
malcolmnetsec/file-upload 3.2.2 xxxxxxxxxxxx 39 hours ago 199MB
malcolmnetsec/filebeat-oss 3.2.2 xxxxxxxxxxxx 39 hours ago 555MB
diff --git a/docker-compose-standalone.yml b/docker-compose-standalone.yml
index db55f6743..45530804f 100644
--- a/docker-compose-standalone.yml
+++ b/docker-compose-standalone.yml
@@ -78,8 +78,8 @@ x-zeek-variables: &zeek-variables
ZEEK_DISABLE_SPICY_WIREGUARD : ''
x-kibana-helper-variables: &kibana-helper-variables
- ELASTICSEARCH_INDEX_SIZE_PRUNE_LIMIT : '0'
- ELASTICSEARCH_INDEX_SIZE_PRUNE_NAME_SORT : 'false'
+ OPENSEARCH_INDEX_SIZE_PRUNE_LIMIT : '0'
+ OPENSEARCH_INDEX_SIZE_PRUNE_NAME_SORT : 'false'
ISM_CLOSE_AGE : '60d'
ISM_COLD_AGE : '30d'
ISM_DELETE_AGE : '365d'
@@ -122,24 +122,24 @@ x-pcap-capture-variables: &pcap-capture-variables
################################################################################
services:
- elasticsearch:
- image: malcolmnetsec/elasticsearch-od:3.2.2
+ opensearch:
+ image: malcolmnetsec/opensearch:3.2.2
restart: "no"
stdin_open: false
tty: true
- hostname: elasticsearch
+ hostname: opensearch
environment:
<< : *process-variables
logger.level : 'WARN'
bootstrap.memory_lock : 'true'
- ES_JAVA_OPTS : '-Xms4g -Xmx4g -Xss256k -Djava.security.egd=file:/dev/./urandom'
+ OPENSEARCH_JAVA_OPTS : '-Xms4g -Xmx4g -Xss256k -Djava.security.egd=file:/dev/./urandom'
VIRTUAL_HOST : 'es.malcolm.local'
discovery.type : 'single-node'
discovery.zen.minimum_master_nodes : 1
cluster.routing.allocation.disk.threshold_enabled : 'false'
cluster.routing.allocation.node_initial_primaries_recoveries : 8
indices.query.bool.max_clause_count : 2048
- path.repo : '/opt/elasticsearch/backup'
+ path.repo : '/opt/opensearch/backup'
expose:
- 9200
ulimits:
@@ -149,10 +149,10 @@ services:
cap_add:
- IPC_LOCK
volumes:
- - ./elasticsearch/elasticsearch.keystore:/usr/share/elasticsearch/config/elasticsearch.keystore:rw
- - ./nginx/ca-trust:/usr/share/elasticsearch/ca-trust:ro
- - ./elasticsearch:/usr/share/elasticsearch/data:delegated
- - ./elasticsearch-backup:/opt/elasticsearch/backup:delegated
+ - ./opensearch/opensearch.keystore:/usr/share/opensearch/config/opensearch.keystore:rw
+ - ./nginx/ca-trust:/usr/share/opensearch/ca-trust:ro
+ - ./opensearch:/usr/share/opensearch/data:delegated
+ - ./opensearch-backup:/opt/opensearch/backup:delegated
healthcheck:
test: ["CMD", "curl", "--silent", "--fail", "http://localhost:9200"]
interval: 30s
@@ -168,15 +168,15 @@ services:
environment:
<< : *process-variables
<< : *kibana-helper-variables
- ELASTICSEARCH_URL : 'http://elasticsearch:9200'
+ OPENSEARCH_URL : 'http://opensearch:9200'
KIBANA_URL : 'http://kibana:5601/kibana'
VIRTUAL_HOST : 'kibana-helper.malcolm.local'
ARKIME_INDEX_PATTERN : 'sessions2-*'
ARKIME_INDEX_PATTERN_ID : 'sessions2-*'
ARKIME_INDEX_TIME_FIELD : 'firstPacket'
- CREATE_ES_ARKIME_SESSION_INDEX : 'true'
+ CREATE_OS_ARKIME_SESSION_INDEX : 'true'
depends_on:
- - elasticsearch
+ - opensearch
expose:
- 28991
volumes:
@@ -195,10 +195,10 @@ services:
hostname: kibana
environment:
<< : *process-variables
- ELASTICSEARCH_URL : 'http://elasticsearch:9200'
+ OPENSEARCH_URL : 'http://opensearch:9200'
VIRTUAL_HOST : 'kibana.malcolm.local'
depends_on:
- - elasticsearch
+ - opensearch
- kibana-helper
expose:
- 5601
@@ -219,10 +219,10 @@ services:
<< : *logstash-variables
<< : *common-beats-variables
<< : *common-lookup-variables
- ES_HOSTS : 'elasticsearch:9200'
+ ES_HOSTS : 'opensearch:9200'
LS_JAVA_OPTS : '-Xms2g -Xmx2g -Xss2m -Djava.security.egd=file:/dev/./urandom'
depends_on:
- - elasticsearch
+ - opensearch
expose:
- 5044
- 9001
@@ -293,8 +293,8 @@ services:
<< : *arkime-variables
ARKIME_VERSION : '2.7.1'
VIRTUAL_HOST : 'arkime.malcolm.local'
- ES_HOST : 'elasticsearch'
- ES_PORT : 9200
+ ES_HOST : 'opensearch'
+ OS_PORT : 9200
ES_MAX_SHARDS_PER_NODE : 2500
VIEWER : 'on'
WISE : 'on'
@@ -303,7 +303,7 @@ services:
soft: -1
hard: -1
depends_on:
- - elasticsearch
+ - opensearch
expose:
- 8000
- 8005
@@ -333,7 +333,7 @@ services:
soft: -1
hard: -1
depends_on:
- - elasticsearch
+ - opensearch
volumes:
- ./pcap:/pcap
- ./zeek-logs/upload:/zeek/upload
@@ -402,9 +402,9 @@ services:
environment:
<< : *process-variables
<< : *common-upload-variables
- ELASTICSEARCH_URL : 'http://elasticsearch:9200'
+ OPENSEARCH_URL : 'http://opensearch:9200'
depends_on:
- - elasticsearch
+ - opensearch
expose:
- 30441
volumes:
diff --git a/docker-compose.yml b/docker-compose.yml
index 26f5c20fd..1f73a119a 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -78,8 +78,8 @@ x-zeek-variables: &zeek-variables
ZEEK_DISABLE_SPICY_WIREGUARD : ''
x-kibana-helper-variables: &kibana-helper-variables
- ELASTICSEARCH_INDEX_SIZE_PRUNE_LIMIT : '0'
- ELASTICSEARCH_INDEX_SIZE_PRUNE_NAME_SORT : 'false'
+ OPENSEARCH_INDEX_SIZE_PRUNE_LIMIT : '0'
+ OPENSEARCH_INDEX_SIZE_PRUNE_NAME_SORT : 'false'
ISM_CLOSE_AGE : '60d'
ISM_COLD_AGE : '30d'
ISM_DELETE_AGE : '365d'
@@ -122,27 +122,27 @@ x-pcap-capture-variables: &pcap-capture-variables
################################################################################
services:
- elasticsearch:
+ opensearch:
build:
context: .
- dockerfile: Dockerfiles/elasticsearch.Dockerfile
- image: malcolmnetsec/elasticsearch-od:3.2.2
+ dockerfile: Dockerfiles/opensearch.Dockerfile
+ image: malcolmnetsec/opensearch:3.2.2
restart: "no"
stdin_open: false
tty: true
- hostname: elasticsearch
+ hostname: opensearch
environment:
<< : *process-variables
logger.level : 'INFO'
bootstrap.memory_lock : 'true'
- ES_JAVA_OPTS : '-Xms4g -Xmx4g -Xss256k -Djava.security.egd=file:/dev/./urandom'
+ OPENSEARCH_JAVA_OPTS : '-Xms4g -Xmx4g -Xss256k -Djava.security.egd=file:/dev/./urandom'
VIRTUAL_HOST : 'es.malcolm.local'
discovery.type : 'single-node'
discovery.zen.minimum_master_nodes : 1
cluster.routing.allocation.disk.threshold_enabled : 'false'
cluster.routing.allocation.node_initial_primaries_recoveries : 8
indices.query.bool.max_clause_count : 2048
- path.repo : '/opt/elasticsearch/backup'
+ path.repo : '/opt/opensearch/backup'
expose:
- 9200
ulimits:
@@ -152,10 +152,10 @@ services:
cap_add:
- IPC_LOCK
volumes:
- - ./elasticsearch/elasticsearch.keystore:/usr/share/elasticsearch/config/elasticsearch.keystore:rw
- - ./nginx/ca-trust:/usr/share/elasticsearch/ca-trust:ro
- - ./elasticsearch:/usr/share/elasticsearch/data:delegated
- - ./elasticsearch-backup:/opt/elasticsearch/backup:delegated
+ - ./opensearch/opensearch.keystore:/usr/share/opensearch/config/opensearch.keystore:rw
+ - ./nginx/ca-trust:/usr/share/opensearch/ca-trust:ro
+ - ./opensearch:/usr/share/opensearch/data:delegated
+ - ./opensearch-backup:/opt/opensearch/backup:delegated
healthcheck:
test: ["CMD", "curl", "--silent", "--fail", "http://localhost:9200"]
interval: 30s
@@ -174,15 +174,15 @@ services:
environment:
<< : *process-variables
<< : *kibana-helper-variables
- ELASTICSEARCH_URL : 'http://elasticsearch:9200'
+ OPENSEARCH_URL : 'http://opensearch:9200'
KIBANA_URL : 'http://kibana:5601/kibana'
VIRTUAL_HOST : 'kibana-helper.malcolm.local'
ARKIME_INDEX_PATTERN : 'sessions2-*'
ARKIME_INDEX_PATTERN_ID : 'sessions2-*'
ARKIME_INDEX_TIME_FIELD : 'firstPacket'
- CREATE_ES_ARKIME_SESSION_INDEX : 'true'
+ CREATE_OS_ARKIME_SESSION_INDEX : 'true'
depends_on:
- - elasticsearch
+ - opensearch
expose:
- 28991
volumes:
@@ -204,10 +204,10 @@ services:
hostname: kibana
environment:
<< : *process-variables
- ELASTICSEARCH_URL : 'http://elasticsearch:9200'
+ OPENSEARCH_URL : 'http://opensearch:9200'
VIRTUAL_HOST : 'kibana.malcolm.local'
depends_on:
- - elasticsearch
+ - opensearch
- kibana-helper
expose:
- 5601
@@ -231,10 +231,10 @@ services:
<< : *logstash-variables
<< : *common-beats-variables
<< : *common-lookup-variables
- ES_HOSTS : 'elasticsearch:9200'
+ ES_HOSTS : 'opensearch:9200'
LS_JAVA_OPTS : '-Xms2g -Xmx2g -Xss2m -Djava.security.egd=file:/dev/./urandom'
depends_on:
- - elasticsearch
+ - opensearch
expose:
- 5044
- 9001
@@ -314,8 +314,8 @@ services:
<< : *arkime-variables
ARKIME_VERSION : '2.7.1'
VIRTUAL_HOST : 'arkime.malcolm.local'
- ES_HOST : 'elasticsearch'
- ES_PORT : 9200
+ ES_HOST : 'opensearch'
+ OS_PORT : 9200
ES_MAX_SHARDS_PER_NODE : 2500
VIEWER : 'on'
WISE : 'on'
@@ -324,7 +324,7 @@ services:
soft: -1
hard: -1
depends_on:
- - elasticsearch
+ - opensearch
expose:
- 8000
- 8005
@@ -360,7 +360,7 @@ services:
soft: -1
hard: -1
depends_on:
- - elasticsearch
+ - opensearch
volumes:
- ./pcap:/pcap
- ./zeek-logs/upload:/zeek/upload
@@ -439,9 +439,9 @@ services:
environment:
<< : *process-variables
<< : *common-upload-variables
- ELASTICSEARCH_URL : 'http://elasticsearch:9200'
+ OPENSEARCH_URL : 'http://opensearch:9200'
depends_on:
- - elasticsearch
+ - opensearch
expose:
- 30441
volumes:
diff --git a/filebeat/filebeat-nginx.yml b/filebeat/filebeat-nginx.yml
index df188d473..b248d367d 100644
--- a/filebeat/filebeat-nginx.yml
+++ b/filebeat/filebeat-nginx.yml
@@ -15,7 +15,7 @@ filebeat.modules:
#-------------------------- Elasticsearch output -------------------------------
output.elasticsearch:
enabled: true
- hosts: ["elasticsearch:9200"]
+ hosts: ["opensearch:9200"]
indices:
- index: "filebeat-%{[agent.version]}-nginx-%{+yyyy.MM.dd}"
when.equals:
diff --git a/filebeat/supervisord.conf b/filebeat/supervisord.conf
index 26dd9e3c1..41039d84a 100644
--- a/filebeat/supervisord.conf
+++ b/filebeat/supervisord.conf
@@ -29,7 +29,7 @@ stdout_logfile_maxbytes=0
redirect_stderr=true
[program:filebeat-nginx]
-command=bash -c "/data/elastic_search_status.sh && /usr/local/bin/docker-entrypoint -e --strict.perms=false \
+command=bash -c "/data/opensearch_status.sh && /usr/local/bin/docker-entrypoint -e --strict.perms=false \
--path.home /usr/share/filebeat-nginx \
--path.config /usr/share/filebeat-nginx \
--path.data /usr/share/filebeat-nginx/data \
diff --git a/kibana/kibana.yml b/kibana/kibana.yml
index 81cc7fbad..0dd1bf6a1 100644
--- a/kibana/kibana.yml
+++ b/kibana/kibana.yml
@@ -5,7 +5,7 @@ server.name: kibana
server.host: "0"
server.basePath: "/kibana"
server.rewriteBasePath: true
-elasticsearch.hosts: ["${ELASTICSEARCH_URL}"]
+elasticsearch.hosts: ["${OPENSEARCH_URL}"]
elasticsearch.requestTimeout: 180000
kibana.defaultAppId: "dashboard/${KIBANA_DEFAULT_DASHBOARD}"
diff --git a/kibana/scripts/kibana-create-moloch-sessions-index.sh b/kibana/scripts/kibana-create-moloch-sessions-index.sh
index b9a053790..a6ddfd9fc 100755
--- a/kibana/scripts/kibana-create-moloch-sessions-index.sh
+++ b/kibana/scripts/kibana-create-moloch-sessions-index.sh
@@ -5,12 +5,12 @@
set -euo pipefail
shopt -s nocasematch
-if [[ -n $ELASTICSEARCH_URL ]]; then
- ES_URL="$ELASTICSEARCH_URL"
-elif [[ -n $ES_HOST ]] && [[ -n $ES_PORT ]]; then
- ES_URL="http://$ES_HOST:$ES_PORT"
+if [[ -n $OPENSEARCH_URL ]]; then
+ OS_URL="$OPENSEARCH_URL"
+elif [[ -n $ES_HOST ]] && [[ -n $OS_PORT ]]; then
+ OS_URL="http://$ES_HOST:$OS_PORT"
else
- ES_URL="http://elasticsearch:9200"
+ OS_URL="http://opensearch:9200"
fi
if [[ -n $KIBANA_URL ]]; then
@@ -32,10 +32,10 @@ ZEEK_TEMPLATE_FILE_ORIG="/data/zeek_template.json"
INDEX_POLICY_NAME=${ISM_POLICY_NAME:-"session_index_policy"}
# is the argument to automatically create this index enabled?
-if [[ "$CREATE_ES_ARKIME_SESSION_INDEX" = "true" ]] ; then
+if [[ "$CREATE_OS_ARKIME_SESSION_INDEX" = "true" ]] ; then
# give Elasticsearch time to start before configuring Kibana
- /data/elastic_search_status.sh >/dev/null 2>&1
+ /data/opensearch_status.sh >/dev/null 2>&1
# is the kibana process server up and responding to requests?
if curl -L --silent --output /dev/null --fail -XGET "$KIB_URL/api/status" ; then
@@ -45,8 +45,8 @@ if [[ "$CREATE_ES_ARKIME_SESSION_INDEX" = "true" ]] ; then
echo "Elasticsearch is running! Setting up index management policies..."
- # register the repo location for elasticsearch snapshots
- /data/register-elasticsearch-snapshot-repo.sh
+ # register the repo location for opensearch snapshots
+ /data/register-opensearch-snapshot-repo.sh
# tweak the sessions template (sessions2-* zeek template file) to use the index management policy
if [[ -f "$INDEX_POLICY_FILE_HOST" ]] && (( $(jq length "$INDEX_POLICY_FILE_HOST") > 0 )); then
@@ -56,7 +56,7 @@ if [[ "$CREATE_ES_ARKIME_SESSION_INDEX" = "true" ]] ; then
else
# need to generate index management file based on environment variables
- /data/elastic_index_policy_create.py \
+ /data/opensearch_index_policy_create.py \
--policy "$INDEX_POLICY_NAME" \
--index-pattern "$INDEX_PATTERN" \
--priority 100 \
@@ -70,7 +70,7 @@ if [[ "$CREATE_ES_ARKIME_SESSION_INDEX" = "true" ]] ; then
if [[ -f "$INDEX_POLICY_FILE" ]]; then
# make API call to define index management policy
# https://opendistro.github.io/for-elasticsearch-docs/docs/ism/api/#create-policy
- curl -w "\n" -L --silent --output /dev/null --show-error -XPUT -H "Content-Type: application/json" "$ES_URL/_opendistro/_ism/policies/$INDEX_POLICY_NAME" -d "@$INDEX_POLICY_FILE"
+ curl -w "\n" -L --silent --output /dev/null --show-error -XPUT -H "Content-Type: application/json" "$OS_URL/_opendistro/_ism/policies/$INDEX_POLICY_NAME" -d "@$INDEX_POLICY_FILE"
if [[ -f "$ZEEK_TEMPLATE_FILE_ORIG" ]]; then
# insert opendistro.index_state_management.policy_id into index template settings: will be
@@ -87,7 +87,7 @@ if [[ "$CREATE_ES_ARKIME_SESSION_INDEX" = "true" ]] ; then
# load zeek_template containing zeek field type mappings (merged from /data/zeek_template.json to /data/init/zeek_template.json in kibana_helpers.sh on startup)
curl -w "\n" -sSL --fail -XPOST -H "Content-Type: application/json" \
- "$ES_URL/_template/zeek_template?include_type_name=true" -d "@$ZEEK_TEMPLATE_FILE" 2>&1
+ "$OS_URL/_template/zeek_template?include_type_name=true" -d "@$ZEEK_TEMPLATE_FILE" 2>&1
echo "Importing index pattern..."
diff --git a/kibana/scripts/kibana_index_refresh.py b/kibana/scripts/kibana_index_refresh.py
index aea9600a5..5081f869b 100755
--- a/kibana/scripts/kibana_index_refresh.py
+++ b/kibana/scripts/kibana_index_refresh.py
@@ -44,7 +44,7 @@ def main():
parser.add_argument('-v', '--verbose', dest='debug', type=str2bool, nargs='?', const=True, default=False, help="Verbose output")
parser.add_argument('-i', '--index', dest='index', metavar='', type=str, default='sessions2-*', help='Index Pattern Name')
parser.add_argument('-k', '--kibana', dest='kibanaUrl', metavar='', type=str, default=os.getenv('KIBANA_URL', 'http://kibana:5601/kibana'), help='Kibana URL')
- parser.add_argument('-e', '--elastic', dest='elasticUrl', metavar='', type=str, default=os.getenv('ELASTICSEARCH_URL', 'http://elasticsearch:9200'), help='Elasticsearch URL')
+ parser.add_argument('-e', '--elastic', dest='elasticUrl', metavar='', type=str, default=os.getenv('OPENSEARCH_URL', 'http://opensearch:9200'), help='Elasticsearch URL')
parser.add_argument('-t', '--template', dest='template', metavar='', type=str, default=None, help='Elasticsearch template to merge')
parser.add_argument('-n', '--dry-run', dest='dryrun', type=str2bool, nargs='?', const=True, default=False, help="Dry run (no PUT)")
try:
diff --git a/kibana/scripts/elastic_index_policy_create.py b/kibana/scripts/opensearch_index_policy_create.py
similarity index 100%
rename from kibana/scripts/elastic_index_policy_create.py
rename to kibana/scripts/opensearch_index_policy_create.py
diff --git a/kibana/scripts/register-elasticsearch-snapshot-repo.sh b/kibana/scripts/register-opensearch-snapshot-repo.sh
similarity index 58%
rename from kibana/scripts/register-elasticsearch-snapshot-repo.sh
rename to kibana/scripts/register-opensearch-snapshot-repo.sh
index 4b63fb4aa..dee89083a 100755
--- a/kibana/scripts/register-elasticsearch-snapshot-repo.sh
+++ b/kibana/scripts/register-opensearch-snapshot-repo.sh
@@ -3,17 +3,17 @@
# Copyright (c) 2021 Battelle Energy Alliance, LLC. All rights reserved.
if [ $# -gt 0 ]; then
- ES_URL="$1"
-elif [[ -n $ELASTICSEARCH_URL ]]; then
- ES_URL="$ELASTICSEARCH_URL"
-elif [[ -n $ES_HOST ]] && [[ -n $ES_PORT ]]; then
- ES_URL="http://$ES_HOST:$ES_PORT"
+ OS_URL="$1"
+elif [[ -n $OPENSEARCH_URL ]]; then
+ OS_URL="$OPENSEARCH_URL"
+elif [[ -n $ES_HOST ]] && [[ -n $OS_PORT ]]; then
+ OS_URL="http://$ES_HOST:$OS_PORT"
else
- ES_URL="http://elasticsearch:9200"
+ OS_URL="http://opensearch:9200"
fi
[[ -n $ISM_SNAPSHOT_REPO ]] && \
curl -w "\n" -H "Accept: application/json" \
-H "Content-type: application/json" \
- -XPUT -fsSL "$ES_URL/_snapshot/$ISM_SNAPSHOT_REPO" \
+ -XPUT -fsSL "$OS_URL/_snapshot/$ISM_SNAPSHOT_REPO" \
-d "{ \"type\": \"fs\", \"settings\": { \"location\": \"$ISM_SNAPSHOT_REPO\", \"compress\": ${ISM_SNAPSHOT_COMPRESSED:-false} } }"
\ No newline at end of file
diff --git a/kibana/supervisord.conf b/kibana/supervisord.conf
index 3f6a1edc2..ceba8cdb0 100644
--- a/kibana/supervisord.conf
+++ b/kibana/supervisord.conf
@@ -17,7 +17,7 @@ supervisor.rpcinterface_factory=supervisor.rpcinterface:make_main_rpcinterface
serverurl=unix:///tmp/supervisor.sock
[program:idxinit]
-command=bash -c "sleep 180 && /data/elastic_search_status.sh -w && /data/kibana_index_refresh.py -v --kibana \"%(ENV_KIBANA_URL)s\" --elastic \"%(ENV_ELASTICSEARCH_URL)s\" --template zeek_template"
+command=bash -c "sleep 180 && /data/opensearch_status.sh -w && /data/kibana_index_refresh.py -v --kibana \"%(ENV_KIBANA_URL)s\" --elastic \"%(ENV_OPENSEARCH_URL)s\" --template zeek_template"
autostart=true
autorestart=false
startsecs=0
diff --git a/logstash/pipelines/enrichment/99_elastic_forward.conf b/logstash/pipelines/enrichment/99_elastic_forward.conf
deleted file mode 100644
index 11b3e5380..000000000
--- a/logstash/pipelines/enrichment/99_elastic_forward.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-output {
- pipeline {
- send_to => [_MALCOLM_ELASTICSEARCH_OUTPUT_PIPELINES_]
- }
-}
diff --git a/logstash/pipelines/enrichment/99_opensearch_forward.conf b/logstash/pipelines/enrichment/99_opensearch_forward.conf
new file mode 100644
index 000000000..3f0d792f5
--- /dev/null
+++ b/logstash/pipelines/enrichment/99_opensearch_forward.conf
@@ -0,0 +1,5 @@
+output {
+ pipeline {
+ send_to => [_MALCOLM_OPENSEARCH_OUTPUT_PIPELINES_]
+ }
+}
diff --git a/logstash/pipelines/external/01_input_external_es.conf b/logstash/pipelines/external/01_input_external_es.conf
index 563d83041..3a0516aed 100644
--- a/logstash/pipelines/external/01_input_external_es.conf
+++ b/logstash/pipelines/external/01_input_external_es.conf
@@ -1,6 +1,6 @@
input {
pipeline {
- address => "${ELASTICSEARCH_PIPELINE_ADDRESS_EXTERNAL:external-es}"
+ address => "${OPENSEARCH_PIPELINE_ADDRESS_EXTERNAL:external-es}"
}
}
diff --git a/logstash/pipelines/external/99_elastic_output.conf b/logstash/pipelines/external/99_opensearch_output.conf
similarity index 100%
rename from logstash/pipelines/external/99_elastic_output.conf
rename to logstash/pipelines/external/99_opensearch_output.conf
diff --git a/logstash/pipelines/output/01_input_internal_es.conf b/logstash/pipelines/output/01_input_internal_es.conf
index e202c8091..205aed677 100644
--- a/logstash/pipelines/output/01_input_internal_es.conf
+++ b/logstash/pipelines/output/01_input_internal_es.conf
@@ -1,6 +1,6 @@
input {
pipeline {
- address => "${ELASTICSEARCH_PIPELINE_ADDRESS_INTERNAL:internal-es}"
+ address => "${OPENSEARCH_PIPELINE_ADDRESS_INTERNAL:internal-es}"
}
}
diff --git a/logstash/pipelines/output/99_elastic_output.conf b/logstash/pipelines/output/99_opensearch_output.conf
similarity index 80%
rename from logstash/pipelines/output/99_elastic_output.conf
rename to logstash/pipelines/output/99_opensearch_output.conf
index ca4d9a417..0b9da44b1 100644
--- a/logstash/pipelines/output/99_elastic_output.conf
+++ b/logstash/pipelines/output/99_opensearch_output.conf
@@ -1,7 +1,7 @@
output {
elasticsearch {
id => "output_elasticsearch_moloch"
- hosts => "${ES_HOSTS:elasticsearch:9200}"
+ hosts => "${ES_HOSTS:opensearch:9200}"
manage_template => false
index => "sessions2-%{+YYMMdd}"
document_id => "%{+YYMMdd}-%{zeekLogDocId}"
diff --git a/logstash/pipelines/zeek/11_zeek_logs.conf b/logstash/pipelines/zeek/11_zeek_logs.conf
index 5cfe0297b..f34797dea 100644
--- a/logstash/pipelines/zeek/11_zeek_logs.conf
+++ b/logstash/pipelines/zeek/11_zeek_logs.conf
@@ -3582,7 +3582,7 @@ filter {
mutate { id => "mutate_add_field_zeek_not_conn_zeekLogDocId"
add_field => { "[zeekLogDocId]" => "%{[zeek][uid]}-%{source}:%{[@metadata][uuid]}" } }
}
- } # if/else for defining logOffset field used in document ID for elasticsearch
+ } # if/else for defining logOffset field used in document ID for OpenSearch
if ([zeek][ts]) {
diff --git a/logstash/scripts/logstash-start.sh b/logstash/scripts/logstash-start.sh
index 065f63c67..fbc778661 100755
--- a/logstash/scripts/logstash-start.sh
+++ b/logstash/scripts/logstash-start.sh
@@ -29,9 +29,9 @@ ENRICHMENT_PIPELINE=${LOGSTASH_ENRICHMENT_PIPELINE:-"enrichment"}
PARSE_PIPELINE_ADDRESSES=${LOGSTASH_PARSE_PIPELINE_ADDRESSES:-"zeek-parse"}
# pipeline addresses for forwarding from Logstash to Elasticsearch (both "internal" and "external" pipelines)
-export ELASTICSEARCH_PIPELINE_ADDRESS_INTERNAL=${LOGSTASH_ELASTICSEARCH_PIPELINE_ADDRESS_INTERNAL:-"internal-es"}
-export ELASTICSEARCH_PIPELINE_ADDRESS_EXTERNAL=${LOGSTASH_ELASTICSEARCH_PIPELINE_ADDRESS_EXTERNAL:-"external-es"}
-ELASTICSEARCH_OUTPUT_PIPELINE_ADDRESSES=${LOGSTASH_ELASTICSEARCH_OUTPUT_PIPELINE_ADDRESSES:-"$ELASTICSEARCH_PIPELINE_ADDRESS_INTERNAL,$ELASTICSEARCH_PIPELINE_ADDRESS_EXTERNAL"}
+export OPENSEARCH_PIPELINE_ADDRESS_INTERNAL=${LOGSTASH_OPENSEARCH_PIPELINE_ADDRESS_INTERNAL:-"internal-es"}
+export OPENSEARCH_PIPELINE_ADDRESS_EXTERNAL=${LOGSTASH_OPENSEARCH_PIPELINE_ADDRESS_EXTERNAL:-"external-es"}
+OPENSEARCH_OUTPUT_PIPELINE_ADDRESSES=${LOGSTASH_OPENSEARCH_OUTPUT_PIPELINE_ADDRESSES:-"$OPENSEARCH_PIPELINE_ADDRESS_INTERNAL,$OPENSEARCH_PIPELINE_ADDRESS_EXTERNAL"}
# ip-to-segment-logstash.py translate $INPUT_CIDR_MAP, $INPUT_HOST_MAP, $INPUT_MIXED_MAP into this logstash filter file
NETWORK_MAP_OUTPUT_FILTER="$PIPELINES_DIR"/"$ENRICHMENT_PIPELINE"/16_host_segment_filters.conf
@@ -53,7 +53,7 @@ find "$PIPELINES_DIR" -mindepth 1 -maxdepth 1 -type d -print0 2>/dev/null | sort
xargs -0 -n 1 -I '{}' bash -c '
PIPELINE_NAME="$(basename "{}")"
PIPELINE_ADDRESS_NAME="$(cat "{}"/*.conf | sed -e "s/:[\}]*.*\(}\)/\1/" | envsubst | grep -P "\baddress\s*=>" | awk "{print \$3}" | sed "s/[\"'']//g" | head -n 1)"
- if [[ -n "$ES_EXTERNAL_HOSTS" ]] || [[ "$PIPELINE_ADDRESS_NAME" != "$ELASTICSEARCH_PIPELINE_ADDRESS_EXTERNAL" ]]; then
+ if [[ -n "$ES_EXTERNAL_HOSTS" ]] || [[ "$PIPELINE_ADDRESS_NAME" != "$OPENSEARCH_PIPELINE_ADDRESS_EXTERNAL" ]]; then
echo "- pipeline.id: malcolm-$PIPELINE_NAME" >> "$PIPELINES_CFG"
echo " path.config: "{}"" >> "$PIPELINES_CFG"
cat "{}"/"$PIPELINE_EXTRA_CONF_FILE" 2>/dev/null >> "$PIPELINES_CFG"
@@ -69,15 +69,15 @@ rm -f "$NETWORK_MAP_OUTPUT_FILTER"
if [[ -z "$ES_EXTERNAL_HOSTS" ]]; then
# external ES host destination is not specified, remove external destination from enrichment pipeline output
- ELASTICSEARCH_OUTPUT_PIPELINE_ADDRESSES="$(echo "$ELASTICSEARCH_OUTPUT_PIPELINE_ADDRESSES" | sed "s/,[[:blank:]]*$ELASTICSEARCH_PIPELINE_ADDRESS_EXTERNAL//")"
+ OPENSEARCH_OUTPUT_PIPELINE_ADDRESSES="$(echo "$OPENSEARCH_OUTPUT_PIPELINE_ADDRESSES" | sed "s/,[[:blank:]]*$OPENSEARCH_PIPELINE_ADDRESS_EXTERNAL//")"
fi
# insert quotes around the elasticsearch parsing and output pipeline list
MALCOLM_PARSE_PIPELINE_ADDRESSES=$(printf '"%s"\n' "${PARSE_PIPELINE_ADDRESSES//,/\",\"}")
-MALCOLM_ELASTICSEARCH_OUTPUT_PIPELINES=$(printf '"%s"\n' "${ELASTICSEARCH_OUTPUT_PIPELINE_ADDRESSES//,/\",\"}")
+MALCOLM_OPENSEARCH_OUTPUT_PIPELINES=$(printf '"%s"\n' "${OPENSEARCH_OUTPUT_PIPELINE_ADDRESSES//,/\",\"}")
# do a manual global replace on these particular values in the config files, as Logstash doesn't like the environment variables with quotes in them
-find "$PIPELINES_DIR" -type f -name "*.conf" -exec sed -i "s/_MALCOLM_ELASTICSEARCH_OUTPUT_PIPELINES_/${MALCOLM_ELASTICSEARCH_OUTPUT_PIPELINES}/g" "{}" \; 2>/dev/null
+find "$PIPELINES_DIR" -type f -name "*.conf" -exec sed -i "s/_MALCOLM_OPENSEARCH_OUTPUT_PIPELINES_/${MALCOLM_OPENSEARCH_OUTPUT_PIPELINES}/g" "{}" \; 2>/dev/null
find "$PIPELINES_DIR" -type f -name "*.conf" -exec sed -i "s/_MALCOLM_PARSE_PIPELINE_ADDRESSES_/${MALCOLM_PARSE_PIPELINE_ADDRESSES}/g" "{}" \; 2>/dev/null
# import trusted CA certificates if necessary
diff --git a/malcolm-iso/build.sh b/malcolm-iso/build.sh
index b745abe83..5b0245742 100755
--- a/malcolm-iso/build.sh
+++ b/malcolm-iso/build.sh
@@ -97,8 +97,8 @@ if [ -d "$WORKDIR" ]; then
mkdir -p "$MALCOLM_DEST_DIR/htadmin/"
mkdir -p "$MALCOLM_DEST_DIR/logstash/certs/"
mkdir -p "$MALCOLM_DEST_DIR/filebeat/certs/"
- mkdir -p "$MALCOLM_DEST_DIR/elasticsearch/nodes/"
- mkdir -p "$MALCOLM_DEST_DIR/elasticsearch-backup/"
+ mkdir -p "$MALCOLM_DEST_DIR/opensearch/nodes/"
+ mkdir -p "$MALCOLM_DEST_DIR/opensearch-backup/"
mkdir -p "$MALCOLM_DEST_DIR/moloch-raw/"
mkdir -p "$MALCOLM_DEST_DIR/moloch-logs/"
mkdir -p "$MALCOLM_DEST_DIR/pcap/upload/"
diff --git a/moloch/etc/config.ini b/moloch/etc/config.ini
index ec9545245..f72295f6c 100644
--- a/moloch/etc/config.ini
+++ b/moloch/etc/config.ini
@@ -4,7 +4,7 @@
#
[default]
-elasticsearch=http://elasticsearch:9200
+elasticsearch=http://opensearch:9200
cronQueries=true
rotateIndex=daily
passwordSecret=Malcolm
diff --git a/moloch/moloch_regression_test_harness/docker-compose.yml b/moloch/moloch_regression_test_harness/docker-compose.yml
index 9aa4b544b..c205d0a7a 100644
--- a/moloch/moloch_regression_test_harness/docker-compose.yml
+++ b/moloch/moloch_regression_test_harness/docker-compose.yml
@@ -1,15 +1,14 @@
version: '3.7'
services:
- elasticsearch:
-    image: docker.elastic.co/elasticsearch/elasticsearch:7.10.2
+ opensearch:
+ image: opensearchproject/opensearch:1.0.0
restart: "no"
environment:
logger.level : 'INFO'
bootstrap.memory_lock : 'true'
- ES_JAVA_OPTS : '-Xms4g -Xmx4g -Xss256k -Djava.security.egd=file:/dev/./urandom'
+ OPENSEARCH_JAVA_OPTS : '-Xms4g -Xmx4g -Xss256k -Djava.security.egd=file:/dev/./urandom'
discovery.type : 'single-node'
- # cluster.initial_master_nodes : 'elasticsearch'
cluster.routing.allocation.disk.threshold_enabled : 'false'
cluster.routing.allocation.node_initial_primaries_recoveries : 8
expose:
@@ -38,7 +37,7 @@ services:
soft: -1
hard: -1
depends_on:
- - elasticsearch
+ - opensearch
expose:
- 8000
- 8005
diff --git a/moloch/scripts/initmoloch.sh b/moloch/scripts/initmoloch.sh
index d3eeb9953..83631830b 100755
--- a/moloch/scripts/initmoloch.sh
+++ b/moloch/scripts/initmoloch.sh
@@ -5,7 +5,7 @@
rm -f /var/run/moloch/initialized /var/run/moloch/runwise
echo "Giving Elasticsearch time to start..."
-/data/elastic_search_status.sh 2>&1 && echo "Elasticsearch is running!"
+/data/opensearch_status.sh 2>&1 && echo "Elasticsearch is running!"
# download and/or update geo updates
$ARKIMEDIR/bin/moloch_update_geo.sh
@@ -25,11 +25,11 @@ if [[ "$WISE" = "on" ]] ; then
fi
# initialize the contents of the Elasticsearch database if it has never been initialized (i.e., the users_v# table hasn't been created)
-if [[ $(curl -fs -XGET -H'Content-Type: application/json' "http://$ES_HOST:$ES_PORT/_cat/indices/users_v*" | wc -l) < 1 ]]; then
+if [[ $(curl -fs -XGET -H'Content-Type: application/json' "http://$ES_HOST:$OS_PORT/_cat/indices/users_v*" | wc -l) < 1 ]]; then
echo "Initializing Elasticsearch database..."
- $ARKIMEDIR/db/db.pl http://$ES_HOST:$ES_PORT initnoprompt
+ $ARKIMEDIR/db/db.pl http://$ES_HOST:$OS_PORT initnoprompt
# this password isn't going to be used by Arkime, nginx will do the auth instead
$ARKIMEDIR/bin/moloch_add_user.sh "${MALCOLM_USERNAME}" "${MALCOLM_USERNAME}" "ignored" --admin --webauthonly --webauth
@@ -40,7 +40,7 @@ if [[ $(curl -fs -XGET -H'Content-Type: application/json' "http://$ES_HOST:$ES_P
rm -f /tmp/not_a_packet.pcap
#set some default settings I want for moloch
- curl -sS -H'Content-Type: application/json' -XPOST http://$ES_HOST:$ES_PORT/users_v7/user/$MALCOLM_USERNAME/_update -d "@$ARKIMEDIR/etc/user_settings.json"
+ curl -sS -H'Content-Type: application/json' -XPOST http://$ES_HOST:$OS_PORT/users_v7/user/$MALCOLM_USERNAME/_update -d "@$ARKIMEDIR/etc/user_settings.json"
echo -e "\nElasticsearch database initialized!\n"
@@ -50,7 +50,7 @@ else
if /data/moloch-needs-upgrade.sh 2>&1; then
echo "Elasticsearch database needs to be upgraded for $ARKIME_VERSION!"
- $ARKIMEDIR/db/db.pl http://$ES_HOST:$ES_PORT upgradenoprompt
+ $ARKIMEDIR/db/db.pl http://$ES_HOST:$OS_PORT upgradenoprompt
echo "Elasticsearch database upgrade complete!"
echo
@@ -64,11 +64,11 @@ fi # if/else Elasticsearch database initialized
# increase Elasticsearch max shards per node from default if desired
if [[ -n $ES_MAX_SHARDS_PER_NODE ]]; then
# see https://github.com/elastic/elasticsearch/issues/40803
- curl -sS -H'Content-Type: application/json' -XPUT http://$ES_HOST:$ES_PORT/_cluster/settings -d "{ \"persistent\": { \"cluster.max_shards_per_node\": \"$ES_MAX_SHARDS_PER_NODE\" } }"
+ curl -sS -H'Content-Type: application/json' -XPUT http://$ES_HOST:$OS_PORT/_cluster/settings -d "{ \"persistent\": { \"cluster.max_shards_per_node\": \"$ES_MAX_SHARDS_PER_NODE\" } }"
fi
# before running viewer, call _refresh to make sure everything is available for search first
-curl -sS -XPOST http://$ES_HOST:$ES_PORT/_refresh
+curl -sS -XPOST http://$ES_HOST:$OS_PORT/_refresh
touch /var/run/moloch/initialized
diff --git a/moloch/scripts/moloch-needs-upgrade.sh b/moloch/scripts/moloch-needs-upgrade.sh
index 6304ed496..60f8eae6b 100755
--- a/moloch/scripts/moloch-needs-upgrade.sh
+++ b/moloch/scripts/moloch-needs-upgrade.sh
@@ -40,6 +40,6 @@ while read INDEX_NAME; do
fi # compare INDEX_NAME vs. INDEX_PREFIX
done # loop over ARKIME_INDEX_CURRENT_VERSIONS
-done <<<$(curl -fsS -H"Content-Type: application/json" -XGET "http://$ES_HOST:$ES_PORT/_cat/indices?v" | tail -n +2 | awk '{print $3}')
+done <<<$(curl -fsS -H"Content-Type: application/json" -XGET "http://$ES_HOST:$OS_PORT/_cat/indices?v" | tail -n +2 | awk '{print $3}')
exit $RETURN_CODE
diff --git a/moloch/scripts/wipemoloch.sh b/moloch/scripts/wipemoloch.sh
index 883cce832..65885eba6 100755
--- a/moloch/scripts/wipemoloch.sh
+++ b/moloch/scripts/wipemoloch.sh
@@ -4,10 +4,10 @@
echo "Checking Elasticsearch..."
-/data/elastic_search_status.sh 2>&1 && echo "Elasticsearch is running!"
+/data/opensearch_status.sh 2>&1 && echo "Elasticsearch is running!"
#Wipe is the same as initialize except it keeps users intact
-echo WIPE | /data/moloch/db/db.pl http://$ES_HOST:$ES_PORT wipe
+echo WIPE | /data/moloch/db/db.pl http://$ES_HOST:$OS_PORT wipe
#this is a hacky way to get all of the parseable field definitions put into E.S.
touch /tmp/not_a_packet.pcap
diff --git a/nginx/nginx.conf b/nginx/nginx.conf
index e0a8140a7..d2fb7a2fc 100644
--- a/nginx/nginx.conf
+++ b/nginx/nginx.conf
@@ -53,8 +53,8 @@ http {
server kibana-helper:28991;
}
- upstream docker-elasticsearch {
- server elasticsearch:9200;
+ upstream docker-opensearch {
+ server opensearch:9200;
}
upstream docker-logstash-stats {
@@ -284,7 +284,7 @@ http {
}
}
- # Elasticsearch API
+ # OpenSearch API
server {
listen 9200 ssl;
ssl_certificate /etc/nginx/certs/cert.pem;
@@ -298,7 +298,7 @@ http {
}
location / {
- proxy_pass http://docker-elasticsearch;
+ proxy_pass http://docker-opensearch;
proxy_redirect off;
proxy_set_header Host es.malcolm.local;
client_max_body_size 50m;
diff --git a/elasticsearch-backup/.gitignore b/opensearch-backup/.gitignore
similarity index 100%
rename from elasticsearch-backup/.gitignore
rename to opensearch-backup/.gitignore
diff --git a/elasticsearch/.gitignore b/opensearch/.gitignore
similarity index 100%
rename from elasticsearch/.gitignore
rename to opensearch/.gitignore
diff --git a/pcap-monitor/supervisord.conf b/pcap-monitor/supervisord.conf
index 69aad428d..389c363a1 100644
--- a/pcap-monitor/supervisord.conf
+++ b/pcap-monitor/supervisord.conf
@@ -30,7 +30,7 @@ redirect_stderr=true
command=python3 /usr/local/bin/pcap_watcher.py
--verbose "%(ENV_PCAP_PIPELINE_DEBUG)s"
--extra-verbose "%(ENV_PCAP_PIPELINE_DEBUG_EXTRA)s"
- --elasticsearch "%(ENV_ELASTICSEARCH_URL)s"
+ --elasticsearch "%(ENV_OPENSEARCH_URL)s"
--elasticsearch-wait
--moloch-node arkime
--ignore-existing "%(ENV_PCAP_PIPELINE_IGNORE_PREEXISTING)s"
diff --git a/scripts/beats/README.md b/scripts/beats/README.md
index d1356c959..c51714c38 100644
--- a/scripts/beats/README.md
+++ b/scripts/beats/README.md
@@ -89,9 +89,9 @@ Enter password for sensor (again):
Generated keystore for winlogbeat
BEAT_KIBANA_SSL_VERIFY
-BEAT_ES_HOST
-BEAT_ES_PROTOCOL
-BEAT_ES_SSL_VERIFY
+BEAT_OS_HOST
+BEAT_OS_PROTOCOL
+BEAT_OS_SSL_VERIFY
BEAT_KIBANA_HOST
BEAT_HTTP_PASSWORD
BEAT_HTTP_USERNAME
@@ -151,11 +151,11 @@ Enter password for sensor (again):
Generated keystore for filebeat
BEAT_KIBANA_PROTOCOL
BEAT_KIBANA_SSL_VERIFY
-BEAT_ES_PROTOCOL
-BEAT_ES_SSL_VERIFY
+BEAT_OS_PROTOCOL
+BEAT_OS_SSL_VERIFY
BEAT_KIBANA_DASHBOARDS_ENABLED
BEAT_KIBANA_DASHBOARDS_PATH
-BEAT_ES_HOST
+BEAT_OS_HOST
BEAT_HTTP_PASSWORD
BEAT_HTTP_USERNAME
BEAT_KIBANA_HOST
diff --git a/scripts/beats/beat_common.py b/scripts/beats/beat_common.py
index bc614f32e..c28ae2e7a 100644
--- a/scripts/beats/beat_common.py
+++ b/scripts/beats/beat_common.py
@@ -47,9 +47,9 @@
OPERATION_RUN = 'run'
OPERATION_CONFIGURE = 'config'
-BEAT_ES_HOST = "BEAT_ES_HOST"
-BEAT_ES_PROTOCOL = "BEAT_ES_PROTOCOL"
-BEAT_ES_SSL_VERIFY = "BEAT_ES_SSL_VERIFY"
+BEAT_OS_HOST = "BEAT_OS_HOST"
+BEAT_OS_PROTOCOL = "BEAT_OS_PROTOCOL"
+BEAT_OS_SSL_VERIFY = "BEAT_OS_SSL_VERIFY"
BEAT_HTTP_PASSWORD = "BEAT_HTTP_PASSWORD"
BEAT_HTTP_USERNAME = "BEAT_HTTP_USERNAME"
BEAT_KIBANA_DASHBOARDS_ENABLED = "BEAT_KIBANA_DASHBOARDS_ENABLED"
@@ -67,11 +67,11 @@
#-------------------------- Elasticsearch output -------------------------------
output.elasticsearch:
enabled: true
- hosts: ["${BEAT_ES_HOST}"]
- protocol: "${BEAT_ES_PROTOCOL}"
+ hosts: ["${BEAT_OS_HOST}"]
+ protocol: "${BEAT_OS_PROTOCOL}"
username: "${BEAT_HTTP_USERNAME}"
password: "${BEAT_HTTP_PASSWORD}"
- ssl.verification_mode: "${BEAT_ES_SSL_VERIFY}"
+ ssl.verification_mode: "${BEAT_OS_SSL_VERIFY}"
setup.template.enabled: true
setup.template.overwrite: false
@@ -245,9 +245,9 @@ def __init__(self, debug=False, ymlFileSpec=None, beatName=None, acceptDefaults=
self.beatInstallDir = None
self.defaultKibanaDashboardDir = None
self.keystoreItems = defaultdict(str)
- for initItem in [BEAT_ES_HOST,
- BEAT_ES_PROTOCOL,
- BEAT_ES_SSL_VERIFY,
+ for initItem in [BEAT_OS_HOST,
+ BEAT_OS_PROTOCOL,
+ BEAT_OS_SSL_VERIFY,
BEAT_HTTP_PASSWORD,
BEAT_HTTP_USERNAME,
BEAT_KIBANA_DASHBOARDS_ENABLED,
@@ -295,8 +295,8 @@ def configure_beat_yml(self):
if (self.ymlFileSpec is not None):
if os.path.isfile(self.ymlFileSpec):
- # if it doesn't look like connectivity stuff (at last BEAT_ES_PROTOCOL) is in the YML file, offer to append it
- if ((len(list(filter(lambda x: BEAT_ES_PROTOCOL in x, [line.rstrip('\n') for line in open(self.ymlFileSpec)]))) == 0) and
+    # if it doesn't look like connectivity stuff (at least BEAT_OS_PROTOCOL) is in the YML file, offer to append it
+ if ((len(list(filter(lambda x: BEAT_OS_PROTOCOL in x, [line.rstrip('\n') for line in open(self.ymlFileSpec)]))) == 0) and
YesOrNo("Append connectivity boilerplate to {}?".format(self.ymlFileSpec), default=False, acceptDefault=self.acceptDefaults)):
with open(self.ymlFileSpec, 'a') as ymlFile:
ymlFile.write(BEAT_YML_TEMPLATE)
@@ -334,20 +334,20 @@ def configure_keystore(self):
while tmpVal not in ['http', 'https']:
tmpVal = AskForString("Enter {} connection protocol (http or https) [{}]".format(destination, tmpDefault), default=tmpDefault, acceptDefault=self.acceptDefaults).lower()
if (len(tmpVal) == 0): tmpVal = tmpDefault
- self.keystoreItems[BEAT_ES_PROTOCOL.replace('_ES_', '_KIBANA_' if (destination == 'Kibana') else '_ES_')] = tmpVal
+ self.keystoreItems[BEAT_OS_PROTOCOL.replace('_OS_', '_KIBANA_' if (destination == 'Kibana') else '_OS_')] = tmpVal
# SSL verification
tmpVal, tmpDefault = '', 'none'
while tmpVal not in ['none', 'full']:
tmpVal = AskForString("Enter {} SSL verification (none (for self-signed certificates) or full) [{}]".format(destination, tmpDefault), default=tmpDefault, acceptDefault=self.acceptDefaults).lower()
if (len(tmpVal) == 0): tmpVal = tmpDefault
- self.keystoreItems[BEAT_ES_SSL_VERIFY.replace('_ES_', '_KIBANA_' if (destination == 'Kibana') else '_ES_')] = tmpVal
+ self.keystoreItems[BEAT_OS_SSL_VERIFY.replace('_OS_', '_KIBANA_' if (destination == 'Kibana') else '_OS_')] = tmpVal
# host
tmpVal, tmpDefault = '', ''
while (len(tmpVal) == 0):
tmpVal = AskForString("Enter {} connection host".format(destination), default=tmpDefault, acceptDefault=self.acceptDefaults)
- self.keystoreItems[BEAT_ES_HOST.replace('_ES_', '_KIBANA_' if (destination == 'Kibana') else '_ES_')] = tmpVal
+ self.keystoreItems[BEAT_OS_HOST.replace('_OS_', '_KIBANA_' if (destination == 'Kibana') else '_OS_')] = tmpVal
if (BEAT_KIBANA_HOST in self.keystoreItems):
diff --git a/scripts/beats/linux_vm_example/auditbeat.yml b/scripts/beats/linux_vm_example/auditbeat.yml
index 9c4c6a908..b48c5ffcf 100644
--- a/scripts/beats/linux_vm_example/auditbeat.yml
+++ b/scripts/beats/linux_vm_example/auditbeat.yml
@@ -106,7 +106,7 @@ auditbeat.modules:
# - equals:
# event.dataset: 'socket'
# - equals:
-# destination.ip: "${BEAT_ES_HOST}"
+# destination.ip: "${BEAT_OS_HOST}"
# - and:
# - equals:
# event.module: 'system'
diff --git a/scripts/beats/windows_vm_example/Malcolm_Windows_Forwarder_Download_and_Config.ps1 b/scripts/beats/windows_vm_example/Malcolm_Windows_Forwarder_Download_and_Config.ps1
index 480ef9f62..42a065f6b 100644
--- a/scripts/beats/windows_vm_example/Malcolm_Windows_Forwarder_Download_and_Config.ps1
+++ b/scripts/beats/windows_vm_example/Malcolm_Windows_Forwarder_Download_and_Config.ps1
@@ -55,11 +55,11 @@ fields_under_root: true
#-------------------------- Elasticsearch output -------------------------------
output.elasticsearch:
enabled: true
- hosts: ["${BEAT_ES_HOST}"]
- protocol: "${BEAT_ES_PROTOCOL}"
+ hosts: ["${BEAT_OS_HOST}"]
+ protocol: "${BEAT_OS_PROTOCOL}"
username: "${BEAT_HTTP_USERNAME}"
password: "${BEAT_HTTP_PASSWORD}"
- ssl.verification_mode: "${BEAT_ES_SSL_VERIFY}"
+ ssl.verification_mode: "${BEAT_OS_SSL_VERIFY}"
setup.template.enabled: true
setup.template.overwrite: false
@@ -138,13 +138,13 @@ function Configure-Beat {
} while ($pwd1_text -ne $pwd2_text)
$es_pass = ([Runtime.InteropServices.Marshal]::PtrToStringAuto([Runtime.InteropServices.Marshal]::SecureStringToBSTR($es_pass))).Trim()
- Run-Beat-Command $beat @("keystore","add","BEAT_ES_PROTOCOL","--stdin","--force") "https"
+ Run-Beat-Command $beat @("keystore","add","BEAT_OS_PROTOCOL","--stdin","--force") "https"
Run-Beat-Command $beat @("keystore","add","BEAT_KIBANA_PROTOCOL","--stdin","--force") "https"
- Run-Beat-Command $beat @("keystore","add","BEAT_ES_SSL_VERIFY","--stdin","--force") "none"
+ Run-Beat-Command $beat @("keystore","add","BEAT_OS_SSL_VERIFY","--stdin","--force") "none"
Run-Beat-Command $beat @("keystore","add","BEAT_KIBANA_SSL_VERIFY","--stdin","--force") "none"
Run-Beat-Command $beat @("keystore","add","BEAT_KIBANA_DASHBOARDS_ENABLED","--stdin","--force") "true"
Run-Beat-Command $beat @("keystore","add","BEAT_KIBANA_DASHBOARDS_PATH","--stdin","--force") "C:\\Program Files\\$beat\\kibana"
- Run-Beat-Command $beat @("keystore","add","BEAT_ES_HOST","--stdin","--force") "$es_host"
+ Run-Beat-Command $beat @("keystore","add","BEAT_OS_HOST","--stdin","--force") "$es_host"
Run-Beat-Command $beat @("keystore","add","BEAT_KIBANA_HOST","--stdin","--force") "$kb_host"
Run-Beat-Command $beat @("keystore","add","BEAT_HTTP_USERNAME","--stdin","--force") "$es_user"
Run-Beat-Command $beat @("keystore","add","BEAT_HTTP_PASSWORD","--stdin","--force") "$es_pass"
diff --git a/scripts/control.py b/scripts/control.py
index 3fc15b7e6..5eb26ffaa 100755
--- a/scripts/control.py
+++ b/scripts/control.py
@@ -64,7 +64,7 @@ def keystore_op(service, dropPriv=False, *keystore_args, **run_process_kwargs):
err = -1
results = []
- # the elastic containers all follow the same naming pattern for these executables
+ # the opensearch containers all follow the same naming pattern for these executables
keystoreBinProc = f"/usr/share/{service}/bin/{service}-keystore"
# if we're using docker-uid-gid-setup.sh to drop privileges as we spin up a container
@@ -373,8 +373,8 @@ def stop(wipe=False):
osEnv['TMPDIR'] = MalcolmTmpPath
if wipe:
- # attempt to DELETE _template/zeek_template in Elasticsearch
- err, out = run_process([dockerComposeBin, '-f', args.composeFile, 'exec', 'arkime', 'bash', '-c', 'curl -fs --output /dev/null -H"Content-Type: application/json" -XDELETE "http://$ES_HOST:$ES_PORT/_template/zeek_template"'], env=osEnv, debug=args.debug)
+ # attempt to DELETE _template/zeek_template in OpenSearch
+ err, out = run_process([dockerComposeBin, '-f', args.composeFile, 'exec', 'arkime', 'bash', '-c', 'curl -fs --output /dev/null -H"Content-Type: application/json" -XDELETE "http://$ES_HOST:$OS_PORT/_template/zeek_template"'], env=osEnv, debug=args.debug)
# if stop.sh is being called with wipe.sh (after the docker-compose file)
# then also remove named and anonymous volumes (not external volumes, of course)
@@ -387,11 +387,11 @@ def stop(wipe=False):
exit(err)
if wipe:
- # delete elasticsearch database
- shutil.rmtree(os.path.join(MalcolmPath, 'elasticsearch/nodes'), ignore_errors=True)
+ # delete OpenSearch database
+ shutil.rmtree(os.path.join(MalcolmPath, 'opensearch/nodes'), ignore_errors=True)
# delete data files (backups, zeek logs, arkime logs, PCAP files, captured PCAP files)
- for dataDir in ['elasticsearch-backup', 'zeek-logs', 'moloch-logs', 'pcap', 'moloch-raw']:
+ for dataDir in ['opensearch-backup', 'zeek-logs', 'moloch-logs', 'pcap', 'moloch-raw']:
for root, dirnames, filenames in os.walk(os.path.join(MalcolmPath, dataDir), topdown=True, onerror=None):
for file in filenames:
fileSpec = os.path.join(root, file)
@@ -402,7 +402,7 @@ def stop(wipe=False):
pass
# clean up empty directories
- for dataDir in [os.path.join('elasticsearch-backup', 'logs'), os.path.join('zeek-logs', 'processed'), os.path.join('zeek-logs', 'current')]:
+ for dataDir in [os.path.join('opensearch-backup', 'logs'), os.path.join('zeek-logs', 'processed'), os.path.join('zeek-logs', 'current')]:
RemoveEmptyFolders(dataDir, removeRoot=False)
eprint("Malcolm has been stopped and its data cleared\n")
@@ -425,9 +425,9 @@ def start():
# touch the metadata file
open(os.path.join(MalcolmPath, os.path.join('htadmin', 'metadata')), 'a').close()
- # if the elasticsearch and logstash keystore don't exist exist, create empty ones
- if not os.path.isfile(os.path.join(MalcolmPath, os.path.join('elasticsearch', 'elasticsearch.keystore'))):
- keystore_op('elasticsearch', True, 'create')
+ # if the OpenSearch and Logstash keystores don't exist, create empty ones
+ if not os.path.isfile(os.path.join(MalcolmPath, os.path.join('opensearch', 'opensearch.keystore'))):
+ keystore_op('opensearch', True, 'create')
if not os.path.isfile(os.path.join(MalcolmPath, os.path.join('logstash', os.path.join('certs', 'logstash.keystore')))):
keystore_op('logstash', True, 'create')
@@ -442,8 +442,8 @@ def start():
os.chmod(authFile, stat.S_IRUSR | stat.S_IWUSR)
# make sure some directories exist before we start
- for path in [os.path.join(MalcolmPath, 'elasticsearch'),
- os.path.join(MalcolmPath, 'elasticsearch-backup'),
+ for path in [os.path.join(MalcolmPath, 'opensearch'),
+ os.path.join(MalcolmPath, 'opensearch-backup'),
os.path.join(MalcolmPath, os.path.join('nginx', 'ca-trust')),
os.path.join(MalcolmPath, os.path.join('pcap', 'upload')),
os.path.join(MalcolmPath, os.path.join('pcap', 'processed')),
@@ -712,12 +712,12 @@ def authSetup(wipe=False):
os.remove(oldfile)
# create and populate keystore for remote
- if YesOrNo('Store username/password for forwarding Logstash events to a secondary, external Elasticsearch instance', default=False):
+ if YesOrNo('Store username/password for forwarding Logstash events to a secondary, external OpenSearch instance', default=False):
# prompt username and password
esPassword = None
esPasswordConfirm = None
- esUsername = AskForString("External Elasticsearch username")
+ esUsername = AskForString("External OpenSearch username")
while True:
esPassword = AskForPassword(f"{esUsername} password: ")
@@ -735,19 +735,19 @@ def authSetup(wipe=False):
success, results = keystore_op('logstash', False, 'list')
results = [x.upper() for x in results if x and (not x.upper().startswith('WARNING')) and (not x.upper().startswith('KEYSTORE')) and (not x.upper().startswith('USING BUNDLED JDK'))]
if success and ('ES_EXTERNAL_USER' in results) and ('ES_EXTERNAL_PASSWORD' in results):
- eprint(f"External Elasticsearch instance variables stored: {', '.join(results)}")
+ eprint(f"External OpenSearch instance variables stored: {', '.join(results)}")
else:
- eprint("Failed to store external Elasticsearch instance variables:\n")
+ eprint("Failed to store external OpenSearch instance variables:\n")
eprint("\n".join(results))
- # Open Distro for Elasticsearch authenticate sender account credentials
+ # OpenSearch alerting: authenticate sender account credentials
# https://opendistro.github.io/for-elasticsearch-docs/docs/alerting/monitors/#authenticate-sender-account
if YesOrNo('Store username/password for email alert sender account (see https://opendistro.github.io/for-elasticsearch-docs/docs/alerting/monitors/#authenticate-sender-account)', default=False):
# prompt username and password
emailPassword = None
emailPasswordConfirm = None
- emailSender = AskForString("Open Distro alerting email sender name")
+ emailSender = AskForString("OpenSearch alerting email sender name")
emailUsername = AskForString("Email account username")
while True:
@@ -757,16 +757,16 @@ def authSetup(wipe=False):
break
eprint("Passwords do not match")
- # create elasticsearch keystore file, don't complain if it already exists, and set the keystore items
+ # create OpenSearch keystore file, don't complain if it already exists, and set the keystore items
usernameKey = f'opendistro.alerting.destination.email.{emailSender}.username'
passwordKey = f'opendistro.alerting.destination.email.{emailSender}.password'
- keystore_op('elasticsearch', True, 'create', stdin='N')
- keystore_op('elasticsearch', True, 'remove', usernameKey)
- keystore_op('elasticsearch', True, 'add', usernameKey, '--stdin', stdin=emailUsername)
- keystore_op('elasticsearch', True, 'remove', passwordKey)
- keystore_op('elasticsearch', True, 'add', passwordKey, '--stdin', stdin=emailPassword)
- success, results = keystore_op('elasticsearch', True, 'list')
+ keystore_op('opensearch', True, 'create', stdin='N')
+ keystore_op('opensearch', True, 'remove', usernameKey)
+ keystore_op('opensearch', True, 'add', usernameKey, '--stdin', stdin=emailUsername)
+ keystore_op('opensearch', True, 'remove', passwordKey)
+ keystore_op('opensearch', True, 'add', passwordKey, '--stdin', stdin=emailPassword)
+ success, results = keystore_op('opensearch', True, 'list')
results = [x for x in results if x and (not x.upper().startswith('WARNING')) and (not x.upper().startswith('KEYSTORE'))]
if success and (usernameKey in results) and (passwordKey in results):
eprint(f"Email alert sender account variables stored: {', '.join(results)}")
diff --git a/scripts/install.py b/scripts/install.py
index 047cfe680..904e9c073 100755
--- a/scripts/install.py
+++ b/scripts/install.py
@@ -194,7 +194,7 @@ def install_malcolm_files(self, malcolm_install_file):
return result, installPath
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- def tweak_malcolm_runtime(self, malcolm_install_path, expose_elastic_default=False, expose_logstash_default=False, restart_mode_default=False):
+ def tweak_malcolm_runtime(self, malcolm_install_path, expose_opensearch_default=False, expose_logstash_default=False, restart_mode_default=False):
global args
if not args.configFile:
@@ -229,32 +229,32 @@ def tweak_malcolm_runtime(self, malcolm_install_path, expose_elastic_default=Fal
eprint(f"{malcolm_install_path} contains {composeFiles}, system memory is {self.totalMemoryGigs} GiB")
if self.totalMemoryGigs >= 63.0:
- esMemory = '30g'
+ osMemory = '30g'
lsMemory = '6g'
elif self.totalMemoryGigs >= 31.0:
- esMemory = '21g'
+ osMemory = '21g'
lsMemory = '3500m'
elif self.totalMemoryGigs >= 15.0:
- esMemory = '10g'
+ osMemory = '10g'
lsMemory = '3g'
elif self.totalMemoryGigs >= 11.0:
- esMemory = '6g'
+ osMemory = '6g'
lsMemory = '2500m'
elif self.totalMemoryGigs >= 7.0:
eprint(f"Detected only {self.totalMemoryGigs} GiB of memory; performance will be suboptimal")
- esMemory = '4g'
+ osMemory = '4g'
lsMemory = '2500m'
elif self.totalMemoryGigs > 0.0:
eprint(f"Detected only {self.totalMemoryGigs} GiB of memory; performance will be suboptimal")
- esMemory = '3500m'
+ osMemory = '3500m'
lsMemory = '2g'
else:
eprint("Failed to determine system memory size, using defaults; performance may be suboptimal")
- esMemory = '8g'
+ osMemory = '8g'
lsMemory = '3g'
- while not InstallerYesOrNo(f'Setting {esMemory} for Elasticsearch and {lsMemory} for Logstash. Is this OK?', default=True):
- esMemory = InstallerAskForString('Enter memory for Elasticsearch (e.g., 16g, 9500m, etc.)')
+ while not InstallerYesOrNo(f'Setting {osMemory} for OpenSearch and {lsMemory} for Logstash. Is this OK?', default=True):
+ osMemory = InstallerAskForString('Enter memory for OpenSearch (e.g., 16g, 9500m, etc.)')
lsMemory = InstallerAskForString('Enter memory for LogStash (e.g., 4g, 2500m, etc.)')
restartMode = None
@@ -292,16 +292,16 @@ def tweak_malcolm_runtime(self, malcolm_install_path, expose_elastic_default=Fal
indexPruneSizeLimit = '0'
indexPruneNameSort = False
- if InstallerYesOrNo('Configure Elasticsearch index state management?', default=False):
+ if InstallerYesOrNo('Configure OpenSearch index state management?', default=False):
# configure snapshots
if InstallerYesOrNo('Configure index snapshots?', default=False):
# snapshot repository directory and compression
- indexSnapshotDir = './elasticsearch-backup'
- if not InstallerYesOrNo('Store snapshots locally in {}?'.format(os.path.join(malcolm_install_path, 'elasticsearch-backup')), default=True):
+ indexSnapshotDir = './opensearch-backup'
+ if not InstallerYesOrNo('Store snapshots locally in {}?'.format(os.path.join(malcolm_install_path, 'opensearch-backup')), default=True):
while True:
- indexSnapshotDir = InstallerAskForString('Enter Elasticsearch index snapshot directory')
+ indexSnapshotDir = InstallerAskForString('Enter OpenSearch index snapshot directory')
if (len(indexSnapshotDir) > 1) and os.path.isdir(indexSnapshotDir):
indexSnapshotDir = os.path.realpath(indexSnapshotDir)
break
@@ -341,12 +341,12 @@ def tweak_malcolm_runtime(self, malcolm_install_path, expose_elastic_default=Fal
reverseDns = InstallerYesOrNo('Perform reverse DNS lookup locally for source and destination IP addresses in Zeek logs?', default=False)
autoOui = InstallerYesOrNo('Perform hardware vendor OUI lookups for MAC addresses?', default=True)
autoFreq = InstallerYesOrNo('Perform string randomness scoring on some fields?', default=True)
- elasticOpen = InstallerYesOrNo('Expose Elasticsearch port to external hosts?', default=expose_elastic_default)
+ opensearchOpen = InstallerYesOrNo('Expose OpenSearch port to external hosts?', default=expose_opensearch_default)
logstashOpen = InstallerYesOrNo('Expose Logstash port to external hosts?', default=expose_logstash_default)
logstashSsl = logstashOpen and InstallerYesOrNo('Should Logstash require SSL for Zeek logs? (Note: This requires the forwarder to be similarly configured and a corresponding copy of the client SSL files.)', default=True)
- externalEsForward = InstallerYesOrNo('Forward Logstash logs to external Elasticstack instance?', default=False)
+ externalEsForward = InstallerYesOrNo('Forward Logstash logs to external OpenSearch instance?', default=False)
if externalEsForward:
- externalEsHost = InstallerAskForString('Enter external Elasticstack host:port (e.g., 10.0.0.123:9200)')
+ externalEsHost = InstallerAskForString('Enter external OpenSearch host:port (e.g., 10.0.0.123:9200)')
externalEsSsl = InstallerYesOrNo(f'Connect to "{externalEsHost}" using SSL?', default=True)
externalEsSslVerify = externalEsSsl and InstallerYesOrNo(f'Require SSL certificate validation for communication with "{externalEsHost}"?', default=False)
else:
@@ -437,7 +437,7 @@ def tweak_malcolm_runtime(self, malcolm_install_path, expose_elastic_default=Fal
serviceStartLine = True
if (currentService is not None) and (restartMode is not None) and re.match(r'^\s*restart\s*:.*$', line):
- # elasticsearch backup directory
+ # container restart policy for the service
line = f"{serviceIndent * 2}restart: {restartMode}"
elif 'PUID' in line:
# process UID
@@ -481,9 +481,9 @@ def tweak_malcolm_runtime(self, malcolm_install_path, expose_elastic_default=Fal
elif 'PCAP_IFACE' in line:
# capture interface(s)
line = re.sub(r'(PCAP_IFACE\s*:\s*)(\S+)', fr"\g<1>'{pcapIface}'", line)
- elif 'ES_JAVA_OPTS' in line:
- # elasticsearch memory allowance
- line = re.sub(r'(-Xm[sx])(\w+)', fr'\g<1>{esMemory}', line)
+ elif 'OS_JAVA_OPTS' in line:
+ # OpenSearch memory allowance
+ line = re.sub(r'(-Xm[sx])(\w+)', fr'\g<1>{osMemory}', line)
elif 'LS_JAVA_OPTS' in line:
# logstash memory allowance
line = re.sub(r'(-Xm[sx])(\w+)', fr'\g<1>{lsMemory}', line)
@@ -502,41 +502,41 @@ def tweak_malcolm_runtime(self, malcolm_install_path, expose_elastic_default=Fal
elif 'BEATS_SSL' in line:
# enable/disable beats SSL
line = re.sub(r'(BEATS_SSL\s*:\s*)(\S+)', fr"\g<1>{TrueOrFalseQuote(logstashOpen and logstashSsl)}", line)
- elif (currentService == 'elasticsearch') and re.match(r'^\s*-.+:/opt/elasticsearch/backup(:.+)?\s*$', line) and (indexSnapshotDir is not None) and os.path.isdir(indexSnapshotDir):
- # elasticsearch backup directory
+ elif (currentService == 'opensearch') and re.match(r'^\s*-.+:/opt/opensearch/backup(:.+)?\s*$', line) and (indexSnapshotDir is not None) and os.path.isdir(indexSnapshotDir):
+ # OpenSearch backup directory
volumeParts = line.strip().lstrip('-').lstrip().split(':')
volumeParts[0] = indexSnapshotDir
line = "{}- {}".format(serviceIndent * 3, ':'.join(volumeParts))
elif 'ISM_SNAPSHOT_AGE' in line:
- # elasticsearch index state management snapshot age
+ # OpenSearch index state management snapshot age
line = re.sub(r'(ISM_SNAPSHOT_AGE\s*:\s*)(\S+)', fr"\g<1>'{indexSnapshotAge}'", line)
elif 'ISM_COLD_AGE' in line:
- # elasticsearch index state management cold (read-only) age
+ # OpenSearch index state management cold (read-only) age
line = re.sub(r'(ISM_COLD_AGE\s*:\s*)(\S+)', fr"\g<1>'{indexColdAge}'", line)
elif 'ISM_CLOSE_AGE' in line:
- # elasticsearch index state management close age
+ # OpenSearch index state management close age
line = re.sub(r'(ISM_CLOSE_AGE\s*:\s*)(\S+)', fr"\g<1>'{indexCloseAge}'", line)
elif 'ISM_DELETE_AGE' in line:
- # elasticsearch index state management close age
+ # OpenSearch index state management delete age
line = re.sub(r'(ISM_DELETE_AGE\s*:\s*)(\S+)', fr"\g<1>'{indexDeleteAge}'", line)
elif 'ISM_SNAPSHOT_COMPRESSED' in line:
- # elasticsearch index state management snapshot compression
+ # OpenSearch index state management snapshot compression
line = re.sub(r'(ISM_SNAPSHOT_COMPRESSED\s*:\s*)(\S+)', fr"\g<1>{TrueOrFalseQuote(indexSnapshotCompressed)}", line)
- elif 'ELASTICSEARCH_INDEX_SIZE_PRUNE_LIMIT' in line:
+ elif 'OPENSEARCH_INDEX_SIZE_PRUNE_LIMIT' in line:
# delete based on index pattern size
- line = re.sub(r'(ELASTICSEARCH_INDEX_SIZE_PRUNE_LIMIT\s*:\s*)(\S+)', fr"\g<1>'{indexPruneSizeLimit}'", line)
- elif 'ELASTICSEARCH_INDEX_SIZE_PRUNE_NAME_SORT' in line:
+ line = re.sub(r'(OPENSEARCH_INDEX_SIZE_PRUNE_LIMIT\s*:\s*)(\S+)', fr"\g<1>'{indexPruneSizeLimit}'", line)
+ elif 'OPENSEARCH_INDEX_SIZE_PRUNE_NAME_SORT' in line:
# delete based on index pattern size (sorted by name vs. creation time)
- line = re.sub(r'(ELASTICSEARCH_INDEX_SIZE_PRUNE_NAME_SORT\s*:\s*)(\S+)', fr"\g<1>{TrueOrFalseQuote(indexPruneNameSort)}", line)
- elif 'ES_EXTERNAL_HOSTS' in line:
- # enable/disable forwarding Logstash to external Elasticsearch instance
- line = re.sub(r'(#\s*)?(ES_EXTERNAL_HOSTS\s*:\s*)(\S+)', fr"\g<2>'{externalEsHost}'", line)
- elif 'ES_EXTERNAL_SSL_CERTIFICATE_VERIFICATION' in line:
- # enable/disable SSL certificate verification for external Elasticsearch instance
- line = re.sub(r'(#\s*)?(ES_EXTERNAL_SSL_CERTIFICATE_VERIFICATION\s*:\s*)(\S+)', fr"\g<2>{TrueOrFalseQuote(externalEsSsl and externalEsSslVerify)}", line)
- elif 'ES_EXTERNAL_SSL' in line:
- # enable/disable SSL certificate verification for external Elasticsearch instance
- line = re.sub(r'(#\s*)?(ES_EXTERNAL_SSL\s*:\s*)(\S+)', fr"\g<2>{TrueOrFalseQuote(externalEsSsl)}", line)
+ line = re.sub(r'(OPENSEARCH_INDEX_SIZE_PRUNE_NAME_SORT\s*:\s*)(\S+)', fr"\g<1>{TrueOrFalseQuote(indexPruneNameSort)}", line)
+ elif 'OS_EXTERNAL_HOSTS' in line:
+ # enable/disable forwarding Logstash to external OpenSearch instance
+ line = re.sub(r'(#\s*)?(OS_EXTERNAL_HOSTS\s*:\s*)(\S+)', fr"\g<2>'{externalEsHost}'", line)
+ elif 'OS_EXTERNAL_SSL_CERTIFICATE_VERIFICATION' in line:
+ # enable/disable SSL certificate verification for external OpenSearch instance
+ line = re.sub(r'(#\s*)?(OS_EXTERNAL_SSL_CERTIFICATE_VERIFICATION\s*:\s*)(\S+)', fr"\g<2>{TrueOrFalseQuote(externalEsSsl and externalEsSslVerify)}", line)
+ elif 'OS_EXTERNAL_SSL' in line:
+ # enable/disable SSL for connections to the external OpenSearch instance
+ line = re.sub(r'(#\s*)?(OS_EXTERNAL_SSL\s*:\s*)(\S+)', fr"\g<2>{TrueOrFalseQuote(externalEsSsl)}", line)
elif logstashOpen and serviceStartLine and (currentService == 'logstash'):
# exposing logstash port 5044 to the world
print(line)
@@ -547,10 +547,10 @@ def tweak_malcolm_runtime(self, malcolm_install_path, expose_elastic_default=Fal
# remove previous/leftover/duplicate exposing logstash port 5044 to the world
skipLine = True
elif (not serviceStartLine) and (currentService == 'nginx-proxy') and re.match(r'^.*-.*\b9200:9200"?\s*$', line):
- # comment/uncomment port forwarding for elastic based on elasticOpen
+ # comment/uncomment port forwarding for OpenSearch based on opensearchOpen
leadingSpaces = len(line) - len(line.lstrip())
if leadingSpaces <= 0: leadingSpaces = 6
- line = f"{' ' * leadingSpaces}{'' if elasticOpen else '# '}{line.lstrip().lstrip('#').lstrip()}"
+ line = f"{' ' * leadingSpaces}{'' if opensearchOpen else '# '}{line.lstrip().lstrip('#').lstrip()}"
if not skipLine: print(line)
@@ -1239,7 +1239,7 @@ def main():
parser.add_argument('-f', '--configure-file', required=False, dest='configFile', metavar='', type=str, default='', help='Single docker-compose YML file to configure')
parser.add_argument('-d', '--defaults', dest='acceptDefaults', type=str2bool, nargs='?', const=True, default=False, help="Accept defaults to prompts without user interaction")
parser.add_argument('-l', '--logstash-expose', dest='exposeLogstash', type=str2bool, nargs='?', const=True, default=False, help="Expose Logstash port to external hosts")
- parser.add_argument('-e', '--elasticsearch-expose', dest='exposeElastic', type=str2bool, nargs='?', const=True, default=False, help="Expose Elasticsearch port to external hosts")
+ parser.add_argument('-e', '--opensearch-expose', dest='exposeOpenSearch', type=str2bool, nargs='?', const=True, default=False, help="Expose OpenSearch port to external hosts")
parser.add_argument('-r', '--restart-malcolm', dest='malcolmAutoRestart', type=str2bool, nargs='?', const=True, default=False, help="Restart Malcolm on system restart (unless-stopped)")
try:
@@ -1325,7 +1325,7 @@ def main():
success, installPath = installer.install_malcolm_files(malcolmFile)
if (installPath is not None) and os.path.isdir(installPath) and hasattr(installer, 'tweak_malcolm_runtime'):
- installer.tweak_malcolm_runtime(installPath, expose_elastic_default=args.exposeElastic, expose_logstash_default=args.exposeLogstash, restart_mode_default=args.malcolmAutoRestart)
+ installer.tweak_malcolm_runtime(installPath, expose_opensearch_default=args.exposeOpenSearch, expose_logstash_default=args.exposeLogstash, restart_mode_default=args.malcolmAutoRestart)
eprint(f"\nMalcolm has been installed to {installPath}. See README.md for more information.")
eprint(f"Scripts for starting and stopping Malcolm and changing authentication-related settings can be found in {os.path.join(installPath, 'scripts')}.")
diff --git a/scripts/malcolm_appliance_packager.sh b/scripts/malcolm_appliance_packager.sh
index b35e7a758..c24ebcb17 100755
--- a/scripts/malcolm_appliance_packager.sh
+++ b/scripts/malcolm_appliance_packager.sh
@@ -66,8 +66,8 @@ if mkdir "$DESTDIR"; then
mkdir $VERBOSE -p "$DESTDIR/htadmin/"
mkdir $VERBOSE -p "$DESTDIR/logstash/certs/"
mkdir $VERBOSE -p "$DESTDIR/filebeat/certs/"
- mkdir $VERBOSE -p "$DESTDIR/elasticsearch/nodes/"
- mkdir $VERBOSE -p "$DESTDIR/elasticsearch-backup/"
+ mkdir $VERBOSE -p "$DESTDIR/opensearch/nodes/"
+ mkdir $VERBOSE -p "$DESTDIR/opensearch-backup/"
mkdir $VERBOSE -p "$DESTDIR/moloch-raw/"
mkdir $VERBOSE -p "$DESTDIR/moloch-logs/"
mkdir $VERBOSE -p "$DESTDIR/pcap/upload/"
diff --git a/sensor-iso/config/hooks/normal/0910-sensor-build.hook.chroot b/sensor-iso/config/hooks/normal/0910-sensor-build.hook.chroot
index e23ddd374..6ef767fb0 100755
--- a/sensor-iso/config/hooks/normal/0910-sensor-build.hook.chroot
+++ b/sensor-iso/config/hooks/normal/0910-sensor-build.hook.chroot
@@ -26,7 +26,7 @@ BEATS_DEB_URL_TEMPLATE="https://artifacts.elastic.co/downloads/beats/$BEATS_DEB_
YARA_VERSION="4.1.1"
YARA_URL="https://github.com/VirusTotal/yara/archive/v${YARA_VERSION}.tar.gz"
YARA_RULES_URL="https://codeload.github.com/Neo23x0/signature-base/tar.gz/master"
YARA_RULES_DIR="/opt/yara-rules"
CAPA_VERSION="1.6.3"
@@ -134,7 +134,7 @@ cd /tmp
rm -rf /usr/local/src/yara*
mkdir -p ./Neo23x0
curl -sSL "$YARA_RULES_URL" | tar xzvf - -C ./Neo23x0 --strip-components 1
mkdir -p "${YARA_RULES_DIR}"/custom
cp ./Neo23x0/yara/* ./Neo23x0/vendor/yara/* "${YARA_RULES_DIR}"/
cp ./Neo23x0/LICENSE "${YARA_RULES_DIR}"/_LICENSE
diff --git a/sensor-iso/interface/sensor_ctl/auditbeat/auditbeat.yml b/sensor-iso/interface/sensor_ctl/auditbeat/auditbeat.yml
index 19430dfd5..4f2021f6e 100644
--- a/sensor-iso/interface/sensor_ctl/auditbeat/auditbeat.yml
+++ b/sensor-iso/interface/sensor_ctl/auditbeat/auditbeat.yml
@@ -220,9 +220,9 @@ auditbeat.modules:
# - equals:
# event.dataset: 'socket'
# - equals:
-# destination.ip: "${BEAT_ES_HOST}"
+# destination.ip: "${BEAT_OS_HOST}"
# - equals:
-# destination.port: "${BEAT_ES_PORT}"
+# destination.port: "${BEAT_OS_PORT}"
# - and:
# - equals:
# event.module: 'system'
@@ -295,11 +295,11 @@ fields_under_root: true
#-------------------------- Elasticsearch output -------------------------------
output.elasticsearch:
enabled: true
- hosts: ["${BEAT_ES_HOST}:${BEAT_ES_PORT}"]
- protocol: "${BEAT_ES_PROTOCOL}"
+ hosts: ["${BEAT_OS_HOST}:${BEAT_OS_PORT}"]
+ protocol: "${BEAT_OS_PROTOCOL}"
username: "${BEAT_HTTP_USERNAME}"
password: "${BEAT_HTTP_PASSWORD}"
- ssl.verification_mode: "${BEAT_ES_SSL_VERIFY}"
+ ssl.verification_mode: "${BEAT_OS_SSL_VERIFY}"
setup.template.enabled: true
setup.template.overwrite: false
diff --git a/sensor-iso/interface/sensor_ctl/control_vars.conf b/sensor-iso/interface/sensor_ctl/control_vars.conf
index f2c83554f..1886acfa4 100644
--- a/sensor-iso/interface/sensor_ctl/control_vars.conf
+++ b/sensor-iso/interface/sensor_ctl/control_vars.conf
@@ -58,7 +58,7 @@ export ZEEK_DISABLE_SPICY_WIREGUARD=
# affects Arkime only for now: beats values are stored in keystores per-beat
export ES_PROTOCOL=https
export ES_HOST=127.0.0.1
-export ES_PORT=9200
+export OS_PORT=9200
export ES_USERNAME=sensor
export ES_PASSWORD=%70%61%73%73%77%6F%72%64
export ES_SSL_VERIFY=none
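The ES_PASSWORD value above is stored percent-encoded; this particular placeholder decodes to "password". A one-line standard-library check:

from urllib.parse import unquote
assert unquote('%70%61%73%73%77%6F%72%64') == 'password'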
diff --git a/sensor-iso/interface/sensor_ctl/filebeat-syslog/filebeat-syslog.yml b/sensor-iso/interface/sensor_ctl/filebeat-syslog/filebeat-syslog.yml
index 8f33c9969..4fc7653b7 100644
--- a/sensor-iso/interface/sensor_ctl/filebeat-syslog/filebeat-syslog.yml
+++ b/sensor-iso/interface/sensor_ctl/filebeat-syslog/filebeat-syslog.yml
@@ -11,11 +11,11 @@ fields_under_root: true
#-------------------------- Elasticsearch output -------------------------------
output.elasticsearch:
enabled: true
- hosts: ["${BEAT_ES_HOST}:${BEAT_ES_PORT}"]
- protocol: "${BEAT_ES_PROTOCOL}"
+ hosts: ["${BEAT_OS_HOST}:${BEAT_OS_PORT}"]
+ protocol: "${BEAT_OS_PROTOCOL}"
username: "${BEAT_HTTP_USERNAME}"
password: "${BEAT_HTTP_PASSWORD}"
- ssl.verification_mode: "${BEAT_ES_SSL_VERIFY}"
+ ssl.verification_mode: "${BEAT_OS_SSL_VERIFY}"
setup.template.enabled: true
setup.template.overwrite: false
diff --git a/sensor-iso/interface/sensor_ctl/heatbeat/protologbeat.yml b/sensor-iso/interface/sensor_ctl/heatbeat/protologbeat.yml
index da4553b9f..fc9e0cf2c 100644
--- a/sensor-iso/interface/sensor_ctl/heatbeat/protologbeat.yml
+++ b/sensor-iso/interface/sensor_ctl/heatbeat/protologbeat.yml
@@ -26,11 +26,11 @@ setup.template.json.name: "protologbeat"
#-------------------------- Elasticsearch output -------------------------------
output.elasticsearch:
enabled: true
- hosts: ["${BEAT_ES_HOST}:${BEAT_ES_PORT}"]
- protocol: "${BEAT_ES_PROTOCOL}"
+ hosts: ["${BEAT_OS_HOST}:${BEAT_OS_PORT}"]
+ protocol: "${BEAT_OS_PROTOCOL}"
username: "${BEAT_HTTP_USERNAME}"
password: "${BEAT_HTTP_PASSWORD}"
- ssl.verification_mode: "${BEAT_ES_SSL_VERIFY}"
+ ssl.verification_mode: "${BEAT_OS_SSL_VERIFY}"
template.versions.2x.enabled: false
#============================== Dashboards =====================================
diff --git a/sensor-iso/interface/sensor_ctl/metricbeat/metricbeat.yml b/sensor-iso/interface/sensor_ctl/metricbeat/metricbeat.yml
index d3b4651fb..fd14ba164 100644
--- a/sensor-iso/interface/sensor_ctl/metricbeat/metricbeat.yml
+++ b/sensor-iso/interface/sensor_ctl/metricbeat/metricbeat.yml
@@ -47,11 +47,11 @@ fields_under_root: true
#-------------------------- Elasticsearch output -------------------------------
output.elasticsearch:
enabled: true
- hosts: ["${BEAT_ES_HOST}:${BEAT_ES_PORT}"]
- protocol: "${BEAT_ES_PROTOCOL}"
+ hosts: ["${BEAT_OS_HOST}:${BEAT_OS_PORT}"]
+ protocol: "${BEAT_OS_PROTOCOL}"
username: "${BEAT_HTTP_USERNAME}"
password: "${BEAT_HTTP_PASSWORD}"
- ssl.verification_mode: "${BEAT_ES_SSL_VERIFY}"
+ ssl.verification_mode: "${BEAT_OS_SSL_VERIFY}"
setup.template.enabled: true
setup.template.overwrite: false
diff --git a/sensor-iso/interface/sensor_ctl/supervisor.init/moloch_config_populate.sh b/sensor-iso/interface/sensor_ctl/supervisor.init/moloch_config_populate.sh
index cedae153e..a94e6f952 100755
--- a/sensor-iso/interface/sensor_ctl/supervisor.init/moloch_config_populate.sh
+++ b/sensor-iso/interface/sensor_ctl/supervisor.init/moloch_config_populate.sh
@@ -20,22 +20,22 @@ if [[ -n $SUPERVISOR_PATH ]] && [[ -r "$SUPERVISOR_PATH"/moloch/config.ini ]]; t
if [[ -n $ES_PROTOCOL ]] && [[ -n $ES_HOST ]]; then
# build elasticsearch URL for moloch-capture
- ARKIME_ELASTICSEARCH="${ES_PROTOCOL}://"
+ ARKIME_OPENSEARCH="${ES_PROTOCOL}://"
if [[ -n $ES_USERNAME ]] && [[ -n $ES_PASSWORD ]]; then
- ARKIME_ELASTICSEARCH+="${ES_USERNAME}:${ES_PASSWORD}@"
+ ARKIME_OPENSEARCH+="${ES_USERNAME}:${ES_PASSWORD}@"
fi
- ARKIME_ELASTICSEARCH+="${ES_HOST}"
+ ARKIME_OPENSEARCH+="${ES_HOST}"
- if [[ -n $ES_PORT ]]; then
- ARKIME_ELASTICSEARCH+=":${ES_PORT}"
+ if [[ -n $OS_PORT ]]; then
+ ARKIME_OPENSEARCH+=":${OS_PORT}"
else
- ARKIME_ELASTICSEARCH+=":9200"
+ ARKIME_OPENSEARCH+=":9200"
fi
# place the URL in the config file
- sed -r -i "s|(elasticsearch)\s*=\s*.*|\1=$ARKIME_ELASTICSEARCH|" "$ARKIME_CONFIG_FILE"
+ sed -r -i "s|(elasticsearch)\s*=\s*.*|\1=$ARKIME_OPENSEARCH|" "$ARKIME_CONFIG_FILE"
fi
# if SSL certificate verification is turned off, supply the --insecure flag
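The shell above builds the connection URL from ES_PROTOCOL, optional ES_USERNAME/ES_PASSWORD, ES_HOST, and OS_PORT with a 9200 fallback. A Python sketch of the same assembly, treating empty variables as unset the way the [[ -n ... ]] tests do (arkime_opensearch_url is an illustrative name):

def arkime_opensearch_url(env):
    url = f"{env['ES_PROTOCOL']}://"
    if env.get('ES_USERNAME') and env.get('ES_PASSWORD'):
        url += f"{env['ES_USERNAME']}:{env['ES_PASSWORD']}@"
    url += env['ES_HOST']
    url += f":{env.get('OS_PORT') or '9200'}"  # default port like the else branch
    return url

assert (arkime_opensearch_url({'ES_PROTOCOL': 'https', 'ES_HOST': '127.0.0.1'})
        == 'https://127.0.0.1:9200')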
diff --git a/shared/bin/configure-capture.py b/shared/bin/configure-capture.py
index 7653641ba..d8c3a6118 100755
--- a/shared/bin/configure-capture.py
+++ b/shared/bin/configure-capture.py
@@ -93,10 +93,10 @@ class Constants:
BEAT_LS_SSL_VERIFY = 'BEAT_LS_SSL_VERIFY'
# specific to beats forwarded to elasticsearch (eg., metricbeat, auditbeat, filebeat-syslog)
- BEAT_ES_HOST = "BEAT_ES_HOST"
- BEAT_ES_PORT = "BEAT_ES_PORT"
- BEAT_ES_PROTOCOL = "BEAT_ES_PROTOCOL"
- BEAT_ES_SSL_VERIFY = "BEAT_ES_SSL_VERIFY"
+ BEAT_OS_HOST = "BEAT_OS_HOST"
+ BEAT_OS_PORT = "BEAT_OS_PORT"
+ BEAT_OS_PROTOCOL = "BEAT_OS_PROTOCOL"
+ BEAT_OS_SSL_VERIFY = "BEAT_OS_SSL_VERIFY"
BEAT_HTTP_PASSWORD = "BEAT_HTTP_PASSWORD"
BEAT_HTTP_USERNAME = "BEAT_HTTP_USERNAME"
BEAT_KIBANA_DASHBOARDS_ENABLED = "BEAT_KIBANA_DASHBOARDS_ENABLED"
@@ -205,8 +205,8 @@ def input_elasticsearch_connection_info(forwarder,
elastic_protocol = "https"
if (d.yesno("Elasticsearch SSL verification", yes_label="None", no_label="Full") != Dialog.OK):
elastic_ssl_verify = "full"
- return_dict[Constants.BEAT_ES_PROTOCOL] = elastic_protocol
- return_dict[Constants.BEAT_ES_SSL_VERIFY] = elastic_ssl_verify
+ return_dict[Constants.BEAT_OS_PROTOCOL] = elastic_protocol
+ return_dict[Constants.BEAT_OS_SSL_VERIFY] = elastic_ssl_verify
while True:
# host/port for Elasticsearch
@@ -223,8 +223,8 @@ def input_elasticsearch_connection_info(forwarder,
code = d.msgbox(text=Constants.MSG_ERROR_BAD_HOST)
else:
- return_dict[Constants.BEAT_ES_HOST] = values[0]
- return_dict[Constants.BEAT_ES_PORT] = values[1]
+ return_dict[Constants.BEAT_OS_HOST] = values[0]
+ return_dict[Constants.BEAT_OS_PORT] = values[1]
break
# Kibana configuration (if supported by forwarder)
@@ -307,12 +307,12 @@ def input_elasticsearch_connection_info(forwarder,
# test Elasticsearch connection
code = d.infobox(Constants.MSG_TESTING_CONNECTION.format("Elasticsearch"))
- retcode, message, output = test_connection(protocol=return_dict[Constants.BEAT_ES_PROTOCOL],
- host=return_dict[Constants.BEAT_ES_HOST],
- port=return_dict[Constants.BEAT_ES_PORT],
+ retcode, message, output = test_connection(protocol=return_dict[Constants.BEAT_OS_PROTOCOL],
+ host=return_dict[Constants.BEAT_OS_HOST],
+ port=return_dict[Constants.BEAT_OS_PORT],
username=return_dict[Constants.BEAT_HTTP_USERNAME] if (len(return_dict[Constants.BEAT_HTTP_USERNAME]) > 0) else None,
password=return_dict[Constants.BEAT_HTTP_PASSWORD] if (len(return_dict[Constants.BEAT_HTTP_PASSWORD]) > 0) else None,
- ssl_verify=return_dict[Constants.BEAT_ES_SSL_VERIFY])
+ ssl_verify=return_dict[Constants.BEAT_OS_SSL_VERIFY])
if (retcode == 200):
code = d.msgbox(text=Constants.MSG_TESTING_CONNECTION_SUCCESS.format("Elasticsearch", retcode, message))
else:
@@ -391,11 +391,11 @@ def main():
if len(line.strip()) > 0:
name, var = remove_prefix(line, "export").partition("=")[::2]
capture_config_dict[name.strip()] = var.strip().strip("'").strip('"')
- if (Constants.BEAT_ES_HOST not in previous_config_values.keys()) and ("ES_HOST" in capture_config_dict.keys()):
- previous_config_values[Constants.BEAT_ES_HOST] = capture_config_dict["ES_HOST"]
+ if (Constants.BEAT_OS_HOST not in previous_config_values.keys()) and ("ES_HOST" in capture_config_dict.keys()):
+ previous_config_values[Constants.BEAT_OS_HOST] = capture_config_dict["ES_HOST"]
previous_config_values[Constants.BEAT_KIBANA_HOST] = capture_config_dict["ES_HOST"]
- if (Constants.BEAT_ES_PORT not in previous_config_values.keys()) and ("ES_PORT" in capture_config_dict.keys()):
- previous_config_values[Constants.BEAT_ES_PORT] = capture_config_dict["ES_PORT"]
+ if (Constants.BEAT_OS_PORT not in previous_config_values.keys()) and ("OS_PORT" in capture_config_dict.keys()):
+ previous_config_values[Constants.BEAT_OS_PORT] = capture_config_dict["OS_PORT"]
if (Constants.BEAT_HTTP_USERNAME not in previous_config_values.keys()) and ("ES_USERNAME" in capture_config_dict.keys()):
previous_config_values[Constants.BEAT_HTTP_USERNAME] = capture_config_dict["ES_USERNAME"]
if (Constants.ARKIME_PACKET_ACL not in previous_config_values.keys()) and ("ARKIME_PACKET_ACL" in capture_config_dict.keys()):
@@ -699,8 +699,8 @@ def main():
# get elasticsearch/kibana connection information from user
elastic_config_dict = input_elasticsearch_connection_info(forwarder=fwd_mode,
- default_es_host=previous_config_values[Constants.BEAT_ES_HOST],
- default_es_port=previous_config_values[Constants.BEAT_ES_PORT],
+ default_es_host=previous_config_values[Constants.BEAT_OS_HOST],
+ default_es_port=previous_config_values[Constants.BEAT_OS_PORT],
default_username=previous_config_values[Constants.BEAT_HTTP_USERNAME],
default_password=previous_config_values[Constants.BEAT_HTTP_PASSWORD])
moloch_elastic_config_dict = elastic_config_dict.copy()
@@ -713,7 +713,7 @@ def main():
# get list of IP addresses allowed for packet payload retrieval
lines = previous_config_values[Constants.ARKIME_PACKET_ACL].split(",")
- lines.append(elastic_config_dict[Constants.BEAT_ES_HOST])
+ lines.append(elastic_config_dict[Constants.BEAT_OS_HOST])
code, lines = d.editbox_str("\n".join(list(filter(None, list(set(lines))))), title=Constants.MSG_CONFIG_ARKIME_PCAP_ACL)
if code != Dialog.OK:
raise CancelledError
@@ -780,8 +780,8 @@ def main():
# get elasticsearch/kibana connection information from user
forwarder_dict.update(input_elasticsearch_connection_info(forwarder=fwd_mode,
- default_es_host=previous_config_values[Constants.BEAT_ES_HOST],
- default_es_port=previous_config_values[Constants.BEAT_ES_PORT],
+ default_es_host=previous_config_values[Constants.BEAT_OS_HOST],
+ default_es_port=previous_config_values[Constants.BEAT_OS_PORT],
default_kibana_host=previous_config_values[Constants.BEAT_KIBANA_HOST],
default_kibana_port=previous_config_values[Constants.BEAT_KIBANA_PORT],
default_username=previous_config_values[Constants.BEAT_HTTP_USERNAME],
diff --git a/shared/bin/elastic_index_size_prune.py b/shared/bin/opensearch_index_size_prune.py
similarity index 77%
rename from shared/bin/elastic_index_size_prune.py
rename to shared/bin/opensearch_index_size_prune.py
index e713c0df4..06fa2989e 100755
--- a/shared/bin/elastic_index_size_prune.py
+++ b/shared/bin/opensearch_index_size_prune.py
@@ -35,14 +35,14 @@ def main():
global debug
parser = argparse.ArgumentParser(description=scriptName, add_help=True, usage='{} '.format(scriptName))
- parser.add_argument('-v', '--verbose', dest='debug', type=str2bool, nargs='?', const=True, default=str2bool(os.getenv('ELASTICSEARCH_INDEX_SIZE_PRUNE_DEBUG', default='False')), help="Verbose output")
- parser.add_argument('-i', '--index', dest='index', metavar='', type=str, default=os.getenv('ELASTICSEARCH_INDEX_SIZE_PRUNE_INDEX', 'sessions2-*'), help='Index pattern')
- parser.add_argument('-e', '--elastic', dest='elasticUrl', metavar='', type=str, default=os.getenv('ELASTICSEARCH_URL', 'http://elasticsearch:9200'), help='Elasticsearch URL')
- parser.add_argument('--node', dest='node', metavar='', type=str, default=os.getenv('ELASTICSEARCH_INDEX_SIZE_PRUNE_NODE', ''), help='Node IDs or names')
- parser.add_argument('-l', '--limit', dest='limit', metavar='', type=str, default=os.getenv('ELASTICSEARCH_INDEX_SIZE_PRUNE_LIMIT', '0'), help='Index pattern size limit (e.g., 100gb, 25%, ...)')
- parser.add_argument('-n', '--dry-run', dest='dryrun', type=str2bool, nargs='?', const=True, default=str2bool(os.getenv('ELASTICSEARCH_INDEX_SIZE_PRUNE_DRY_RUN', default='False')), help="Dry run")
- parser.add_argument('-p', '--primary', dest='primaryTotals', type=str2bool, nargs='?', const=True, default=str2bool(os.getenv('ELASTICSEARCH_INDEX_SIZE_PRUNE_PRIMARY', default='False')), help="Perform totals based on primaries (vs. totals)")
- parser.add_argument('--name-sort', dest='nameSorted', type=str2bool, nargs='?', const=True, default=str2bool(os.getenv('ELASTICSEARCH_INDEX_SIZE_PRUNE_NAME_SORT', default='False')), help="Sort indices by name (vs. creation date)")
+ parser.add_argument('-v', '--verbose', dest='debug', type=str2bool, nargs='?', const=True, default=str2bool(os.getenv('OPENSEARCH_INDEX_SIZE_PRUNE_DEBUG', default='False')), help="Verbose output")
+ parser.add_argument('-i', '--index', dest='index', metavar='', type=str, default=os.getenv('OPENSEARCH_INDEX_SIZE_PRUNE_INDEX', 'sessions2-*'), help='Index pattern')
+ parser.add_argument('-o', '--opensearch', dest='opensearchUrl', metavar='', type=str, default=os.getenv('OPENSEARCH_URL', 'http://opensearch:9200'), help='OpenSearch URL')
+ parser.add_argument('--node', dest='node', metavar='', type=str, default=os.getenv('OPENSEARCH_INDEX_SIZE_PRUNE_NODE', ''), help='Node IDs or names')
+ parser.add_argument('-l', '--limit', dest='limit', metavar='', type=str, default=os.getenv('OPENSEARCH_INDEX_SIZE_PRUNE_LIMIT', '0'), help='Index pattern size limit (e.g., 100gb, 25%%, ...)')
+ parser.add_argument('-n', '--dry-run', dest='dryrun', type=str2bool, nargs='?', const=True, default=str2bool(os.getenv('OPENSEARCH_INDEX_SIZE_PRUNE_DRY_RUN', default='False')), help="Dry run")
+ parser.add_argument('-p', '--primary', dest='primaryTotals', type=str2bool, nargs='?', const=True, default=str2bool(os.getenv('OPENSEARCH_INDEX_SIZE_PRUNE_PRIMARY', default='False')), help="Perform totals based on primaries (vs. totals)")
+ parser.add_argument('--name-sort', dest='nameSorted', type=str2bool, nargs='?', const=True, default=str2bool(os.getenv('OPENSEARCH_INDEX_SIZE_PRUNE_NAME_SORT', default='False')), help="Sort indices by name (vs. creation date)")
try:
parser.error = parser.exit
args = parser.parse_args()
@@ -62,11 +62,11 @@ def main():
if (args.limit == '0'):
return
- esInfoResponse = requests.get(args.elasticUrl)
- esInfo = esInfoResponse.json()
- elasticVersion = esInfo['version']['number']
+ osInfoResponse = requests.get(args.opensearchUrl)
+ osInfo = osInfoResponse.json()
+ opensearchVersion = osInfo['version']['number']
if debug:
- eprint(f'Elasticsearch version is {elasticVersion}')
+ eprint(f'OpenSearch version is {opensearchVersion}')
totalIndices = 0
limitMegabytes = None
@@ -88,13 +88,13 @@ def main():
# get allocation statistics for node(s) to do percentage calculation
esDiskUsageStats = []
- esInfoResponse = requests.get(f'{args.elasticUrl}/_cat/allocation{f"/{args.node}" if args.node else ""}?format=json')
- esInfo = esInfoResponse.json()
+ osInfoResponse = requests.get(f'{args.opensearchUrl}/_cat/allocation{f"/{args.node}" if args.node else ""}?format=json')
+ osInfo = osInfoResponse.json()
# normalize allocation statistics' sizes (eg., 100mb) into bytes
- if (len(esInfo) > 1):
+ if (len(osInfo) > 1):
esDiskUsageStats = []
- for stat in esInfo:
+ for stat in osInfo:
if ('node' in stat) and (stat['node'] != 'UNASSIGNED'):
esDiskUsageStats.append({key:humanfriendly.parse_size(value) if re.match(r'^\d+(\.\d+)?\s*[kmgtp]?b$', value, flags=re.IGNORECASE) else value for (key,value) in stat.items()})
@@ -130,11 +130,11 @@ def main():
eprint(f'Index limit for {args.index} is {humanfriendly.format_size(humanfriendly.parse_size(f"{limitMegabytes}mb"))}')
# now determine the total size of the indices from the index pattern
- esInfoResponse = requests.get(f'{args.elasticUrl}/{args.index}/_stats/store')
- esInfo = esInfoResponse.json()
+ osInfoResponse = requests.get(f'{args.opensearchUrl}/{args.index}/_stats/store')
+ osInfo = osInfoResponse.json()
try:
- totalSizeInMegabytes = esInfo['_all']['primaries' if args.primaryTotals else 'total']['store']['size_in_bytes'] // 1000000
- totalIndices = len(esInfo["indices"])
+ totalSizeInMegabytes = osInfo['_all']['primaries' if args.primaryTotals else 'total']['store']['size_in_bytes'] // 1000000
+ totalIndices = len(osInfo["indices"])
except Exception as e:
raise Exception(f'Error getting {args.index} size_in_bytes: {e}')
if debug:
@@ -144,20 +144,20 @@ def main():
# the indices have outgrown their bounds, we need to delete the oldest
if debug:
- eprint(f'{len(esInfo)} {args.index} indices occupy {humanfriendly.format_size(humanfriendly.parse_size(f"{totalSizeInMegabytes}mb"))} ({humanfriendly.format_size(humanfriendly.parse_size(f"{limitMegabytes}mb"))} allowed)')
+ eprint(f'{len(osInfo)} {args.index} indices occupy {humanfriendly.format_size(humanfriendly.parse_size(f"{totalSizeInMegabytes}mb"))} ({humanfriendly.format_size(humanfriendly.parse_size(f"{limitMegabytes}mb"))} allowed)')
# get list of indexes in index pattern and sort by creation date
- esInfoResponse = requests.get(f'{args.elasticUrl}/_cat/indices/{args.index}',
+ osInfoResponse = requests.get(f'{args.opensearchUrl}/_cat/indices/{args.index}',
params={'format':'json',
'h':'i,id,status,health,rep,creation.date,pri.store.size,store.size'})
- esInfo = sorted(esInfoResponse.json(), key=lambda k: k['i' if args.nameSorted else 'creation.date'])
- totalIndices = len(esInfo)
+ osInfo = sorted(osInfoResponse.json(), key=lambda k: k['i' if args.nameSorted else 'creation.date'])
+ totalIndices = len(osInfo)
# determine how many megabytes need to be deleted and which of the oldest indices will cover that
indicesToDelete = []
needsDeletedMb = totalSizeInMegabytes-limitMegabytes
sizeKey = 'pri.store.size' if args.primaryTotals else 'store.size'
- for index in esInfo:
+ for index in osInfo:
indexSizeMb = humanfriendly.parse_size(index[sizeKey]) // 1000000
if (needsDeletedMb > 0):
indicesToDelete.append(index)
@@ -172,7 +172,7 @@ def main():
if not args.dryrun:
# delete the indices to free up the space indicated
for index in indicesToDelete:
- esDeleteResponse = requests.delete(f'{args.elasticUrl}/{index["i"]}')
+ esDeleteResponse = requests.delete(f'{args.opensearchUrl}/{index["i"]}')
print(f'DELETE {index["i"]} ({humanfriendly.format_size(humanfriendly.parse_size(index[sizeKey]))}): {requests.status_codes._codes[esDeleteResponse.status_code][0]}')
else:
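With the indices sorted oldest-first, the loop above queues deletions until the computed overage is covered. The selection step condensed into a sketch (indices_to_delete is an illustrative name):

import humanfriendly

def indices_to_delete(indices, total_mb, limit_mb, size_key='store.size'):
    # indices assumed sorted oldest-first, as with the _cat/indices sort above
    chosen, needed_mb = [], total_mb - limit_mb
    for index in indices:
        if needed_mb <= 0:
            break
        chosen.append(index)
        needed_mb -= humanfriendly.parse_size(index[size_key]) // 1000000
    return chosen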
diff --git a/shared/bin/elastic_search_status.sh b/shared/bin/opensearch_status.sh
similarity index 70%
rename from shared/bin/elastic_search_status.sh
rename to shared/bin/opensearch_status.sh
index 0653befa4..12d35fe19 100755
--- a/shared/bin/elastic_search_status.sh
+++ b/shared/bin/opensearch_status.sh
@@ -9,14 +9,14 @@ ENCODING="utf-8"
# options
# -v (verbose)
#
-# -e url (Elasticsearch URL, e.g., http://elasticsearch:9200)
+# -e url (OpenSearch URL, e.g., http://opensearch:9200)
# OR
# -i ip (Elasticsearch ip)
# -p port (Elasticsearch port)
#
# -w (wait not only for "up" status, but also wait for actual sessions2-* logs to exist)
-ES_URL=
+OS_URL=
WAIT_FOR_LOG_DATA=0
while getopts 've:i:p:w' OPTION; do
case "$OPTION" in
@@ -25,7 +25,7 @@ while getopts 've:i:p:w' OPTION; do
;;
e)
- ES_URL="$OPTARG"
+ OS_URL="$OPTARG"
;;
i)
@@ -33,7 +33,7 @@ while getopts 've:i:p:w' OPTION; do
;;
p)
- ES_PORT="$OPTARG"
+ OS_PORT="$OPTARG"
;;
w)
@@ -48,36 +48,36 @@ while getopts 've:i:p:w' OPTION; do
done
shift "$(($OPTIND -1))"
-if [[ -z $ES_URL ]]; then
- if [[ -n $ELASTICSEARCH_URL ]]; then
- ES_URL="$ELASTICSEARCH_URL"
- elif [[ -n $ES_HOST ]] && [[ -n $ES_PORT ]]; then
- ES_URL="http://$ES_HOST:$ES_PORT"
+if [[ -z $OS_URL ]]; then
+ if [[ -n $OPENSEARCH_URL ]]; then
+ OS_URL="$OPENSEARCH_URL"
+ elif [[ -n $ES_HOST ]] && [[ -n $OS_PORT ]]; then
+ OS_URL="http://$ES_HOST:$OS_PORT"
else
- ES_URL="http://elasticsearch:9200"
+ OS_URL="http://opensearch:9200"
fi
fi
# wait for the ES HTTP server to respond at all
-until $(curl --output /dev/null --silent --head --fail "$ES_URL"); do
+until $(curl --output /dev/null --silent --head --fail "$OS_URL"); do
# printf '.' >&2
sleep 1
done
# now wait for the HTTP "Ok" response
-until [ "$(curl --write-out %{http_code} --silent --output /dev/null "$ES_URL")" = "200" ]; do
+until [ "$(curl --write-out %{http_code} --silent --output /dev/null "$OS_URL")" = "200" ]; do
# printf '-' >&2
sleep 1
done
# next wait for ES status to turn to green or yellow
-until [[ "$(curl -fsSL "$ES_URL/_cat/health?h=status" | sed -r 's/^[[:space:]]+|[[:space:]]+$//g')" =~ ^(yellow|green)$ ]]; do
+until [[ "$(curl -fsSL "$OS_URL/_cat/health?h=status" | sed -r 's/^[[:space:]]+|[[:space:]]+$//g')" =~ ^(yellow|green)$ ]]; do
# printf '+' >&2
sleep 1
done
-echo "Elasticsearch is up and healthy at "$ES_URL"" >&2
+echo "Elasticsearch is up and healthy at "$OS_URL"" >&2
if (( $WAIT_FOR_LOG_DATA == 1 )); then
sleep 1
@@ -85,13 +85,13 @@ if (( $WAIT_FOR_LOG_DATA == 1 )); then
echo "Waiting until Elasticsearch has logs..." >&2
# wait until at least one sessions2-* index exists
- until (( $(curl -fs -H'Content-Type: application/json' -XGET "$ES_URL/_cat/indices/sessions2-*" 2>/dev/null | wc -l) > 0 )) ; do
+ until (( $(curl -fs -H'Content-Type: application/json' -XGET "$OS_URL/_cat/indices/sessions2-*" 2>/dev/null | wc -l) > 0 )) ; do
sleep 5
done
echo "Log indices exist." >&2
# wait until at least one record with @timestamp exists
- until curl -fs -H'Content-Type: application/json' -XPOST "$ES_URL/sessions2-*/_search" -d'{ "sort": { "@timestamp" : "desc" }, "size" : 1 }' >/dev/null 2>&1 ; do
+ until curl -fs -H'Content-Type: application/json' -XPOST "$OS_URL/sessions2-*/_search" -d'{ "sort": { "@timestamp" : "desc" }, "size" : 1 }' >/dev/null 2>&1 ; do
sleep 5
done
echo "Logs exist." >&2