diff --git a/.bash_aliases b/.bash_aliases
index a8d1d767a..05b57d476 100644
--- a/.bash_aliases
+++ b/.bash_aliases
@@ -1,7 +1,8 @@
-alias iotstack_up="docker-compose -f ~/IOTstack/docker-compose.yml up -d"
-alias iotstack_down="docker-compose -f ~/IOTstack/docker-compose.yml down"
-alias iotstack_start="docker-compose -f ~/IOTstack/docker-compose.yml start"
-alias iotstack_stop="docker-compose -f ~/IOTstack/docker-compose.yml stop"
-alias iotstack_update="docker-compose -f ~/IOTstack/docker-compose.yml pull"
-alias iotstack_build="docker-compose -f ~/IOTstack/docker-compose.yml build"
-
+IOTSTACK_HOME="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+alias iotstack_up="cd "$IOTSTACK_HOME" && docker-compose up -d --remove-orphans"
+alias iotstack_down="cd "$IOTSTACK_HOME" && docker-compose down --remove-orphans"
+alias iotstack_start="cd "$IOTSTACK_HOME" && docker-compose start"
+alias iotstack_stop="cd "$IOTSTACK_HOME" && docker-compose stop"
+alias iotstack_pull="cd "$IOTSTACK_HOME" && docker-compose pull"
+alias iotstack_build="cd "$IOTSTACK_HOME" && docker-compose build --pull --no-cache"
+alias iotstack_update_docker_images='f(){ iotstack_pull "$@" && iotstack_build "$@" && iotstack_up --build "$@"; }; f'
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
new file mode 100644
index 000000000..6a03007a4
--- /dev/null
+++ b/.github/workflows/main.yml
@@ -0,0 +1,17 @@
+name: Publish docs via GitHub Pages
+on:
+  push:
+    branches:
+      - master
+
+jobs:
+  build:
+    name: Deploy docs
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
+        with:
+          python-version: 3.x
+      - run: pip3 install -r requirements-mkdocs.txt
+      - run: mkdocs gh-deploy --force
diff --git a/.gitignore b/.gitignore
index 5f09a9ea8..e9d222463 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,5 +2,30 @@
 /services/
 /volumes/
 /backups/
+/.tmp/*
+__pycache__
 docker-compose.yml
-.outofdate
\ No newline at end of file
+docker-compose.override.yml
+compose-override.yml
+build-installer.sh
+docker-compose-base.yml
+build-options.json
+iotstack_build_*.zip
+postbuild.sh
+pre_backup.sh
+post_backup.sh
+post_restore.sh
+.project_outofdate
+.outofdate
+.docker_notinstalled
+.docker_outofdate
+.new_install
+.installed
+.env
+
+!.gitkeep
+
+.vscode
+.idea
+*.code-workspace
+
diff --git a/.templates/adguardhome/service.yml b/.templates/adguardhome/service.yml
new file mode 100644
index 000000000..5ab6dd48e
--- /dev/null
+++ b/.templates/adguardhome/service.yml
@@ -0,0 +1,33 @@
+adguardhome:
+  container_name: adguardhome
+  image: adguard/adguardhome
+  restart: unless-stopped
+  environment:
+    - TZ=${TZ:-Etc/UTC}
+  # enable host mode to activate DHCP server on ports 67/udp & 68/tcp+udp
+  # note that you must also disable all other ports if you enable host mode
+  # network_mode: host
+  ports:
+    # regular DNS
+    - "53:53/tcp"
+    - "53:53/udp"
+    # administration port (http)
+    # note: external and internal ports MUST be the same
+    # not active until defined via setup port
+    - "8089:8089/tcp"
+    # HTTPS/DNS-over-HTTPS
+    # - "443:443/tcp"
+    # DNS-over-QUIC
+    # - "784:784/udp"
+    # DNS-over-TLS
+    # - "853:853/tcp"
+    # setup (http)
+    # note: only active until port 8089 becomes active
+    - "3001:3000/tcp"
+    # DNSCrypt
+    # - "5443:5443/tcp"
+    # - "5443:5443/udp"
+  volumes:
+    - ./volumes/adguardhome/workdir:/opt/adguardhome/work
+    - ./volumes/adguardhome/confdir:/opt/adguardhome/conf
+
diff --git a/.templates/adminer/build.py b/.templates/adminer/build.py
new file mode 100755
index 000000000..5a5fb79ad
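For orientation, the .bash_aliases hunk at the top of this diff derives IOTSTACK_HOME from the location of the file itself rather than hard-coding ~/IOTstack. A minimal usage sketch follows; the ~/IOTstack clone path and the sourcing step are assumptions for illustration only and are not part of the change:

    # assumes the repository was cloned to ~/IOTstack and that your login
    # shell sources its .bash_aliases (for example from ~/.bashrc)
    source ~/IOTstack/.bash_aliases

    iotstack_up                    # start the stack detached, removing orphan containers
    iotstack_pull                  # pull newer images for the services in docker-compose.yml
    iotstack_update_docker_images  # chains pull -> build -> up --build in one step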
--- /dev/null +++ b/.templates/adminer/build.py @@ -0,0 +1,315 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. Required to make local vars work correctly +def main(): + import os + import time + import ruamel.yaml + import signal + import sys + from blessed import Terminal + + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine, padText + from deps.consts import servicesDirectory, templatesDirectory + from deps.common_functions import getExternalPorts, getInternalPorts, checkPortConflicts, enterPortNumberWithWhiptail + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + global hideHelpText # Showing and hiding the help controls text + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + documentationHint = 'https://sensorsiot.github.io/IOTstack/Containers/Adminer' + + # runtime vars + portConflicts = [] + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. 
+ def preBuild(): + return True + + # ##################################### + # Supporting functions below + # ##################################### + + def checkForIssues(): + for (index, serviceName) in enumerate(dockerComposeServicesYaml): + if not currentServiceName == serviceName: # Skip self + currentServicePorts = getExternalPorts(currentServiceName, dockerComposeServicesYaml) + portConflicts = checkPortConflicts(serviceName, currentServicePorts, dockerComposeServicesYaml) + if (len(portConflicts) > 0): + issues["portConflicts"] = portConflicts + + # ##################################### + # End Supporting functions + # ##################################### + + ############################ + # Menu Logic + ############################ + + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + + selectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + needsRender = 1 + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + + def goBack(): + global selectionInProgress + global needsRender + selectionInProgress = False + needsRender = 1 + return True + + def enterPortNumberExec(): + # global term + global needsRender + global dockerComposeServicesYaml + externalPort = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + internalPort = getInternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + newPortNumber = enterPortNumberWithWhiptail(term, dockerComposeServicesYaml, currentServiceName, hotzoneLocation, externalPort) + + if newPortNumber > 0: + dockerComposeServicesYaml[currentServiceName]["ports"][0] = "{newExtPort}:{oldIntPort}".format( + newExtPort = newPortNumber, + oldIntPort = internalPort + ) + createMenu() + needsRender = 1 + + def onResize(sig, action): + global adminerBuildOptions + global currentMenuItemIndex + mainRender(1, adminerBuildOptions, currentMenuItemIndex) + + adminerBuildOptions = [] + + def createMenu(): + global adminerBuildOptions + try: + adminerBuildOptions = [] + portNumber = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + adminerBuildOptions.append([ + "Change external WUI Port Number from: {port}".format(port=portNumber), + enterPortNumberExec + ]) + except: # Error getting port + pass + adminerBuildOptions.append(["Go back", goBack]) + + def runOptionsMenu(): + createMenu() + menuEntryPoint() + return True + + def renderHotZone(term, menu, selection, hotzoneLocation): + lineLengthAtTextStart = 71 + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + for (index, menuItem) in enumerate(menu): + toPrint = "" + if index == selection: + toPrint += ('{bv} -> {t.blue_on_green} {title} {t.normal} <-'.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + else: + toPrint += ('{bv} {t.normal} {title} '.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + + for i in range(lineLengthAtTextStart - len(menuItem[0])): + toPrint += " " + + toPrint += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + + toPrint = term.center(toPrint) + + print(toPrint) + + def mainRender(needsRender, menu, selection): + term = Terminal() + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack Adminer Options'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Option to configure 
{bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to run command or save input {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to go back to build stack menu {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + if len(documentationHint) > 1: + if len(documentationHint) > 56: + documentationAndPadding = padText(documentationHint, 71) + print(term.center("{bv} Documentation: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + else: + documentationAndPadding = padText(documentationHint, 56) + print(term.center("{bv} Documentation: {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + global adminerBuildOptions + if len(adminerBuildOptions[selection]) > 1 and isinstance(adminerBuildOptions[selection][1], types.FunctionType): + adminerBuildOptions[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(nodeRedBuildOptions[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 2: + if menu[index][2]["skip"] == True: + return False + return True + + def menuEntryPoint(): + # These need to be reglobalised due to eval() + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + global hideHelpText + global adminerBuildOptions + term = Terminal() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, adminerBuildOptions, currentMenuItemIndex) + selectionInProgress = True + with term.cbreak(): + while selectionInProgress: + menuNavigateDirection = 0 + + if needsRender: # Only rerender when changed to prevent flickering + mainRender(needsRender, adminerBuildOptions, currentMenuItemIndex) + needsRender = 0 + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + menuNavigateDirection += 1 + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_LEFT': + goBack() + if key.name == 'KEY_ENTER': + runSelection(currentMenuItemIndex) + if key.name == 'KEY_ESCAPE': + return True + elif key: + if key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, adminerBuildOptions, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable 
item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(adminerBuildOptions) + needsRender = 2 + + while not isMenuItemSelectable(adminerBuildOptions, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(adminerBuildOptions) + return True + + #################### + # End menu section + #################### + + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'adminer': + main() +else: + print("Error. '{}' Tried to run 'adminer' config".format(currentServiceName)) diff --git a/.templates/adminer/service.yml b/.templates/adminer/service.yml index 55908eac5..95721843d 100644 --- a/.templates/adminer/service.yml +++ b/.templates/adminer/service.yml @@ -1,6 +1,7 @@ - adminer: - container_name: adminer - image: adminer - restart: unless-stopped - ports: - - 9080:8080 +adminer: + container_name: adminer + image: adminer + restart: unless-stopped + ports: + - "9080:8080" + diff --git a/.templates/blynk_server/Dockerfile b/.templates/blynk_server/Dockerfile index 0b041f020..c936ee1c8 100644 --- a/.templates/blynk_server/Dockerfile +++ b/.templates/blynk_server/Dockerfile @@ -1,25 +1,75 @@ -FROM adoptopenjdk/openjdk11 -MAINTAINER Graham Garner +# Acknowledgements: +# Based on: +# https://github.com/SensorsIot/IOTstack/blob/master/.templates/blynk_server/Dockerfile +# (as at commit ID 4dff89c1bb6a5b1c01d3c087dcb662256a0c050f) +# Borrows from: +# https://github.com/Peterkn2001/blynk-server/blob/master/server/Docker/Dockerfile +# (as at commit ID 889c7e55161832e21264d993d9fa5abd1c015e1c) -#RUN apt-get update -#RUN apt-get install -y apt-utils -#RUN apt-get install -y default-jdk curl +FROM ubuntu -ENV BLYNK_SERVER_VERSION 0.41.10 -RUN mkdir /blynk -RUN curl -L https://github.com/blynkkk/blynk-server/releases/download/v${BLYNK_SERVER_VERSION}/server-${BLYNK_SERVER_VERSION}.jar > /blynk/server.jar +# declare the version to be built, defaulting to 0.41.16 (which is +# current as of 2021-10-22) +ARG BLYNK_SERVER_VERSION=0.41.16 -# Create data folder. To persist data, map a volume to /data -RUN mkdir /data +# form the download URL +ENV BLYNK_SERVER_URL=https://github.com/Peterkn2001/blynk-server/releases/download/v${BLYNK_SERVER_VERSION}/server-${BLYNK_SERVER_VERSION}.jar -# Create configuration folder. 
To persist data, map a file to /config/server.properties -RUN mkdir /config && touch /config/server.properties -VOLUME ["/config", "/data/backup"] +# Add support packages to the base image +RUN apt-get update \ + && apt-get install -y \ + apt-utils \ + libreadline8 \ + libreadline-dev \ + && apt-get install -y \ + curl \ + libxrender1 \ + maven \ + openjdk-11-jdk \ + rsync -# IP port listing: -# 8080: Hardware without ssl/tls support -# 9443: Blynk app, https, web sockets, admin port -EXPOSE 8080 9443 +# Add IOTstack-specific support +ENV IOTSTACK_DEFAULTS_DIR="iotstack_defaults" +ENV IOTSTACK_ENTRY_POINT="docker-entrypoint.sh" +COPY ${IOTSTACK_DEFAULTS_DIR} /${IOTSTACK_DEFAULTS_DIR} +COPY ${IOTSTACK_ENTRY_POINT} /${IOTSTACK_ENTRY_POINT} +RUN chmod 755 /${IOTSTACK_ENTRY_POINT} -WORKDIR /data -ENTRYPOINT ["java", "-jar", "/blynk/server.jar", "-dataFolder", "/data", "-serverConfig", "/config/server.properties"] \ No newline at end of file +# define well-known paths +ENV IOTSTACK_DATA_DIR="/data" +ENV IOTSTACK_CONF_DIR="/config" +ENV IOTSTACK_JAVA_DIR="/blynk" + +# Create and populate expected folders +RUN mkdir -p ${IOTSTACK_DATA_DIR} ${IOTSTACK_JAVA_DIR} \ + && curl -L ${BLYNK_SERVER_URL} >"${IOTSTACK_JAVA_DIR}/server.jar" + +# declare expected mapped volumes +VOLUME ["${IOTSTACK_CONF_DIR}", "${IOTSTACK_DATA_DIR}"] + +# Expose assumed internal ports: +# 8080 http.port +# 8440 hardware.mqtt.port +# 9443 https.port +EXPOSE 8080 8440 9443 + +# set the working directory +WORKDIR ${IOTSTACK_DATA_DIR} + +# define launch procedure +ENTRYPOINT ["/docker-entrypoint.sh"] +CMD ["java", "-jar", "/blynk/server.jar", "-dataFolder", "/data", "-serverConfig", "/config/server.properties", "-mailConfig", "/config/mail.properties"] + +# supplement image metadata +LABEL blynk-server.version=${BLYNK_SERVER_VERSION} +LABEL blynk-server.url=${BLYNK_SERVER_URL} +LABEL com.github.SensorsIot.IOTstack.Dockerfile.maintainer="877dev <877dev@gmail.com>" +LABEL com.github.Peterkn2001.blynk-server.Dockerfile.maintainer="Florian Mauduit " + +# unset variables that are not needed by docker-entrypoint.sh +ENV IOTSTACK_ENTRY_POINT= +ENV IOTSTACK_DATA_DIR= +ENV IOTSTACK_JAVA_DIR= +ENV BLYNK_SERVER_URL= + +# EOF diff --git a/.templates/blynk_server/docker-entrypoint.sh b/.templates/blynk_server/docker-entrypoint.sh new file mode 100755 index 000000000..15ede7f4c --- /dev/null +++ b/.templates/blynk_server/docker-entrypoint.sh @@ -0,0 +1,19 @@ +#!/bin/bash +set -e + +# were we launched as root with defaults available? +if [ "$(id -u)" = "0" -a -d /"$IOTSTACK_DEFAULTS_DIR" ]; then + + # yes! 
ensure that the IOTSTACK_CONF_DIR exists
+  mkdir -p "$IOTSTACK_CONF_DIR"
+
+  # populate runtime directory from the defaults
+  rsync -arp --ignore-existing "/${IOTSTACK_DEFAULTS_DIR}/" "${IOTSTACK_CONF_DIR}"
+
+  # enforce correct ownership
+  chown -R "${IOTSTACK_UID:-nobody}":"${IOTSTACK_GID:-nobody}" "$IOTSTACK_CONF_DIR"
+
+fi
+
+# start the blynk server
+exec "$@"
diff --git a/.templates/blynk_server/iotstack_defaults/mail.properties b/.templates/blynk_server/iotstack_defaults/mail.properties
new file mode 100644
index 000000000..aa01f9c46
--- /dev/null
+++ b/.templates/blynk_server/iotstack_defaults/mail.properties
@@ -0,0 +1,9 @@
+mail.smtp.auth=true
+ mail.smtp.starttls.enable=true
+ mail.smtp.host=smtp.gmail.com
+ mail.smtp.port=587
+ mail.smtp.username=YOUR_GMAIL@gmail.com
+ mail.smtp.password=YOUR_GMAIL_APP_PASSWORD
+ mail.smtp.connectiontimeout=30000
+ mail.smtp.timeout=120000
+
diff --git a/.templates/blynk_server/iotstack_defaults/server.properties b/.templates/blynk_server/iotstack_defaults/server.properties
new file mode 100644
index 000000000..4b5ba80ff
--- /dev/null
+++ b/.templates/blynk_server/iotstack_defaults/server.properties
@@ -0,0 +1,40 @@
+hardware.mqtt.port=8440
+ http.port=8080
+ force.port.80.for.csv=false
+ force.port.80.for.redirect=true
+ https.port=9443
+ data.folder=./data
+ logs.folder=./logs
+ log.level=info
+ user.devices.limit=10
+ user.tags.limit=100
+ user.dashboard.max.limit=100
+ user.widget.max.size.limit=20
+ user.message.quota.limit=100
+ notifications.queue.limit=2000
+ blocking.processor.thread.pool.limit=6
+ notifications.frequency.user.quota.limit=5
+ webhooks.frequency.user.quota.limit=1000
+ webhooks.response.size.limit=96
+ user.profile.max.size=128
+ terminal.strings.pool.size=25
+ map.strings.pool.size=25
+ lcd.strings.pool.size=6
+ table.rows.pool.size=100
+ profile.save.worker.period=60000
+ stats.print.worker.period=60000
+ web.request.max.size=524288
+ csv.export.data.points.max=43200
+ hard.socket.idle.timeout=10
+ enable.db=false
+ enable.raw.db.data.store=false
+ async.logger.ring.buffer.size=2048
+ allow.reading.widget.without.active.app=false
+ allow.store.ip=true
+ initial.energy=1000000
+ admin.rootPath=/admin
+ restore.host=blynk-cloud.com
+ product.name=Blynk
+ admin.email=admin@blynk.cc
+ admin.pass=admin
+
diff --git a/.templates/blynk_server/service.yml b/.templates/blynk_server/service.yml
index 8f0dce752..57dc0fcb7 100644
--- a/.templates/blynk_server/service.yml
+++ b/.templates/blynk_server/service.yml
@@ -1,11 +1,19 @@
-  blynk_server:
-    build: ./services/blynk_server/.
-    container_name: blynk_server
-    restart: unless-stopped
-    ports:
-      - 8180:8080
-      - 8441:8441
-      - 9443:9443
-    volumes:
-      - ./volumes/blynk_server/data:/data
+blynk_server:
+  build:
+    context: ./.templates/blynk_server/.
+ args: + - BLYNK_SERVER_VERSION=0.41.16 + container_name: blynk_server + restart: unless-stopped + environment: + - TZ=Etc/UTC + - IOTSTACK_UID=1000 + - IOTSTACK_GID=1000 + ports: + - "8180:8080" + - "8440:8440" + - "9444:9443" + volumes: + - ./volumes/blynk_server/data:/data + - ./volumes/blynk_server/config:/config diff --git a/.templates/chronograf/service.yml b/.templates/chronograf/service.yml new file mode 100644 index 000000000..8ce6a93ee --- /dev/null +++ b/.templates/chronograf/service.yml @@ -0,0 +1,20 @@ +chronograf: + container_name: chronograf + image: chronograf:latest + restart: unless-stopped + environment: + - TZ=${TZ:-Etc/UTC} + # see https://docs.influxdata.com/chronograf/v1.9/administration/config-options/ + - INFLUXDB_URL=http://influxdb:8086 + # - INFLUXDB_USERNAME= + # - INFLUXDB_PASSWORD= + # - INFLUXDB_ORG= + # - KAPACITOR_URL=http://kapacitor:9092 + ports: + - "8888:8888" + volumes: + - ./volumes/chronograf:/var/lib/chronograf + depends_on: + - influxdb + # - kapacitor + diff --git a/.templates/dashmachine/service.yml b/.templates/dashmachine/service.yml new file mode 100644 index 000000000..20ed3c35e --- /dev/null +++ b/.templates/dashmachine/service.yml @@ -0,0 +1,9 @@ +dashmachine: + image: rmountjoy/dashmachine:latest + container_name: dashmachine + volumes: + - ./volumes/dashmachine/user_data:/dashmachine/dashmachine/user_data + ports: + - "5000:5000" + restart: unless-stopped + diff --git a/.templates/deconz/build.py b/.templates/deconz/build.py new file mode 100755 index 000000000..57e3a53d0 --- /dev/null +++ b/.templates/deconz/build.py @@ -0,0 +1,480 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. Required to make local vars work correctly +def main(): + import os + import time + import ruamel.yaml + import signal + import sys + from blessed import Terminal + + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine + from deps.consts import servicesDirectory, templatesDirectory, buildSettingsFileName, buildCache, servicesFileName + from deps.common_functions import getExternalPorts, getInternalPorts, checkPortConflicts, enterPortNumberWithWhiptail, generateRandomString + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + global hideHelpText # Showing and hiding the help controls text + global serviceService + global serviceTemplate + global hasRebuiltHardwareSelection + + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + buildSettings = serviceService + buildSettingsFileName + + hasRebuiltHardwareSelection = False + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + # runtime vars + portConflicts = [] + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. 
+ def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. + def preBuild(): + global dockerComposeServicesYaml + global currentServiceName + with open("{serviceDir}{buildSettings}".format(serviceDir=serviceService, buildSettings=buildSettingsFileName)) as objHardwareListFile: + deconzYamlBuildOptions = yaml.load(objHardwareListFile) + # Password randomisation + # Multi-service: + with open((r'%s/' % serviceTemplate) + servicesFileName) as objServiceFile: + serviceYamlTemplate = yaml.load(objServiceFile) + + oldBuildCache = {} + try: + with open(r'%s' % buildCache) as objBuildCache: + oldBuildCache = yaml.load(objBuildCache) + except: + pass + + buildCacheServices = {} + if "services" in oldBuildCache: + buildCacheServices = oldBuildCache["services"] + + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + + if os.path.exists(buildSettings): + # Password randomisation + if "databasePasswordOption" in deconzYamlBuildOptions: + if ( + deconzYamlBuildOptions["databasePasswordOption"] == "Randomise database password for this build" + or deconzYamlBuildOptions["databasePasswordOption"] == "Randomise database password every build" + or deconzYamlBuildOptions["databasePasswordOption"] == "Use default password for this build" + ): + if deconzYamlBuildOptions["databasePasswordOption"] == "Use default password for this build": + newPassword = "IOtSt4ckDec0nZ" + else: + newPassword = generateRandomString() + for (index, serviceName) in enumerate(serviceYamlTemplate): + dockerComposeServicesYaml[serviceName] = serviceYamlTemplate[serviceName] + if "environment" in serviceYamlTemplate[serviceName]: + for (envIndex, envName) in enumerate(serviceYamlTemplate[serviceName]["environment"]): + envName = envName.replace("%randomPassword%", newPassword) + dockerComposeServicesYaml[serviceName]["environment"][envIndex] = envName + + # Ensure you update the "Do nothing" and other 2 strings used for password settings in 'passwords.py' + if (deconzYamlBuildOptions["databasePasswordOption"] == "Randomise database password for this build"): + deconzYamlBuildOptions["databasePasswordOption"] = "Do nothing" + with open(buildSettings, 'w') as outputFile: + yaml.dump(deconzYamlBuildOptions, outputFile) + else: # Do nothing - don't change password + for (index, serviceName) in enumerate(buildCacheServices): + if serviceName in buildCacheServices: # Load service from cache if exists (to maintain password) + dockerComposeServicesYaml[serviceName] = buildCacheServices[serviceName] + else: + dockerComposeServicesYaml[serviceName] = 
serviceYamlTemplate[serviceName] + else: + print("Deconz Warning: Build settings file not found, using default password") + time.sleep(1) + newPassword = "IOtSt4ckDec0nZ" + for (index, serviceName) in enumerate(serviceYamlTemplate): + dockerComposeServicesYaml[serviceName] = serviceYamlTemplate[serviceName] + if "environment" in serviceYamlTemplate[serviceName]: + for (envIndex, envName) in enumerate(serviceYamlTemplate[serviceName]["environment"]): + envName = envName.replace("%randomPassword%", newPassword) + dockerComposeServicesYaml[serviceName]["environment"][envIndex] = envName + + deconzYamlBuildOptions["databasePasswordOption"] = "Do nothing" + with open(buildSettings, 'w') as outputFile: + yaml.dump(deconzYamlBuildOptions, outputFile) + + else: + print("Deconz Warning: Build settings file not found, using default password") + time.sleep(1) + newPassword = "IOtSt4ckDec0nZ" + for (index, serviceName) in enumerate(serviceYamlTemplate): + dockerComposeServicesYaml[serviceName] = serviceYamlTemplate[serviceName] + if "environment" in serviceYamlTemplate[serviceName]: + for (envIndex, envName) in enumerate(serviceYamlTemplate[serviceName]["environment"]): + envName = envName.replace("%randomPassword%", newPassword) + dockerComposeServicesYaml[serviceName]["environment"][envIndex] = envName + deconzYamlBuildOptions = { + "version": "1", + "application": "IOTstack", + "service": "Deconz", + "comment": "Deconz Build Options" + } + + deconzYamlBuildOptions["databasePasswordOption"] = "Do nothing" + with open(buildSettings, 'w') as outputFile: + yaml.dump(deconzYamlBuildOptions, outputFile) + + try: + if currentServiceName in dockerComposeServicesYaml: + dockerComposeServicesYaml[currentServiceName]["devices"] = deconzYamlBuildOptions["hardware"] + except Exception as err: + print("Error setting deconz hardware: ", err) + return False + + return True + + # ##################################### + # Supporting functions below + # ##################################### + + def checkForIssues(): + fileIssues = checkFiles() + if (len(fileIssues) > 0): + issues["fileIssues"] = fileIssues + for (index, serviceName) in enumerate(dockerComposeServicesYaml): + if not currentServiceName == serviceName: # Skip self + currentServicePorts = getExternalPorts(currentServiceName, dockerComposeServicesYaml) + portConflicts = checkPortConflicts(serviceName, currentServicePorts, dockerComposeServicesYaml) + if (len(portConflicts) > 0): + issues["portConflicts"] = portConflicts + + def checkFiles(): + fileIssues = [] + if not os.path.exists("{serviceDir}{buildSettings}".format(serviceDir=serviceService, buildSettings=buildSettingsFileName)): + fileIssues.append(serviceService + '/build_settings.yml does not exist. 
Select hardware in options to fix.') + return fileIssues + + # ##################################### + # End Supporting functions + # ##################################### + + ############################ + # Menu Logic + ############################ + + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + + selectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + needsRender = 1 + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + + def goBack(): + global selectionInProgress + global needsRender + selectionInProgress = False + needsRender = 1 + return True + + def selectDeconzHardware(): + global needsRender + global hasRebuiltHardwareSelection + deconzSelectHardwareFilePath = "./.templates/deconz/select_hw.py" + with open(deconzSelectHardwareFilePath, "rb") as pythonDynamicImportFile: + code = compile(pythonDynamicImportFile.read(), deconzSelectHardwareFilePath, "exec") + # execGlobals = globals() + # execLocals = locals() + execGlobals = { + "currentServiceName": currentServiceName, + "renderMode": renderMode + } + execLocals = {} + screenActive = False + exec(code, execGlobals, execLocals) + signal.signal(signal.SIGWINCH, onResize) + try: + hasRebuiltHardwareSelection = execGlobals["hasRebuiltHardwareSelection"] + except: + hasRebuiltHardwareSelection = False + screenActive = True + needsRender = 1 + + def enterPortNumberExec(): + # global term + global needsRender + global dockerComposeServicesYaml + externalPort = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + internalPort = getInternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + newPortNumber = enterPortNumberWithWhiptail(term, dockerComposeServicesYaml, currentServiceName, hotzoneLocation, externalPort) + + if newPortNumber > 0: + dockerComposeServicesYaml[currentServiceName]["ports"][0] = "{newExtPort}:{oldIntPort}".format( + newExtPort = newPortNumber, + oldIntPort = internalPort + ) + createMenu() + needsRender = 1 + + def setPasswordOptions(): + global needsRender + global hasRebuiltAddons + passwordOptionsMenuFilePath = "./.templates/{currentService}/passwords.py".format(currentService=currentServiceName) + with open(passwordOptionsMenuFilePath, "rb") as pythonDynamicImportFile: + code = compile(pythonDynamicImportFile.read(), passwordOptionsMenuFilePath, "exec") + execGlobals = { + "currentServiceName": currentServiceName, + "renderMode": renderMode + } + execLocals = {} + screenActive = False + exec(code, execGlobals, execLocals) + signal.signal(signal.SIGWINCH, onResize) + screenActive = True + needsRender = 1 + + def onResize(sig, action): + global deconzBuildOptions + global currentMenuItemIndex + mainRender(1, deconzBuildOptions, currentMenuItemIndex) + + deconzBuildOptions = [] + + def createMenu(): + global deconzBuildOptions + global serviceService + try: + deconzBuildOptions = [] + portNumber = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + deconzBuildOptions.append([ + "Change external WUI Port Number from: {port}".format(port=portNumber), + enterPortNumberExec + ]) + except: # Error getting port + pass + + + if os.path.exists("{buildSettings}".format(buildSettings=buildSettings)): + deconzBuildOptions.insert(0, ["Change selected hardware", selectDeconzHardware]) + else: + deconzBuildOptions.insert(0, ["Select hardware", selectDeconzHardware]) + deconzBuildOptions.append([ + "DeConz Password Options", + setPasswordOptions + ]) + + 
deconzBuildOptions.append(["Go back", goBack]) + + def runOptionsMenu(): + createMenu() + menuEntryPoint() + return True + + def renderHotZone(term, menu, selection, hotzoneLocation): + lineLengthAtTextStart = 71 + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + for (index, menuItem) in enumerate(menu): + toPrint = "" + if index == selection: + toPrint += ('{bv} -> {t.blue_on_green} {title} {t.normal} <-'.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + else: + toPrint += ('{bv} {t.normal} {title} '.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + + for i in range(lineLengthAtTextStart - len(menuItem[0])): + toPrint += " " + + toPrint += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + + toPrint = term.center(toPrint) + + print(toPrint) + + def mainRender(needsRender, menu, selection): + global hasRebuiltHardwareSelection + term = Terminal() + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack DeConz Options'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Option to configure {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, menu, selection, hotzoneLocation) + + if needsRender == 1: + if os.path.exists("{buildSettings}".format(buildSettings=buildSettings)): + if hasRebuiltHardwareSelection: + print(term.center(commonEmptyLine(renderMode))) + print(term.center('{bv} {t.grey_on_blue4} {text} {t.normal}{t.white_on_black}{t.normal} {bv}'.format(t=term, text="Hardware list has been rebuilt: build_settings.yml", bv=specialChars[renderMode]["borderVertical"]))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center('{bv} {t.grey_on_blue4} {text} {t.normal}{t.white_on_black}{t.normal} {bv}'.format(t=term, text="Using existing build_settings.yml for hardware installation", bv=specialChars[renderMode]["borderVertical"]))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to run command or save input {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to go back to build stack menu {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + global deconzBuildOptions + if len(deconzBuildOptions[selection]) > 1 and isinstance(deconzBuildOptions[selection][1], types.FunctionType): + deconzBuildOptions[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(nodeRedBuildOptions[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 2: + if 
menu[index][2]["skip"] == True: + return False + return True + + def menuEntryPoint(): + # These need to be reglobalised due to eval() + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + global hideHelpText + global deconzBuildOptions + term = Terminal() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, deconzBuildOptions, currentMenuItemIndex) + selectionInProgress = True + with term.cbreak(): + while selectionInProgress: + menuNavigateDirection = 0 + + if needsRender: # Only rerender when changed to prevent flickering + mainRender(needsRender, deconzBuildOptions, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + menuNavigateDirection += 1 + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_LEFT': + goBack() + if key.name == 'KEY_ENTER': + runSelection(currentMenuItemIndex) + if key.name == 'KEY_ESCAPE': + return True + elif key: + if key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, deconzBuildOptions, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(deconzBuildOptions) + needsRender = 2 + + while not isMenuItemSelectable(deconzBuildOptions, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(deconzBuildOptions) + return True + + #################### + # End menu section + #################### + + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'deconz': + main() +else: + print("Error. '{}' Tried to run 'deconz' config".format(currentServiceName)) diff --git a/.templates/deconz/hardware_list.yml b/.templates/deconz/hardware_list.yml new file mode 100644 index 000000000..356ed1961 --- /dev/null +++ b/.templates/deconz/hardware_list.yml @@ -0,0 +1,9 @@ +version: 1 +application: "IOTstack" +service: "deconz" +comment: "Deconz hardware check list." +hardwarePaths: + - "/dev/ttyUSB0" + - "/dev/ttyACM0" + - "/dev/serial0" + - "/dev/ttyS0" diff --git a/.templates/deconz/passwords.py b/.templates/deconz/passwords.py new file mode 100755 index 000000000..4f4b6315c --- /dev/null +++ b/.templates/deconz/passwords.py @@ -0,0 +1,328 @@ +#!/usr/bin/env python3 + +import signal + +def main(): + from blessed import Terminal + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine + from deps.consts import servicesDirectory, templatesDirectory, buildSettingsFileName + import time + import subprocess + import ruamel.yaml + import os + + global signal + global currentServiceName + global menuSelectionInProgress + global mainMenuList + global currentMenuItemIndex + global renderMode + global paginationSize + global paginationStartIndex + global hideHelpText + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + try: # If not already set, then set it. 
+ hideHelpText = hideHelpText + except: + hideHelpText = False + + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + paginationToggle = [10, term.height - 25] + paginationStartIndex = 0 + paginationSize = paginationToggle[0] + + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + buildSettings = serviceService + buildSettingsFileName + + def goBack(): + global menuSelectionInProgress + global needsRender + menuSelectionInProgress = False + needsRender = 1 + return True + + mainMenuList = [] + + hotzoneLocation = [((term.height // 16) + 6), 0] + + menuSelectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + + # Render Modes: + # 0 = No render needed + # 1 = Full render + # 2 = Hotzone only + needsRender = 1 + + def onResize(sig, action): + global mainMenuList + global currentMenuItemIndex + mainRender(1, mainMenuList, currentMenuItemIndex) + + def generateLineText(text, textLength=None, paddingBefore=0, lineLength=64): + result = "" + for i in range(paddingBefore): + result += " " + + textPrintableCharactersLength = textLength + + if (textPrintableCharactersLength) == None: + textPrintableCharactersLength = len(text) + + result += text + remainingSpace = lineLength - textPrintableCharactersLength + + for i in range(remainingSpace): + result += " " + + return result + + def renderHotZone(term, renderType, menu, selection, hotzoneLocation, paddingBefore = 4): + global paginationSize + selectedTextLength = len("-> ") + + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + + if paginationStartIndex >= 1: + print(term.center("{b} {uaf} {uaf}{uaf}{uaf} {ual} {b}".format( + b=specialChars[renderMode]["borderVertical"], + uaf=specialChars[renderMode]["upArrowFull"], + ual=specialChars[renderMode]["upArrowLine"] + ))) + else: + print(term.center(commonEmptyLine(renderMode))) + + for (index, menuItem) in enumerate(menu): # Menu loop + if index >= paginationStartIndex and index < paginationStartIndex + paginationSize: + lineText = generateLineText(menuItem[0], paddingBefore=paddingBefore) + + # Menu highlight logic + if index == selection: + formattedLineText = '-> {t.blue_on_green}{title}{t.normal} <-'.format(t=term, title=menuItem[0]) + paddedLineText = generateLineText(formattedLineText, textLength=len(menuItem[0]) + selectedTextLength, paddingBefore=paddingBefore - selectedTextLength) + toPrint = paddedLineText + else: + toPrint = '{title}{t.normal}'.format(t=term, title=lineText) + # ##### + + # Menu check render logic + if menuItem[1]["checked"]: + toPrint = " (X) " + toPrint + else: + toPrint = " ( ) " + toPrint + + toPrint = "{bv} {toPrint} {bv}".format(bv=specialChars[renderMode]["borderVertical"], toPrint=toPrint) # Generate border + toPrint = term.center(toPrint) # Center Text (All lines should have the same amount of printable characters) + # ##### + print(toPrint) + + if paginationStartIndex + paginationSize < len(menu): + print(term.center("{b} {daf} {daf}{daf}{daf} {dal} {b}".format( + b=specialChars[renderMode]["borderVertical"], + daf=specialChars[renderMode]["downArrowFull"], + dal=specialChars[renderMode]["downArrowLine"] + ))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + + + def mainRender(needsRender, menu, selection): + global paginationStartIndex + global paginationSize + term = Terminal() + + if selection >= paginationStartIndex + paginationSize: 
+ paginationStartIndex = selection - (paginationSize - 1) + 1 + needsRender = 1 + + if selection <= paginationStartIndex - 1: + paginationStartIndex = selection + needsRender = 1 + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack DeConz Password Options'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Password Option {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, needsRender, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + if term.height < 32: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Not enough vertical room to render controls help text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Space] to select option {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to build and save option {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to cancel changes {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + if len(mainMenuList[selection]) > 1 and isinstance(mainMenuList[selection][1], types.FunctionType): + mainMenuList[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(mainMenuList[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 1: + if "skip" in menu[index][1] and menu[index][1]["skip"] == True: + return False + return True + + def loadOptionsMenu(): + global mainMenuList + mainMenuList.append(["Use default password for this build", { "checked": True }]) + mainMenuList.append(["Randomise database password for this build", { "checked": False }]) + mainMenuList.append(["Randomise database password every build", { "checked": False }]) + mainMenuList.append(["Do nothing", { "checked": False }]) + + def checkMenuItem(selection): + global mainMenuList + for (index, menuItem) in enumerate(mainMenuList): + mainMenuList[index][1]["checked"] = False + + mainMenuList[selection][1]["checked"] = True + + def saveOptions(): + try: + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + + if os.path.exists(buildSettings): + with open(r'%s' % buildSettings) as objBuildSettingsFile: + deconzYamlBuildOptions = yaml.load(objBuildSettingsFile) + else: + deconzYamlBuildOptions = { + "version": "1", + "application": "IOTstack", + "service": "deconz", + "comment": "Build Settings", + } + + deconzYamlBuildOptions["databasePasswordOption"] = "" + + for (index, menuOption) 
in enumerate(mainMenuList): + if menuOption[1]["checked"]: + deconzYamlBuildOptions["databasePasswordOption"] = menuOption[0] + break + + with open(buildSettings, 'w') as outputFile: + yaml.dump(deconzYamlBuildOptions, outputFile) + + except Exception as err: + print("Error saving DeConz Password options", currentServiceName) + print(err) + return False + global hasRebuiltHardwareSelection + hasRebuiltHardwareSelection = True + return True + + def loadOptions(): + try: + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + + if os.path.exists(buildSettings): + with open(r'%s' % buildSettings) as objBuildSettingsFile: + deconzYamlBuildOptions = yaml.load(objBuildSettingsFile) + + for (index, menuOption) in enumerate(mainMenuList): + if menuOption[0] == deconzYamlBuildOptions["databasePasswordOption"]: + checkMenuItem(index) + break + + except Exception as err: + print("Error loading DeConz Password options", currentServiceName) + print(err) + return False + return True + + + if __name__ == 'builtins': + global signal + term = Terminal() + signal.signal(signal.SIGWINCH, onResize) + loadOptionsMenu() + loadOptions() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + menuSelectionInProgress = True + with term.cbreak(): + while menuSelectionInProgress: + menuNavigateDirection = 0 + + if not needsRender == 0: # Only rerender when changed to prevent flickering + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + if paginationSize == paginationToggle[0]: + paginationSize = paginationToggle[1] + else: + paginationSize = paginationToggle[0] + mainRender(1, mainMenuList, currentMenuItemIndex) + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_ENTER': + if saveOptions(): + return True + else: + print("Something went wrong. 
Try saving the list again.") + if key.name == 'KEY_ESCAPE': + menuSelectionInProgress = False + return True + elif key: + if key == ' ': # Space pressed + checkMenuItem(currentMenuItemIndex) # Update checked list + needsRender = 2 + elif key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, mainMenuList, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + needsRender = 2 + + while not isMenuItemSelectable(mainMenuList, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + return True + + return True + +originalSignalHandler = signal.getsignal(signal.SIGINT) +main() +signal.signal(signal.SIGWINCH, originalSignalHandler) diff --git a/.templates/deconz/select_hw.py b/.templates/deconz/select_hw.py new file mode 100755 index 000000000..381aa31a5 --- /dev/null +++ b/.templates/deconz/select_hw.py @@ -0,0 +1,330 @@ +#!/usr/bin/env python3 + +import signal + +def main(): + from blessed import Terminal + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine + from deps.consts import servicesDirectory, templatesDirectory, buildSettingsFileName + import time + import subprocess + import ruamel.yaml + import os + + global signal + global currentServiceName + global dockerCommandsSelectionInProgress + global mainMenuList + global currentMenuItemIndex + global renderMode + global paginationSize + global paginationStartIndex + global hardwareListFile + global hideHelpText + + global installCommand + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + try: # If not already set, then set it. 
+ hideHelpText = hideHelpText + except: + hideHelpText = False + + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + paginationToggle = [10, term.height - 25] + paginationStartIndex = 0 + paginationSize = paginationToggle[0] + + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + hardwareListFileSource = serviceTemplate + '/hardware_list.yml' + + def goBack(): + global dockerCommandsSelectionInProgress + global needsRender + dockerCommandsSelectionInProgress = False + needsRender = 1 + return True + + mainMenuList = [] + + hotzoneLocation = [((term.height // 16) + 6), 0] + + dockerCommandsSelectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + + # Render Modes: + # 0 = No render needed + # 1 = Full render + # 2 = Hotzone only + needsRender = 1 + + def onResize(sig, action): + global mainMenuList + global currentMenuItemIndex + mainRender(1, mainMenuList, currentMenuItemIndex) + + def generateLineText(text, textLength=None, paddingBefore=0, lineLength=64): + result = "" + for i in range(paddingBefore): + result += " " + + textPrintableCharactersLength = textLength + + if (textPrintableCharactersLength) == None: + textPrintableCharactersLength = len(text) + + result += text + remainingSpace = lineLength - textPrintableCharactersLength + + for i in range(remainingSpace): + result += " " + + return result + + def renderHotZone(term, renderType, menu, selection, hotzoneLocation, paddingBefore = 4): + global paginationSize + selectedTextLength = len("-> ") + + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + + if paginationStartIndex >= 1: + print(term.center("{b} {uaf} {uaf}{uaf}{uaf} {ual} {b}".format( + b=specialChars[renderMode]["borderVertical"], + uaf=specialChars[renderMode]["upArrowFull"], + ual=specialChars[renderMode]["upArrowLine"] + ))) + else: + print(term.center(commonEmptyLine(renderMode))) + + for (index, menuItem) in enumerate(menu): # Menu loop + if index >= paginationStartIndex and index < paginationStartIndex + paginationSize: + lineText = generateLineText(menuItem[0], paddingBefore=paddingBefore) + + # Menu highlight logic + if index == selection: + formattedLineText = '-> {t.blue_on_green}{title}{t.normal} <-'.format(t=term, title=menuItem[0]) + paddedLineText = generateLineText(formattedLineText, textLength=len(menuItem[0]) + selectedTextLength, paddingBefore=paddingBefore - selectedTextLength) + toPrint = paddedLineText + else: + toPrint = '{title}{t.normal}'.format(t=term, title=lineText) + # ##### + + # Menu check render logic + if menuItem[1]["checked"]: + toPrint = " (X) " + toPrint + else: + toPrint = " ( ) " + toPrint + + toPrint = "{bv} {toPrint} {bv}".format(bv=specialChars[renderMode]["borderVertical"], toPrint=toPrint) # Generate border + toPrint = term.center(toPrint) # Center Text (All lines should have the same amount of printable characters) + # ##### + print(toPrint) + + if paginationStartIndex + paginationSize < len(menu): + print(term.center("{b} {daf} {daf}{daf}{daf} {dal} {b}".format( + b=specialChars[renderMode]["borderVertical"], + daf=specialChars[renderMode]["downArrowFull"], + dal=specialChars[renderMode]["downArrowLine"] + ))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + + + def mainRender(needsRender, menu, selection): + global paginationStartIndex + global paginationSize + term = Terminal() + + if selection >= 
paginationStartIndex + paginationSize: + paginationStartIndex = selection - (paginationSize - 1) + 1 + needsRender = 1 + + if selection <= paginationStartIndex - 1: + paginationStartIndex = selection + needsRender = 1 + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack DeConz Hardware'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select DeConz Hardware {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, needsRender, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + if term.height < 32: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Not enough vertical room to render controls help text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Space] to select or deselect hardware {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to build and save hardware list {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to cancel changes {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + if len(mainMenuList[selection]) > 1 and isinstance(mainMenuList[selection][1], types.FunctionType): + mainMenuList[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(mainMenuList[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 1: + if "skip" in menu[index][1] and menu[index][1]["skip"] == True: + return False + return True + + def loadAddonsMenu(): + global mainMenuList + if os.path.exists(hardwareListFileSource): + with open(r'%s' % hardwareListFileSource) as objHardwareListFile: + hardwareKnown = yaml.load(objHardwareListFile) + knownHardwareList = hardwareKnown["hardwarePaths"] + if os.path.exists("{serviceDir}{buildSettings}".format(serviceDir=serviceService, buildSettings=buildSettingsFileName)): + with open("{serviceDir}{buildSettings}".format(serviceDir=serviceService, buildSettings=buildSettingsFileName)) as objSavedHardwareListFile: + savedHardwareList = yaml.load(objSavedHardwareListFile) + savedHardware = [] + + try: + savedHardware = savedHardwareList["hardware"] + except: + print("Error: Loading saved hardware selection. 
Please resave your selection.") + input("Press Enter to continue...") + + for (index, hardwarePath) in enumerate(knownHardwareList): + if hardwarePath in savedHardware: + mainMenuList.append([hardwarePath, { "checked": True }]) + else: + mainMenuList.append([hardwarePath, { "checked": False }]) + + else: # No saved list + for (index, hardwarePath) in enumerate(knownHardwareList): + if os.path.exists(hardwarePath): + mainMenuList.append([hardwarePath, { "checked": True }]) + else: + mainMenuList.append([hardwarePath, { "checked": False }]) + + + else: + print("Error: '{hardwareListFile}' file doesn't exist.".format(hardwareListFile=hardwareListFileSource)) + input("Press Enter to continue...") + + def checkMenuItem(selection): + global mainMenuList + if mainMenuList[selection][1]["checked"] == True: + mainMenuList[selection][1]["checked"] = False + else: + mainMenuList[selection][1]["checked"] = True + + def saveAddonList(): + try: + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + deconzYamlHardwareList = { + "version": "1", + "application": "IOTstack", + "service": "deconz", + "comment": "Build Settings", + "hardware": [] + } + for (index, addon) in enumerate(mainMenuList): + if addon[1]["checked"]: + deconzYamlHardwareList["hardware"].append(addon[0]) + + with open("{serviceDir}{buildSettings}".format(serviceDir=serviceService, buildSettings=buildSettingsFileName), 'w') as outputFile: + yaml.dump(deconzYamlHardwareList, outputFile) + + except Exception as err: + print("Error saving DeConz Hardware list", currentServiceName) + print(err) + return False + global hasRebuiltHardwareSelection + hasRebuiltHardwareSelection = True + return True + + + if __name__ == 'builtins': + global signal + term = Terminal() + signal.signal(signal.SIGWINCH, onResize) + loadAddonsMenu() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + dockerCommandsSelectionInProgress = True + with term.cbreak(): + while dockerCommandsSelectionInProgress: + menuNavigateDirection = 0 + + if not needsRender == 0: # Only rerender when changed to prevent flickering + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + if paginationSize == paginationToggle[0]: + paginationSize = paginationToggle[1] + else: + paginationSize = paginationToggle[0] + mainRender(1, mainMenuList, currentMenuItemIndex) + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_ENTER': + if saveAddonList(): + return True + else: + print("Something went wrong. 
Try saving the list again.") + if key.name == 'KEY_ESCAPE': + dockerCommandsSelectionInProgress = False + return True + elif key: + if key == ' ': # Space pressed + checkMenuItem(currentMenuItemIndex) # Update checked list + needsRender = 2 + elif key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, mainMenuList, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + needsRender = 2 + + while not isMenuItemSelectable(mainMenuList, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + return True + + return True + +originalSignalHandler = signal.getsignal(signal.SIGINT) +main() +signal.signal(signal.SIGWINCH, originalSignalHandler) diff --git a/.templates/deconz/service.yml b/.templates/deconz/service.yml new file mode 100644 index 000000000..426fd37fa --- /dev/null +++ b/.templates/deconz/service.yml @@ -0,0 +1,21 @@ +deconz: + image: deconzcommunity/deconz + container_name: deconz + restart: unless-stopped + ports: + - "8090:80" + - "443:443" + - "5901:5900" + volumes: + - ./volumes/deconz:/opt/deCONZ + devices: # This list is replaced during the build process. Modify the list in "build_settings.yml" to change it. + - /dev/null + environment: + - DECONZ_VNC_MODE=1 + - DECONZ_VNC_PASSWORD=%randomPassword% + - DEBUG_INFO=1 + - DEBUG_APS=0 + - DEBUG_ZCL=0 + - DEBUG_ZDP=0 + - DEBUG_OTAU=0 + diff --git a/.templates/diyhue/build.py b/.templates/diyhue/build.py new file mode 100755 index 000000000..7d0ff9b78 --- /dev/null +++ b/.templates/diyhue/build.py @@ -0,0 +1,323 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. Required to make local vars work correctly +def main(): + import os + import time + import ruamel.yaml + import signal + import sys + from blessed import Terminal + + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine + from deps.consts import servicesDirectory, templatesDirectory + from deps.common_functions import getExternalPorts, getInternalPorts, checkPortConflicts, enterPortNumberWithWhiptail, getNetworkDetails + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + global hideHelpText # Showing and hiding the help controls text + global serviceService + + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + # runtime vars + portConflicts = [] + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. 
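+  # (Editorial note, illustrative rather than part of the template: the four
+  #  checkFor*Hook functions each probe for an optional function via callable() and
+  #  record the result, so once they have all run the menu sees something shaped like
+  #    buildHooks == {"options": True, "preBuildHook": True,
+  #                   "postBuildHook": True, "runChecksHook": True}
+  #  with False wherever a service leaves the corresponding hook undefined.)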
+ def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. + def preBuild(): + global dockerComposeServicesYaml + try: + if "diyhue" in dockerComposeServicesYaml: + networkDetails = getNetworkDetails() + if "environment" in dockerComposeServicesYaml["diyhue"]: + for (envIndex, envName) in enumerate(dockerComposeServicesYaml["diyhue"]["environment"]): + ipAddressSet = envName.replace("%LAN_IP_Address%", networkDetails["ip"]) + macAddressSet = ipAddressSet.replace("%LAN_MAC_Address%", networkDetails["mac"]) + dockerComposeServicesYaml["diyhue"]["environment"][envIndex] = macAddressSet + except Exception as err: + print("Error setting diyhue network details: ", err) + input("Press any key to continue...") + return False + + return True + + # ##################################### + # Supporting functions below + # ##################################### + + def checkForIssues(): + for (index, serviceName) in enumerate(dockerComposeServicesYaml): + if not currentServiceName == serviceName: # Skip self + currentServicePorts = getExternalPorts(currentServiceName, dockerComposeServicesYaml) + portConflicts = checkPortConflicts(serviceName, currentServicePorts, dockerComposeServicesYaml) + if (len(portConflicts) > 0): + issues["portConflicts"] = portConflicts + + # ##################################### + # End Supporting functions + # ##################################### + + ############################ + # Menu Logic + ############################ + + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + + selectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + needsRender = 1 + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + + def goBack(): + global selectionInProgress + global needsRender + selectionInProgress = False + needsRender = 1 + return True + + def enterPortNumberExec(): + # global term + global needsRender + global dockerComposeServicesYaml + externalPort = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + internalPort = getInternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + newPortNumber = enterPortNumberWithWhiptail(term, dockerComposeServicesYaml, currentServiceName, hotzoneLocation, externalPort) + + if newPortNumber > 0: + dockerComposeServicesYaml[currentServiceName]["ports"][0] = "{newExtPort}:{oldIntPort}".format( + newExtPort = newPortNumber, + oldIntPort = internalPort + ) + createMenu() + needsRender = 1 + + def onResize(sig, action): + global diyhueBuildOptions + global currentMenuItemIndex + 
mainRender(1, diyhueBuildOptions, currentMenuItemIndex) + + diyhueBuildOptions = [] + + def createMenu(): + global diyhueBuildOptions + try: + diyhueBuildOptions = [] + portNumber = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + diyhueBuildOptions.append([ + "Change external WUI Port Number from: {port}".format(port=portNumber), + enterPortNumberExec + ]) + except: # Error getting port + pass + diyhueBuildOptions.append(["Go back", goBack]) + + def runOptionsMenu(): + createMenu() + menuEntryPoint() + return True + + def renderHotZone(term, menu, selection, hotzoneLocation): + lineLengthAtTextStart = 71 + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + for (index, menuItem) in enumerate(menu): + toPrint = "" + if index == selection: + toPrint += ('{bv} -> {t.blue_on_green} {title} {t.normal} <-'.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + else: + toPrint += ('{bv} {t.normal} {title} '.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + + for i in range(lineLengthAtTextStart - len(menuItem[0])): + toPrint += " " + + toPrint += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + + toPrint = term.center(toPrint) + + print(toPrint) + + def mainRender(needsRender, menu, selection): + term = Terminal() + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack DIY Hue Options'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Option to configure {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to run command or save input {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to go back to build stack menu {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + global diyhueBuildOptions + if len(diyhueBuildOptions[selection]) > 1 and isinstance(diyhueBuildOptions[selection][1], types.FunctionType): + diyhueBuildOptions[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(nodeRedBuildOptions[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 2: + if menu[index][2]["skip"] == True: + return False + return True + + def menuEntryPoint(): + # These need to be reglobalised due to eval() + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + global hideHelpText + global diyhueBuildOptions + 
term = Terminal() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, diyhueBuildOptions, currentMenuItemIndex) + selectionInProgress = True + with term.cbreak(): + while selectionInProgress: + menuNavigateDirection = 0 + + if needsRender: # Only rerender when changed to prevent flickering + mainRender(needsRender, diyhueBuildOptions, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + menuNavigateDirection += 1 + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_LEFT': + goBack() + if key.name == 'KEY_ENTER': + runSelection(currentMenuItemIndex) + if key.name == 'KEY_ESCAPE': + return True + elif key: + if key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, diyhueBuildOptions, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(diyhueBuildOptions) + needsRender = 2 + + while not isMenuItemSelectable(diyhueBuildOptions, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(diyhueBuildOptions) + return True + + #################### + # End menu section + #################### + + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'diyhue': + main() +else: + print("Error. '{}' Tried to run 'diyhue' config".format(currentServiceName)) diff --git a/.templates/diyhue/diyhue.env b/.templates/diyhue/diyhue.env deleted file mode 100644 index 65a1f61be..000000000 --- a/.templates/diyhue/diyhue.env +++ /dev/null @@ -1,2 +0,0 @@ -IP=your_Pi's_IP_here -MAC=your_Pi's_MAC_here diff --git a/.templates/diyhue/service.yml b/.templates/diyhue/service.yml index cadebf5a9..fc6439379 100644 --- a/.templates/diyhue/service.yml +++ b/.templates/diyhue/service.yml @@ -1,14 +1,15 @@ - diyhue: - container_name: diyhue - image: diyhue/core:latest - ports: - - "8070:80/tcp" - - "1900:1900/udp" - - "1982:1982/udp" - - "2100:2100/udp" - # - "443:443/tcp" - env_file: - - ./services/diyhue/diyhue.env - volumes: - - ./volumes/diyhue/:/opt/hue-emulator/export/ - restart: unless-stopped +diyhue: + container_name: diyhue + image: diyhue/core:latest + ports: + - "8070:80/tcp" + - "1900:1900/udp" + - "1982:1982/udp" + - "2100:2100/udp" + environment: + - IP=%LAN_IP_Address% + - MAC=%LAN_MAC_Address% + volumes: + - ./volumes/diyhue:/opt/hue-emulator/export + restart: unless-stopped + diff --git a/.templates/docker-compose-base.yml b/.templates/docker-compose-base.yml new file mode 100644 index 000000000..7337dc90a --- /dev/null +++ b/.templates/docker-compose-base.yml @@ -0,0 +1,14 @@ +--- + +networks: + default: + driver: bridge + ipam: + driver: default + nextcloud: + driver: bridge + internal: true + ipam: + driver: default + +services: diff --git a/.templates/domoticz/service.yml b/.templates/domoticz/service.yml new file mode 100644 index 000000000..931ae2db0 --- /dev/null +++ b/.templates/domoticz/service.yml @@ -0,0 +1,15 @@ +domoticz: + container_name: domoticz + image: domoticz/domoticz:stable + restart: unless-stopped + environment: + - TZ=${TZ:-Etc/UTC} + # - 
LOG_PATH=/opt/domoticz/userdata/domoticz.log + # - EXTRA_CMD_ARG= + ports: + - "8083:8080" + - "1443:443" + volumes: + - ./volumes/domoticz:/opt/domoticz/userdata + x-devices: + - "/dev/serial/by-id/usb-0658_0200-if00-port0:/dev/ttyACM0" diff --git a/.templates/dozzle/build.py b/.templates/dozzle/build.py new file mode 100755 index 000000000..04208d9ac --- /dev/null +++ b/.templates/dozzle/build.py @@ -0,0 +1,308 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. Required to make local vars work correctly +def main(): + import os + import time + import ruamel.yaml + import signal + import sys + from blessed import Terminal + + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine + from deps.consts import servicesDirectory, templatesDirectory + from deps.common_functions import getExternalPorts, getInternalPorts, checkPortConflicts, enterPortNumberWithWhiptail + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + global hideHelpText # Showing and hiding the help controls text + global serviceService + + serviceService = servicesDirectory + currentServiceName + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + # runtime vars + portConflicts = [] + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. 
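+  # (Illustrative sketch, not something dozzle itself needs: a service with a real
+  #  pre-build step would edit its own entry in dockerComposeServicesYaml here, before
+  #  the compose file is written, for example forcing a different external port:
+  #    dockerComposeServicesYaml["dozzle"]["ports"][0] = "8890:8080"
+  #  The diyhue preBuild above does this for real, substituting the %LAN_IP_Address%
+  #  and %LAN_MAC_Address% placeholders. Dozzle keeps the default no-op below.)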
+ def preBuild(): + return True + + # ##################################### + # Supporting functions below + # ##################################### + + def checkForIssues(): + for (index, serviceName) in enumerate(dockerComposeServicesYaml): + if not currentServiceName == serviceName: # Skip self + currentServicePorts = getExternalPorts(currentServiceName, dockerComposeServicesYaml) + portConflicts = checkPortConflicts(serviceName, currentServicePorts, dockerComposeServicesYaml) + if (len(portConflicts) > 0): + issues["portConflicts"] = portConflicts + + # ##################################### + # End Supporting functions + # ##################################### + + ############################ + # Menu Logic + ############################ + + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + + selectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + needsRender = 1 + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + + def goBack(): + global selectionInProgress + global needsRender + selectionInProgress = False + needsRender = 1 + return True + + def enterPortNumberExec(): + # global term + global needsRender + global dockerComposeServicesYaml + externalPort = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + internalPort = getInternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + newPortNumber = enterPortNumberWithWhiptail(term, dockerComposeServicesYaml, currentServiceName, hotzoneLocation, externalPort) + + if newPortNumber > 0: + dockerComposeServicesYaml[currentServiceName]["ports"][0] = "{newExtPort}:{oldIntPort}".format( + newExtPort = newPortNumber, + oldIntPort = internalPort + ) + createMenu() + needsRender = 1 + + def onResize(sig, action): + global dozzleBuildOptions + global currentMenuItemIndex + mainRender(1, dozzleBuildOptions, currentMenuItemIndex) + + dozzleBuildOptions = [] + + def createMenu(): + global dozzleBuildOptions + try: + dozzleBuildOptions = [] + portNumber = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + # dozzleBuildOptions.append([ + # "Change external WUI Port Number from: {port}".format(port=portNumber), + # enterPortNumberExec + # ]) + except: # Error getting port + pass + dozzleBuildOptions.append(["Go back", goBack]) + + def runOptionsMenu(): + createMenu() + menuEntryPoint() + return True + + def renderHotZone(term, menu, selection, hotzoneLocation): + lineLengthAtTextStart = 71 + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + for (index, menuItem) in enumerate(menu): + toPrint = "" + if index == selection: + toPrint += ('{bv} -> {t.blue_on_green} {title} {t.normal} <-'.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + else: + toPrint += ('{bv} {t.normal} {title} '.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + + for i in range(lineLengthAtTextStart - len(menuItem[0])): + toPrint += " " + + toPrint += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + + toPrint = term.center(toPrint) + + print(toPrint) + + def mainRender(needsRender, menu, selection): + term = Terminal() + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack Dozzle Options'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Option to configure 
{bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to run command or save input {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to go back to build stack menu {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + global dozzleBuildOptions + if len(dozzleBuildOptions[selection]) > 1 and isinstance(dozzleBuildOptions[selection][1], types.FunctionType): + dozzleBuildOptions[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(nodeRedBuildOptions[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 2: + if menu[index][2]["skip"] == True: + return False + return True + + def menuEntryPoint(): + # These need to be reglobalised due to eval() + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + global hideHelpText + global dozzleBuildOptions + term = Terminal() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, dozzleBuildOptions, currentMenuItemIndex) + selectionInProgress = True + with term.cbreak(): + while selectionInProgress: + menuNavigateDirection = 0 + + if needsRender: # Only rerender when changed to prevent flickering + mainRender(needsRender, dozzleBuildOptions, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + menuNavigateDirection += 1 + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_LEFT': + goBack() + if key.name == 'KEY_ENTER': + runSelection(currentMenuItemIndex) + if key.name == 'KEY_ESCAPE': + return True + elif key: + if key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, dozzleBuildOptions, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(dozzleBuildOptions) + needsRender = 2 + + while not isMenuItemSelectable(dozzleBuildOptions, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(dozzleBuildOptions) + return True + + #################### + # End menu section + #################### + + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # 
Name of the current service +if currentServiceName == 'dozzle': + main() +else: + print("Error. '{}' Tried to run 'dozzle' config".format(currentServiceName)) diff --git a/.templates/dozzle/service.yml b/.templates/dozzle/service.yml new file mode 100644 index 000000000..82960e794 --- /dev/null +++ b/.templates/dozzle/service.yml @@ -0,0 +1,9 @@ +dozzle: + container_name: dozzle + image: amir20/dozzle:latest + restart: unless-stopped + ports: + - "8889:8080" + volumes: + - /var/run/docker.sock:/var/run/docker.sock + diff --git a/.templates/duckdns/service.yml b/.templates/duckdns/service.yml new file mode 100644 index 000000000..149dbd393 --- /dev/null +++ b/.templates/duckdns/service.yml @@ -0,0 +1,14 @@ +duckdns: + container_name: duckdns + build: https://github.com/ukkopahis/docker-duckdns.git + network_mode: host + restart: unless-stopped + environment: + PUID: 1000 + PGID: 1000 + # Required variables, define here on in docker-compose.override.yml + # TOKEN: token from duckdns.org + # SUBDOMAINS: your domain added to duckdns.org (without .duckdns.org) + # Optional + # PRIVATE_SUBDOMAINS: your domain added to duckdns.org (without .duckdns.org) + diff --git a/.templates/esphome/88-tty-iotstack-esphome.rules b/.templates/esphome/88-tty-iotstack-esphome.rules new file mode 100644 index 000000000..2b8f30d39 --- /dev/null +++ b/.templates/esphome/88-tty-iotstack-esphome.rules @@ -0,0 +1,47 @@ +# Assumptions: +# +# 1. The ESPhome container is running with the container-name "esphome". +# +# 2. The service definition for the ESPhome container includes: +# +# device_cgroup_rules: +# - 'c 188:* rw' +# +# This clause permits the container to access any device with a major +# number 188, which captures most USB-to-serial adapters that are +# found on ESP32 dev boards or equivalent off-board adapters such as +# those made by Future Technology Devices International (FTDI) and +# Silicon Laboratories Incorporated. The major number 188 also shows +# up in the UDEV rules below. +# +# 3. The ESP device to be managed is mounted and/or unmounted WHILE the +# container is running. In other words, all bets are off if the host +# system reboots or the container starts while the USB device is +# connected. You will likely need to unplug/replug the device to +# get the container back in sync. +# +# The rules do NOT check if the container is running and do NOT check +# for errors. All that will happen is errors in the system log. +# +# Removing ESPhome from your stack does NOT remove this rules file. It +# does not matter whether you accomplish removal by editing your compose +# file or via the IOTstack menu, this rule will be left in place and it +# will generate an error every time it fires in response to insertion +# or removal of a matching USB device. +# +# It is perfectly safe to remove this rules file yourself: +# +# sudo rm /etc/udev/rules.d/88-tty-iotstack-esphome.rules +# +# That's all you have to do. UDEV is dynamic and, despite what you read +# on the web, does NOT have to be restarted or reloaded. 
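+#
+# A quick way to confirm that an adapter really does use major number 188 (an
+# illustrative check, not part of these rules) is to plug it in and inspect the
+# device node it creates, substituting your own device name as needed:
+#
+#   ls -l /dev/ttyUSB0
+#   crw-rw---- 1 root dialout 188, 0 ... /dev/ttyUSB0
+#
+# The "188" before the comma is the major number. If your adapter reports a
+# different major number, adjust ENV{MAJOR} in the rules below and the matching
+# device_cgroup_rules entry in the ESPhome service definition.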
+ +# Upon insertion of a matching USB device, mount the same device inside the container +ACTION=="add", \ + SUBSYSTEM=="tty", ENV{MAJOR}=="188", \ + RUN+="/usr/bin/docker exec esphome mknod %E{DEVNAME} c %M %m" + +# Upon removal of a matching USB device, remove the same device inside the container +ACTION=="remove", \ + SUBSYSTEM=="tty", ENV{MAJOR}=="188", \ + RUN+="/usr/bin/docker exec esphome rm -f %E{DEVNAME}" diff --git a/.templates/esphome/build.py b/.templates/esphome/build.py new file mode 100755 index 000000000..641cd87bf --- /dev/null +++ b/.templates/esphome/build.py @@ -0,0 +1,175 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +import os +import sys + +global templatesDirectory +global currentServiceName # Name of the current service +global generateRandomString + +from deps.consts import templatesDirectory +from deps.common_functions import generateRandomString + + +# Main wrapper function. Required to make local vars work correctly +def main(): + + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + + # runtime vars + portConflicts = [] + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. + def preBuild(): + return True + + # ##################################### + # Supporting functions below + # ##################################### + + def doCustomSetup() : + + import os + import re + import subprocess + from os.path import exists + + def copyUdevRulesFile(templates,rules) : + + # the expected location of the rules file in the template is the absolute path ... + SOURCE_PATH = templates + '/' + currentServiceName + '/' + rules + + # the rules file should be installed at the following absolute path... + TARGET_PATH = '/etc/udev/rules.d' + '/' + rules + + # does the target already exist? + if not exists(TARGET_PATH) : + + # no! does the source path exist? + if exists(SOURCE_PATH) : + + # yes! 
we should copy the source to the target + subprocess.call(['sudo', 'cp', SOURCE_PATH, TARGET_PATH]) + + # sudo cp sets root ownership but not necessarily correct mode + subprocess.call(['sudo', 'chmod', '644', TARGET_PATH]) + + def setEnvironment (path, key, value) : + + # assume the variable should be written + shouldWrite = True + + # does the target file already exist? + if exists(path) : + + # yes! open the file so we can search it + env_file = open(path, 'r+') + + # prepare to read by lines + env_data = env_file.readlines() + + # we are searching for... + expression = '^' + key + '=' + + # search by line + for line in env_data: + if re.search(expression, line) : + shouldWrite = False + break + + else : + + # no! create the file + env_file = open(path, 'w') + + # should the variable be written? + if shouldWrite : + print(key + '=' + value, file=env_file) + + # done with the environment file + env_file.close() + + copyUdevRulesFile( + os.path.realpath(templatesDirectory), + '88-tty-iotstack-' + currentServiceName + '.rules' + ) + + # the environment file is located at ... + DOT_ENV_PATH = os.path.realpath('.') + '/.env' + + # check/set environment variables + setEnvironment(DOT_ENV_PATH,'ESPHOME_USERNAME',currentServiceName) + setEnvironment(DOT_ENV_PATH,'ESPHOME_PASSWORD',generateRandomString()) + + + def checkForIssues(): + doCustomSetup() # done here because is called least-frequently + return True + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +if currentServiceName == 'esphome': + main() +else: + print("Error. '{}' Tried to run 'plex' config".format(currentServiceName)) diff --git a/.templates/esphome/service.yml b/.templates/esphome/service.yml new file mode 100644 index 000000000..330be3297 --- /dev/null +++ b/.templates/esphome/service.yml @@ -0,0 +1,15 @@ +esphome: + container_name: esphome + image: esphome/esphome + restart: unless-stopped + environment: + - TZ=${TZ:-Etc/UTC} + - USERNAME=${ESPHOME_USERNAME:-esphome} + - PASSWORD=${ESPHOME_PASSWORD:?eg echo ESPHOME_PASSWORD=ChangeMe >>~/IOTstack/.env} + network_mode: host + x-ports: + - "6052:6052" + volumes: + - ./volumes/esphome/config:/config + device_cgroup_rules: + - 'c 188:* rw' diff --git a/.templates/espruinohub/build.py b/.templates/espruinohub/build.py new file mode 100755 index 000000000..84a45e335 --- /dev/null +++ b/.templates/espruinohub/build.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. Required to make local vars work correctly +def main(): + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + + # runtime vars + portConflicts = [] + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. 
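+  # (Editorial note: this minimal skeleton defines no runOptionsMenu, so
+  #  checkForOptionsHook() above lands in its except branch, because
+  #  callable(runOptionsMenu) raises NameError, and buildHooks["options"] ends up
+  #  False, which is how the build menu knows not to offer an " >> Options " entry
+  #  for espruinohub. The same probe-and-fallback pattern applies to the hooks
+  #  checked below.)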
+ def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. + def preBuild(): + return True + + # ##################################### + # Supporting functions below + # ##################################### + + def checkForIssues(): + return True + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'espruinohub': + main() +else: + print("Error. '{}' Tried to run 'espruinohub' config".format(currentServiceName)) diff --git a/.templates/espruinohub/service.yml b/.templates/espruinohub/service.yml index ba5a8e517..eb71c6019 100644 --- a/.templates/espruinohub/service.yml +++ b/.templates/espruinohub/service.yml @@ -1,6 +1,7 @@ - espruinohub: - container_name: espruinohub - image: humbertosales/espruinohub-docker-rpi - network_mode: host - privileged: true - restart: unless-stopped +espruinohub: + container_name: espruinohub + image: humbertosales/espruinohub-docker-rpi + network_mode: host + privileged: true + restart: unless-stopped + diff --git a/.templates/example_template/example_build.py b/.templates/example_template/example_build.py new file mode 100755 index 000000000..f55f8da3e --- /dev/null +++ b/.templates/example_template/example_build.py @@ -0,0 +1,312 @@ +#!/usr/bin/env python3 + +# Be warned that globals and variable scopes do not function normally in this Python script. This is because this script is eval'd with exec. + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. Required to make local vars work correctly +def main(): + from blessed import Terminal + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine # Common functions used when creating menu + import types + import time + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + global hideHelpText + + try: # If not already set, then set it to prevent errors. + hideHelpText = hideHelpText + except: + hideHelpText = False + + # runtime vars + portConflicts = [] + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. 
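+  # (Illustrative note, not part of the template: when runChecks()/checkForIssues()
+  #  further down detect a clash, the returned issues dict comes back shaped like
+  #    issues == {"portConflicts": [["8070", "some_other_service"]]}
+  #  where each entry pairs the clashing external port with the name of the other
+  #  service that already claims it; "some_other_service" here is hypothetical.)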
+ def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This is the menu that will run for " >> Options " + def runOptionsMenu(): + menuEntryPoint() + return True + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. + def preBuild(): + return True + + # ##################################### + # Supporting functions below + # ##################################### + + def checkForIssues(): + for (index, serviceName) in enumerate(dockerComposeServicesYaml): + if not currentServiceName == serviceName: # Skip self + currentServicePorts = getExternalPorts(currentServiceName) + portConflicts = checkPortConflicts(serviceName, currentServicePorts) + if (len(portConflicts) > 0): + issues["portConflicts"] = portConflicts + + def getExternalPorts(serviceName): + externalPorts = [] + try: + yamlService = dockerComposeServicesYaml[serviceName] + if "ports" in yamlService: + for (index, port) in enumerate(yamlService["ports"]): + try: + externalAndInternal = port.split(":") + externalPorts.append(externalAndInternal[0]) + except: + pass + except: + pass + return externalPorts + + def checkPortConflicts(serviceName, currentPorts): + portConflicts = [] + if not currentServiceName == serviceName: + yamlService = dockerComposeServicesYaml[serviceName] + servicePorts = getExternalPorts(serviceName) + for (index, servicePort) in enumerate(servicePorts): + for (index, currentPort) in enumerate(currentPorts): + if (servicePort == currentPort): + portConflicts.append([servicePort, serviceName]) + return portConflicts + + + + # ##################################### + # Example menu below + # ##################################### + # You can build your menu system any way you like. This one is provided as an example. + # Checkout Blessed for full functionality, like text entry and so on at: https://blessed.readthedocs.io/en/latest/ + + # The functions the menu executes are below. 
They must be placed before the menu list 'menuItemsExample' + def menuCmdItem1(): + print("You chose item1!") + return True + + def menuCmdAnotherItem(): + print("This is another menu item") + return True + + def nop(): + return True + + def menuCmdStillAnotherItem(): + print("This is still another menu item") + return True + + def goBack(): + global selectionInProgress + selectionInProgress = False + return True + + # The actual menu + menuItemsExample = [ + ["Item 1", menuCmdItem1], + ["Another item", menuCmdAnotherItem], + ["I'm skipped!", nop, { "skip": True }], + ["Still another item", menuCmdStillAnotherItem], + ["Error item"], + ["Error item"], + ["Some custom thing", nop, { "customProperty": True }], + ["I'm also skipped!", nop, { "skip": True }], + ["Go back", goBack] + ] + + # Vars that the menu uses + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + + selectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + needsRender = True + + # This is the main rendering function for the menu + def mainRender(menu, selection): + term = Terminal() + print(term.clear()) + + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack Example Commands'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Command to run {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + + print(term.center(commonEmptyLine(renderMode))) + + lineLengthAtTextStart = 71 + + for (index, menuItem) in enumerate(menu): + toPrint = "" + if index == selection: # This checks if the current rendering item is the one that's selected + toPrint += ('{bv} -> {t.blue_on_green} {title} {t.normal} <-'.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + else: + if len(menu[index]) > 2 and "customProperty" in menu[index][2] and menu[index][2]["customProperty"] == True: # A custom property check example + toPrint += ('{bv} {t.black_on_green} {title} {t.normal} '.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + else: + toPrint += ('{bv} {t.normal} {title} '.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + + for i in range(lineLengthAtTextStart - len(menuItem[0])): # Pad the remainder of the line + toPrint += " " + + toPrint += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + + toPrint = term.center(toPrint) + + print(toPrint) + + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to run command {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to go back to build stack menu {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + + def runSelection(selection): + term = Terminal() + if len(menuItemsExample[selection]) > 1 and 
isinstance(menuItemsExample[selection][1], types.FunctionType): + menuItemsExample[selection][1]() + else: + print(term.green_reverse('IOTstack Example Error: No function assigned to menu item: "{}"'.format(menuItemsExample[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 2: + if "skip" in menu[index][2] and menu[index][2]["skip"] == True: + return False + return True + + def menuEntryPoint(): + # These need to be reglobalised due to eval() + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + global hideHelpText + term = Terminal() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(menuItemsExample, currentMenuItemIndex) + selectionInProgress = True + with term.cbreak(): + while selectionInProgress: + menuNavigateDirection = 0 + + if needsRender: # Only rerender when changed to prevent flickering + mainRender(menuItemsExample, currentMenuItemIndex) + needsRender = False + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + menuNavigateDirection += 1 + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_LEFT': + goBack() + if key.name == 'KEY_ENTER': + runSelection(currentMenuItemIndex) + if key.name == 'KEY_ESCAPE': + return True + elif key: + if key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, menuItemsExample, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(menuItemsExample) + needsRender = True + + while not isMenuItemSelectable(menuItemsExample, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(menuItemsExample) + return True + + + + + + # Entrypoint for execution + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'SERVICENAME': + main() +else: + print("Error. '{}' Tried to run 'SERVICENAME' config".format(currentServiceName)) diff --git a/.templates/example_template/example_service.yml b/.templates/example_template/example_service.yml new file mode 100644 index 000000000..209b9e4a2 --- /dev/null +++ b/.templates/example_template/example_service.yml @@ -0,0 +1,12 @@ +containerNameGoesHere: + container_name: containerNameGoesHere + restart: unless-stopped + image: example_template/core:latest + ports: + - "8070:80/tcp" # Always ensure the WUI port is first in the ports list. + - "1900:1900/udp" + env_file: + - ./services/example_template.env + volumes: + - ./volumes/example_template/:/opt/example_template/ + diff --git a/.templates/gitea/build.py b/.templates/gitea/build.py new file mode 100755 index 000000000..e783e8956 --- /dev/null +++ b/.templates/gitea/build.py @@ -0,0 +1,309 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. 
Required to make local vars work correctly +def main(): + import os + import time + import ruamel.yaml + import signal + import sys + from blessed import Terminal + + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine + from deps.consts import servicesDirectory, templatesDirectory + from deps.common_functions import getExternalPorts, getInternalPorts, checkPortConflicts, enterPortNumberWithWhiptail + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + global hideHelpText # Showing and hiding the help controls text + global serviceService + + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + # runtime vars + portConflicts = [] + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. 
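+  # (Illustrative sketch only; gitea keeps the default no-op below. A pre-build hook
+  #  could, for example, align the container account with the user running the menu by
+  #  rewriting the environment entries defined in the service template:
+  #    dockerComposeServicesYaml["gitea"]["environment"][0] = "USER_UID={}".format(os.getuid())
+  #    dockerComposeServicesYaml["gitea"]["environment"][1] = "USER_GID={}".format(os.getgid())
+  #  os is already imported at the top of main().)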
+ def preBuild(): + return True + + # ##################################### + # Supporting functions below + # ##################################### + + def checkForIssues(): + for (index, serviceName) in enumerate(dockerComposeServicesYaml): + if not currentServiceName == serviceName: # Skip self + currentServicePorts = getExternalPorts(currentServiceName, dockerComposeServicesYaml) + portConflicts = checkPortConflicts(serviceName, currentServicePorts, dockerComposeServicesYaml) + if (len(portConflicts) > 0): + issues["portConflicts"] = portConflicts + + # ##################################### + # End Supporting functions + # ##################################### + + ############################ + # Menu Logic + ############################ + + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + + selectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + needsRender = 1 + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + + def goBack(): + global selectionInProgress + global needsRender + selectionInProgress = False + needsRender = 1 + return True + + def enterPortNumberExec(): + # global term + global needsRender + global dockerComposeServicesYaml + externalPort = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + internalPort = getInternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + newPortNumber = enterPortNumberWithWhiptail(term, dockerComposeServicesYaml, currentServiceName, hotzoneLocation, externalPort) + + if newPortNumber > 0: + dockerComposeServicesYaml[currentServiceName]["ports"][0] = "{newExtPort}:{oldIntPort}".format( + newExtPort = newPortNumber, + oldIntPort = internalPort + ) + createMenu() + needsRender = 1 + + def onResize(sig, action): + global giteaBuildOptions + global currentMenuItemIndex + mainRender(1, giteaBuildOptions, currentMenuItemIndex) + + giteaBuildOptions = [] + + def createMenu(): + global giteaBuildOptions + try: + giteaBuildOptions = [] + portNumber = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + giteaBuildOptions.append([ + "Change external WUI Port Number from: {port}".format(port=portNumber), + enterPortNumberExec + ]) + except: # Error getting port + pass + giteaBuildOptions.append(["Go back", goBack]) + + def runOptionsMenu(): + createMenu() + menuEntryPoint() + return True + + def renderHotZone(term, menu, selection, hotzoneLocation): + lineLengthAtTextStart = 71 + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + for (index, menuItem) in enumerate(menu): + toPrint = "" + if index == selection: + toPrint += ('{bv} -> {t.blue_on_green} {title} {t.normal} <-'.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + else: + toPrint += ('{bv} {t.normal} {title} '.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + + for i in range(lineLengthAtTextStart - len(menuItem[0])): + toPrint += " " + + toPrint += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + + toPrint = term.center(toPrint) + + print(toPrint) + + def mainRender(needsRender, menu, selection): + term = Terminal() + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack Gitea Options'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Option to configure 
{bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to run command or save input {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to go back to build stack menu {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + global giteaBuildOptions + if len(giteaBuildOptions[selection]) > 1 and isinstance(giteaBuildOptions[selection][1], types.FunctionType): + giteaBuildOptions[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(nodeRedBuildOptions[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 2: + if menu[index][2]["skip"] == True: + return False + return True + + def menuEntryPoint(): + # These need to be reglobalised due to eval() + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + global hideHelpText + global giteaBuildOptions + term = Terminal() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, giteaBuildOptions, currentMenuItemIndex) + selectionInProgress = True + with term.cbreak(): + while selectionInProgress: + menuNavigateDirection = 0 + + if needsRender: # Only rerender when changed to prevent flickering + mainRender(needsRender, giteaBuildOptions, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + menuNavigateDirection += 1 + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_LEFT': + goBack() + if key.name == 'KEY_ENTER': + runSelection(currentMenuItemIndex) + if key.name == 'KEY_ESCAPE': + return True + elif key: + if key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, giteaBuildOptions, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(giteaBuildOptions) + needsRender = 2 + + while not isMenuItemSelectable(giteaBuildOptions, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(giteaBuildOptions) + return True + + #################### + # End menu section + #################### + + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the 
current service +if currentServiceName == 'gitea': + main() +else: + print("Error. '{}' Tried to run 'gitea' config".format(currentServiceName)) diff --git a/.templates/gitea/service.yml b/.templates/gitea/service.yml new file mode 100644 index 000000000..6bb8525dd --- /dev/null +++ b/.templates/gitea/service.yml @@ -0,0 +1,14 @@ +gitea: + container_name: gitea + image: "kunde21/gitea-arm:latest" + restart: unless-stopped + ports: + - "7920:3000/tcp" + - "2222:22/tcp" + environment: + - USER_UID=1000 + - USER_GID=1000 + volumes: + - ./volumes/gitea/data:/data + - /etc/timezone:/etc/timezone:ro + diff --git a/.templates/grafana/build.py b/.templates/grafana/build.py new file mode 100755 index 000000000..7da3d8934 --- /dev/null +++ b/.templates/grafana/build.py @@ -0,0 +1,306 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. Required to make local vars work correctly +def main(): + import os + import time + from blessed import Terminal + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine, padText + from deps.consts import servicesDirectory, templatesDirectory, volumesDirectory + from deps.common_functions import getExternalPorts, getInternalPorts, checkPortConflicts, enterPortNumberWithWhiptail + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + + # runtime vars + portConflicts = [] + serviceVolume = volumesDirectory + currentServiceName + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + documentationHint = 'https://sensorsiot.github.io/IOTstack/Containers/Grafana' + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. 
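As an illustration of what the options menu further down in this Grafana script does to the compose data: enterPortNumberExec() replaces only the external (host) half of the first "ext:int" port mapping and keeps the container-side port. The standalone sketch below reproduces that rewrite in isolation; split_ports() is a simplified stand-in for the project's getExternalPorts()/getInternalPorts() helpers, ignores protocol suffixes such as "/tcp", and the example values are illustrative only.

def split_ports(mapping):
    """Return (external, internal) from a compose port string like '3000:3000'."""
    external, internal = str(mapping).split(":", 1)
    return external, internal

def change_external_port(service, new_external):
    # Keep the container port, swap only the host-side port, as the menu does.
    external, internal = split_ports(service["ports"][0])
    service["ports"][0] = "{newExtPort}:{oldIntPort}".format(
        newExtPort=new_external, oldIntPort=internal)
    return service

if __name__ == "__main__":
    grafana = {"ports": ["3000:3000"]}
    print(change_external_port(grafana, 3001))  # {'ports': ['3001:3000']}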
+ def preBuild(): + return True + + # ##################################### + # Supporting functions below + # ##################################### + + def checkForIssues(): + for (index, serviceName) in enumerate(dockerComposeServicesYaml): + if not currentServiceName == serviceName: # Skip self + currentServicePorts = getExternalPorts(currentServiceName, dockerComposeServicesYaml) + portConflicts = checkPortConflicts(serviceName, currentServicePorts, dockerComposeServicesYaml) + if (len(portConflicts) > 0): + issues["portConflicts"] = portConflicts + + ############################ + # Menu Logic + ############################ + + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + + selectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + needsRender = 1 + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + + def goBack(): + global selectionInProgress + global needsRender + selectionInProgress = False + needsRender = 1 + return True + + def enterPortNumberExec(): + # global term + global needsRender + global dockerComposeServicesYaml + externalPort = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + internalPort = getInternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + newPortNumber = enterPortNumberWithWhiptail(term, dockerComposeServicesYaml, currentServiceName, hotzoneLocation, externalPort) + + if newPortNumber > 0: + dockerComposeServicesYaml[currentServiceName]["ports"][0] = "{newExtPort}:{oldIntPort}".format( + newExtPort = newPortNumber, + oldIntPort = internalPort + ) + createMenu() + needsRender = 1 + + def onResize(sig, action): + global grafanaBuildOptions + global currentMenuItemIndex + mainRender(1, grafanaBuildOptions, currentMenuItemIndex) + + grafanaBuildOptions = [] + + def createMenu(): + global grafanaBuildOptions + try: + grafanaBuildOptions = [] + portNumber = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + grafanaBuildOptions.append([ + "Change external WUI Port Number from: {port}".format(port=portNumber), + enterPortNumberExec + ]) + except: # Error getting port + pass + grafanaBuildOptions.append(["Go back", goBack]) + + def runOptionsMenu(): + createMenu() + menuEntryPoint() + return True + + def renderHotZone(term, menu, selection, hotzoneLocation): + lineLengthAtTextStart = 71 + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + for (index, menuItem) in enumerate(menu): + toPrint = "" + if index == selection: + toPrint += ('{bv} -> {t.blue_on_green} {title} {t.normal} <-'.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + else: + toPrint += ('{bv} {t.normal} {title} '.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + + for i in range(lineLengthAtTextStart - len(menuItem[0])): + toPrint += " " + + toPrint += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + + toPrint = term.center(toPrint) + + print(toPrint) + + def mainRender(needsRender, menu, selection): + term = Terminal() + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack Grafana Options'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Option to configure {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if 
needsRender >= 1: + renderHotZone(term, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to run command or save input {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to go back to build stack menu {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + if len(documentationHint) > 1: + if len(documentationHint) > 56: + documentationAndPadding = padText(documentationHint, 71) + print(term.center("{bv} Documentation: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + else: + documentationAndPadding = padText(documentationHint, 56) + print(term.center("{bv} Documentation: {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + global grafanaBuildOptions + if len(grafanaBuildOptions[selection]) > 1 and isinstance(grafanaBuildOptions[selection][1], types.FunctionType): + grafanaBuildOptions[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(nodeRedBuildOptions[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 2: + if menu[index][2]["skip"] == True: + return False + return True + + def menuEntryPoint(): + # These need to be reglobalised due to eval() + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + global hideHelpText + global grafanaBuildOptions + term = Terminal() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, grafanaBuildOptions, currentMenuItemIndex) + selectionInProgress = True + with term.cbreak(): + while selectionInProgress: + menuNavigateDirection = 0 + + if needsRender: # Only rerender when changed to prevent flickering + mainRender(needsRender, grafanaBuildOptions, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + menuNavigateDirection += 1 + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_LEFT': + goBack() + if key.name == 'KEY_ENTER': + runSelection(currentMenuItemIndex) + if key.name == 'KEY_ESCAPE': + return True + elif key: + if key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, grafanaBuildOptions, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % 
len(grafanaBuildOptions) + needsRender = 2 + + while not isMenuItemSelectable(grafanaBuildOptions, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(grafanaBuildOptions) + return True + + #################### + # End menu section + #################### + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'grafana': + main() +else: + print("Error. '{}' Tried to run 'grafana' config".format(currentServiceName)) diff --git a/.templates/grafana/grafana.env b/.templates/grafana/grafana.env deleted file mode 100644 index e76f26be5..000000000 --- a/.templates/grafana/grafana.env +++ /dev/null @@ -1,9 +0,0 @@ -#TZ=Africa/Johannesburg -GF_PATHS_DATA=/var/lib/grafana -GF_PATHS_LOGS=/var/log/grafana -# [SERVER] -#GF_SERVER_ROOT_URL=http://localhost:3000/grafana -#GF_SERVER_SERVE_FROM_SUB_PATH=true -# [SECURITY] -#GF_SECURITY_ADMIN_USER=admin -#GF_SECURITY_ADMIN_PASSWORD=admin diff --git a/.templates/grafana/service.yml b/.templates/grafana/service.yml index 538c9858e..594267ae1 100644 --- a/.templates/grafana/service.yml +++ b/.templates/grafana/service.yml @@ -1,12 +1,21 @@ - grafana: - container_name: grafana - image: grafana/grafana:6.3.6 - restart: unless-stopped - user: "0" - ports: - - 3000:3000 - env_file: - - ./services/grafana/grafana.env - volumes: - - ./volumes/grafana/data:/var/lib/grafana - - ./volumes/grafana/log:/var/log/grafana +grafana: + container_name: grafana + image: grafana/grafana + restart: unless-stopped + user: "0" + ports: + - "3000:3000" + environment: + - TZ=Etc/UTC + - GF_PATHS_DATA=/var/lib/grafana + - GF_PATHS_LOGS=/var/log/grafana + volumes: + - ./volumes/grafana/data:/var/lib/grafana + - ./volumes/grafana/log:/var/log/grafana + healthcheck: + test: ["CMD", "wget", "-O", "/dev/null", "http://localhost:3000"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 30s + diff --git a/.templates/heimdall/service.yml b/.templates/heimdall/service.yml new file mode 100644 index 000000000..a5dad453b --- /dev/null +++ b/.templates/heimdall/service.yml @@ -0,0 +1,14 @@ +heimdall: + image: ghcr.io/linuxserver/heimdall + container_name: heimdall + environment: + - PUID=1000 + - PGID=1000 + - TZ=${TZ:-Etc/UTC} + volumes: + - ./volumes/heimdall/config:/config + ports: + - "8882:80" + - "8883:443" + restart: unless-stopped + diff --git a/.templates/home_assistant/build.py b/.templates/home_assistant/build.py new file mode 100755 index 000000000..2a4e69b77 --- /dev/null +++ b/.templates/home_assistant/build.py @@ -0,0 +1,319 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. 
Required to make local vars work correctly +def main(): + import os + import time + import ruamel.yaml + import signal + import sys + from blessed import Terminal + + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine, padText + from deps.consts import servicesDirectory, templatesDirectory + from deps.common_functions import getExternalPorts, getInternalPorts, checkPortConflicts, enterPortNumberWithWhiptail + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + global hideHelpText # Showing and hiding the help controls text + global serviceService + + serviceService = servicesDirectory + currentServiceName + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + documentationHint = 'https://sensorsiot.github.io/IOTstack/Containers/Home-Assistant' + + # runtime vars + portConflicts = [] + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. 
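For Home Assistant both optional hooks simply return True; what matters is the shared protocol. Each of these build scripts defines its hooks inside main(), reports them through buildHooks, and finishes with eval(toRun)(), so the parent build menu (assumed here, it is not part of this diff) can exec the script with toRun pointing at the hook it wants. A compact, self-contained sketch of that dispatch, under those assumptions:

import textwrap

# Toy version of one of these build scripts: hooks live inside main(),
# and whichever name the caller put in "toRun" is dispatched via eval().
child_source = textwrap.dedent("""
    buildHooks = {}

    def main():
        def checkForPreBuildHook():
            buildHooks["preBuildHook"] = callable(preBuild)
            return buildHooks

        def preBuild():
            return True

        eval(toRun)()

    main()
""")

# The parent menu is assumed to drive the script roughly like this:
execGlobals = {"toRun": "checkForPreBuildHook"}
exec(compile(child_source, "<build.py sketch>", "exec"), execGlobals)
print(execGlobals["buildHooks"])  # -> {'preBuildHook': True}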
+ def preBuild(): + return True + + # ##################################### + # Supporting functions below + # ##################################### + + def checkForIssues(): + for (index, serviceName) in enumerate(dockerComposeServicesYaml): + if not currentServiceName == serviceName: # Skip self + currentServicePorts = getExternalPorts(currentServiceName, dockerComposeServicesYaml) + portConflicts = checkPortConflicts(serviceName, currentServicePorts, dockerComposeServicesYaml) + if (len(portConflicts) > 0): + issues["portConflicts"] = portConflicts + + # ##################################### + # End Supporting functions + # ##################################### + + ############################ + # Menu Logic + ############################ + + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + + selectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + needsRender = 1 + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + + def goBack(): + global selectionInProgress + global needsRender + selectionInProgress = False + needsRender = 1 + return True + + def enterPortNumberExec(): + # global term + global needsRender + global dockerComposeServicesYaml + externalPort = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + internalPort = getInternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + newPortNumber = enterPortNumberWithWhiptail(term, dockerComposeServicesYaml, currentServiceName, hotzoneLocation, externalPort) + + if newPortNumber > 0: + dockerComposeServicesYaml[currentServiceName]["ports"][0] = "{newExtPort}:{oldIntPort}".format( + newExtPort = newPortNumber, + oldIntPort = internalPort + ) + createMenu() + needsRender = 1 + + def onResize(sig, action): + global homeAssistantBuildOptions + global currentMenuItemIndex + mainRender(1, homeAssistantBuildOptions, currentMenuItemIndex) + + homeAssistantBuildOptions = [] + + def createMenu(): + global homeAssistantBuildOptions + try: + homeAssistantBuildOptions = [] + portNumber = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + # homeAssistantBuildOptions.append([ + # "Change external WUI Port Number from: {port}".format(port=portNumber), + # enterPortNumberExec + # ]) + except: # Error getting port + pass + homeAssistantBuildOptions.append(["Go back", goBack]) + + def runOptionsMenu(): + createMenu() + menuEntryPoint() + return True + + def renderHotZone(term, menu, selection, hotzoneLocation): + lineLengthAtTextStart = 71 + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + for (index, menuItem) in enumerate(menu): + toPrint = "" + if index == selection: + toPrint += ('{bv} -> {t.blue_on_green} {title} {t.normal} <-'.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + else: + toPrint += ('{bv} {t.normal} {title} '.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + + for i in range(lineLengthAtTextStart - len(menuItem[0])): + toPrint += " " + + toPrint += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + + toPrint = term.center(toPrint) + + print(toPrint) + + def mainRender(needsRender, menu, selection): + term = Terminal() + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack Home Assistant (Container) Options'))) + print("") + print(term.center(commonTopBorder(renderMode))) + 
print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Option to configure {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to run command or save input {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to go back to build stack menu {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + if len(documentationHint) > 1: + if len(documentationHint) > 56: + documentationAndPadding = padText(documentationHint, 71) + print(term.center("{bv} Documentation: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + else: + documentationAndPadding = padText(documentationHint, 56) + print(term.center("{bv} Documentation: {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + global homeAssistantBuildOptions + if len(homeAssistantBuildOptions[selection]) > 1 and isinstance(homeAssistantBuildOptions[selection][1], types.FunctionType): + homeAssistantBuildOptions[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(nodeRedBuildOptions[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 2: + if menu[index][2]["skip"] == True: + return False + return True + + def menuEntryPoint(): + # These need to be reglobalised due to eval() + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + global hideHelpText + global homeAssistantBuildOptions + term = Terminal() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, homeAssistantBuildOptions, currentMenuItemIndex) + selectionInProgress = True + with term.cbreak(): + while selectionInProgress: + menuNavigateDirection = 0 + + if needsRender: # Only rerender when changed to prevent flickering + mainRender(needsRender, homeAssistantBuildOptions, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + menuNavigateDirection += 1 + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_LEFT': + goBack() + if key.name == 'KEY_ENTER': + runSelection(currentMenuItemIndex) + if key.name == 'KEY_ESCAPE': + return True + elif key: + if key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True 
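# the forced full render below (needsRender argument of 1) redraws the screen
# immediately, so the help-text toggle takes effect without another keypress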
+ mainRender(1, homeAssistantBuildOptions, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(homeAssistantBuildOptions) + needsRender = 2 + + while not isMenuItemSelectable(homeAssistantBuildOptions, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(homeAssistantBuildOptions) + return True + + #################### + # End menu section + #################### + + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'home_assistant': + main() +else: + print("Error. '{}' Tried to run 'home_assistant' config".format(currentServiceName)) diff --git a/.templates/home_assistant/service.yml b/.templates/home_assistant/service.yml new file mode 100644 index 000000000..b6ed8363c --- /dev/null +++ b/.templates/home_assistant/service.yml @@ -0,0 +1,9 @@ +home_assistant: + container_name: home_assistant + image: ghcr.io/home-assistant/home-assistant:stable + restart: unless-stopped + network_mode: host + volumes: + - /etc/localtime:/etc/localtime:ro + - ./volumes/home_assistant:/config + privileged: true diff --git a/.templates/homebridge/homebridge.env b/.templates/homebridge/homebridge.env deleted file mode 100644 index daa0c6c3e..000000000 --- a/.templates/homebridge/homebridge.env +++ /dev/null @@ -1,5 +0,0 @@ -TZ=Europe/London -PGID=1000 -PUID=1000 -HOMEBRIDGE_CONFIG_UI=1 -HOMEBRIDGE_CONFIG_UI_PORT=8080 diff --git a/.templates/homebridge/service.yml b/.templates/homebridge/service.yml index 78fb68aa0..d2c24b02f 100644 --- a/.templates/homebridge/service.yml +++ b/.templates/homebridge/service.yml @@ -1,8 +1,14 @@ - homebridge: - container_name: homebridge - image: oznu/homebridge:no-avahi-arm32v6 - restart: unless-stopped - network_mode: host - env_file: ./services/homebridge/homebridge.env - volumes: - - ./volumes/homebridge:/homebridge +homebridge: + container_name: homebridge + image: homebridge/homebridge:latest + restart: unless-stopped + environment: + - TZ=Etc/UTC + - PGID=1000 + - PUID=1000 + - HOMEBRIDGE_CONFIG_UI=1 + - HOMEBRIDGE_CONFIG_UI_PORT=8581 + volumes: + - ./volumes/homebridge:/homebridge + network_mode: host + diff --git a/.templates/homer/service.yml b/.templates/homer/service.yml new file mode 100644 index 000000000..94e76e7a0 --- /dev/null +++ b/.templates/homer/service.yml @@ -0,0 +1,12 @@ +homer: + image: b4bz/homer:latest + container_name: homer + environment: + - INIT_ASSETS=1 + user: $UID:$GID + volumes: + - ./volumes/homer/assets:/www/assets + ports: + - "8881:8080" + restart: unless-stopped + diff --git a/.templates/influxdb/build.py b/.templates/influxdb/build.py new file mode 100755 index 000000000..c8c5e2b68 --- /dev/null +++ b/.templates/influxdb/build.py @@ -0,0 +1,393 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. 
Required to make local vars work correctly +def main(): + import os + import time + import ruamel.yaml + import signal + import sys + import subprocess + from blessed import Terminal + + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine, padText + from deps.consts import servicesDirectory, templatesDirectory, servicesFileName, buildSettingsFileName + from deps.common_functions import getExternalPorts, checkPortConflicts, generateRandomString + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + global hideHelpText # Showing and hiding the help controls text + global serviceService + + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + buildSettings = serviceService + buildSettingsFileName + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + documentationHint = 'https://sensorsiot.github.io/IOTstack/Containers/InfluxDB' + + # runtime vars + portConflicts = [] + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. 
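The preBuild() hook that follows reads the service's build settings, decides whether to keep, default, or randomise the database password, and then substitutes the %randomPassword% placeholder in the template's environment entries before merging them into the compose output. The standalone sketch below shows just the substitution step; make_password() is a local stand-in for the project's generateRandomString() helper, and the INFLUXDB_ADMIN_PASSWORD entry is illustrative rather than taken from the template.

import secrets
import string

def make_password(length=16):
    # Stand-in for generateRandomString(): a random alphanumeric secret.
    alphabet = string.ascii_letters + string.digits
    return "".join(secrets.choice(alphabet) for _ in range(length))

def substitute_password(template_service, password):
    """Return a copy of one service dict with %randomPassword% resolved."""
    service = dict(template_service)
    service["environment"] = [
        str(entry).replace("%randomPassword%", password)
        for entry in template_service.get("environment", [])
    ]
    return service

if __name__ == "__main__":
    template = {"environment": ["INFLUXDB_ADMIN_PASSWORD=%randomPassword%"]}
    print(substitute_password(template, "IOtSt4ckInfluX"))   # default password path
    print(substitute_password(template, make_password()))    # randomised path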
+ def preBuild(): + # Multi-service: + with open((r'%s/' % serviceTemplate) + servicesFileName) as objServiceFile: + serviceYamlTemplate = yaml.load(objServiceFile) + + oldBuildCache = {} + try: + with open(r'%s' % buildCache) as objBuildCache: + oldBuildCache = yaml.load(objBuildCache) + except: + pass + + buildCacheServices = {} + if "services" in oldBuildCache: + buildCacheServices = oldBuildCache["services"] + + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + + if os.path.exists(buildSettings): + # Password randomisation + with open(r'%s' % buildSettings) as objBuildSettingsFile: + influxDbYamlBuildOptions = yaml.load(objBuildSettingsFile) + if ( + influxDbYamlBuildOptions["databasePasswordOption"] == "Randomise database password for this build" + or influxDbYamlBuildOptions["databasePasswordOption"] == "Randomise database password every build" + or influxDbYamlBuildOptions["databasePasswordOption"] == "Use default password for this build" + ): + if influxDbYamlBuildOptions["databasePasswordOption"] == "Use default password for this build": + randomPassword = "IOtSt4ckInfluX" + else: + randomPassword = generateRandomString() + for (index, serviceName) in enumerate(serviceYamlTemplate): + dockerComposeServicesYaml[serviceName] = serviceYamlTemplate[serviceName] + if "environment" in serviceYamlTemplate[serviceName]: + for (envIndex, envName) in enumerate(serviceYamlTemplate[serviceName]["environment"]): + envName = envName.replace("%randomPassword%", randomPassword) + dockerComposeServicesYaml[serviceName]["environment"][envIndex] = envName + + # Ensure you update the "Do nothing" and other 2 strings used for password settings in 'passwords.py' + if (influxDbYamlBuildOptions["databasePasswordOption"] == "Randomise database password for this build"): + influxDbYamlBuildOptions["databasePasswordOption"] = "Do nothing" + with open(buildSettings, 'w') as outputFile: + yaml.dump(influxDbYamlBuildOptions, outputFile) + else: # Do nothing - don't change password + for (index, serviceName) in enumerate(buildCacheServices): + if serviceName in buildCacheServices: # Load service from cache if exists (to maintain password) + dockerComposeServicesYaml[serviceName] = buildCacheServices[serviceName] + else: + dockerComposeServicesYaml[serviceName] = serviceYamlTemplate[serviceName] + + else: + print("InfluxDB Warning: Build settings file not found, using default password") + time.sleep(1) + randomPassword = "IOtSt4ckInfluX" + for (index, serviceName) in enumerate(serviceYamlTemplate): + dockerComposeServicesYaml[serviceName] = serviceYamlTemplate[serviceName] + if "environment" in serviceYamlTemplate[serviceName]: + for (envIndex, envName) in enumerate(serviceYamlTemplate[serviceName]["environment"]): + envName = envName.replace("%randomPassword%", randomPassword) + dockerComposeServicesYaml[serviceName]["environment"][envIndex] = envName + influxDbYamlBuildOptions = { + "version": "1", + "application": "IOTstack", + "service": "InfluxDB", + "comment": "InfluxDB Build Options" + } + + influxDbYamlBuildOptions["databasePasswordOption"] = "Do nothing" + with open(buildSettings, 'w') as outputFile: + yaml.dump(influxDbYamlBuildOptions, outputFile) + + return True + + # ##################################### + # Supporting functions below + # ##################################### + + def checkForIssues(): + for (index, serviceName) in enumerate(dockerComposeServicesYaml): + if not currentServiceName == serviceName: # Skip self + currentServicePorts = 
getExternalPorts(currentServiceName, dockerComposeServicesYaml) + portConflicts = checkPortConflicts(serviceName, currentServicePorts, dockerComposeServicesYaml) + if (len(portConflicts) > 0): + issues["portConflicts"] = portConflicts + + # ##################################### + # End Supporting functions + # ##################################### + + ############################ + # Menu Logic + ############################ + + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + + selectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + needsRender = 1 + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + + def goBack(): + global selectionInProgress + global needsRender + selectionInProgress = False + needsRender = 1 + return True + + def setPasswordOptions(): + global needsRender + global hasRebuiltAddons + passwordOptionsMenuFilePath = "./.templates/{currentService}/passwords.py".format(currentService=currentServiceName) + with open(passwordOptionsMenuFilePath, "rb") as pythonDynamicImportFile: + code = compile(pythonDynamicImportFile.read(), passwordOptionsMenuFilePath, "exec") + execGlobals = { + "currentServiceName": currentServiceName, + "renderMode": renderMode + } + execLocals = {} + screenActive = False + exec(code, execGlobals, execLocals) + signal.signal(signal.SIGWINCH, onResize) + screenActive = True + needsRender = 1 + + def onResize(sig, action): + global influxDbBuildOptions + global currentMenuItemIndex + mainRender(1, influxDbBuildOptions, currentMenuItemIndex) + + influxDbBuildOptions = [] + + def createMenu(): + global influxDbBuildOptions + global serviceService + + influxDbBuildOptions = [] + # influxDbBuildOptions.append([ + # "InfluxDB Password Options", + # setPasswordOptions + # ]) + + influxDbBuildOptions.append(["Go back", goBack]) + + def runOptionsMenu(): + createMenu() + menuEntryPoint() + return True + + def renderHotZone(term, menu, selection, hotzoneLocation): + lineLengthAtTextStart = 71 + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + for (index, menuItem) in enumerate(menu): + toPrint = "" + if index == selection: + toPrint += ('{bv} -> {t.blue_on_green} {title} {t.normal} <-'.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + else: + toPrint += ('{bv} {t.normal} {title} '.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + + for i in range(lineLengthAtTextStart - len(menuItem[0])): + toPrint += " " + + toPrint += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + + toPrint = term.center(toPrint) + + print(toPrint) + + def mainRender(needsRender, menu, selection): + term = Terminal() + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack InfluxDB Options'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Option to configure {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: 
{bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to run command or save input {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to go back to build stack menu {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + if len(documentationHint) > 1: + if len(documentationHint) > 56: + documentationAndPadding = padText(documentationHint, 71) + print(term.center("{bv} Documentation: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + else: + documentationAndPadding = padText(documentationHint, 56) + print(term.center("{bv} Documentation: {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + global influxDbBuildOptions + if len(influxDbBuildOptions[selection]) > 1 and isinstance(influxDbBuildOptions[selection][1], types.FunctionType): + influxDbBuildOptions[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(nodeRedBuildOptions[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 2: + if menu[index][2]["skip"] == True: + return False + return True + + def menuEntryPoint(): + # These need to be reglobalised due to eval() + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + global hideHelpText + global influxDbBuildOptions + term = Terminal() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, influxDbBuildOptions, currentMenuItemIndex) + selectionInProgress = True + with term.cbreak(): + while selectionInProgress: + menuNavigateDirection = 0 + + if needsRender: # Only rerender when changed to prevent flickering + mainRender(needsRender, influxDbBuildOptions, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + menuNavigateDirection += 1 + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_LEFT': + goBack() + if key.name == 'KEY_ENTER': + runSelection(currentMenuItemIndex) + if key.name == 'KEY_ESCAPE': + return True + elif key: + if key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, influxDbBuildOptions, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(influxDbBuildOptions) + needsRender = 2 + + while not isMenuItemSelectable(influxDbBuildOptions, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(influxDbBuildOptions) + return True + + #################### + # End menu section + 
#################### + + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'influxdb': + main() +else: + print("Error. '{}' Tried to run 'influxdb' config".format(currentServiceName)) diff --git a/.templates/influxdb/influxdb.env b/.templates/influxdb/influxdb.env deleted file mode 100644 index 585c72fed..000000000 --- a/.templates/influxdb/influxdb.env +++ /dev/null @@ -1,13 +0,0 @@ -#INFLUXDB_DB=mydb -INFLUXDB_DATA_ENGINE=tsm1 -INFLUXDB_REPORTING_DISABLED=false -#INFLUXDB_HTTP_AUTH_ENABLED=true -INFLUXDB_ADMIN_ENABLED=true -#INFLUXDB_ADMIN_USER=myadminuser -#INFLUXDB_ADMIN_PASSWORD=myadminpassword -INFLUXDB_USER=nodered -INFLUXDB_USER_PASSWORD=nodered -#INFLUXDB_READ_USER=myreaduser -#INFLUXDB_READ_USER_PASSWORD=myreadpassword -#INFLUXDB_WRITE_USER=mywriteuser -#INFLUXDB_WRITE_USER_PASSWORD=mywritepassword diff --git a/.templates/influxdb/passwords.py b/.templates/influxdb/passwords.py new file mode 100755 index 000000000..fc27f915e --- /dev/null +++ b/.templates/influxdb/passwords.py @@ -0,0 +1,326 @@ +#!/usr/bin/env python3 + +import signal + +def main(): + from blessed import Terminal + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine + from deps.consts import servicesDirectory, templatesDirectory, buildSettingsFileName + import time + import subprocess + import ruamel.yaml + import os + + global signal + global currentServiceName + global menuSelectionInProgress + global mainMenuList + global currentMenuItemIndex + global renderMode + global paginationSize + global paginationStartIndex + global hideHelpText + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + try: # If not already set, then set it. 
+ hideHelpText = hideHelpText + except: + hideHelpText = False + + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + paginationToggle = [10, term.height - 25] + paginationStartIndex = 0 + paginationSize = paginationToggle[0] + + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + buildSettings = serviceService + buildSettingsFileName + + def goBack(): + global menuSelectionInProgress + global needsRender + menuSelectionInProgress = False + needsRender = 1 + return True + + mainMenuList = [] + + hotzoneLocation = [((term.height // 16) + 6), 0] + + menuSelectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + + # Render Modes: + # 0 = No render needed + # 1 = Full render + # 2 = Hotzone only + needsRender = 1 + + def onResize(sig, action): + global mainMenuList + global currentMenuItemIndex + mainRender(1, mainMenuList, currentMenuItemIndex) + + def generateLineText(text, textLength=None, paddingBefore=0, lineLength=64): + result = "" + for i in range(paddingBefore): + result += " " + + textPrintableCharactersLength = textLength + + if (textPrintableCharactersLength) == None: + textPrintableCharactersLength = len(text) + + result += text + remainingSpace = lineLength - textPrintableCharactersLength + + for i in range(remainingSpace): + result += " " + + return result + + def renderHotZone(term, renderType, menu, selection, hotzoneLocation, paddingBefore = 4): + global paginationSize + selectedTextLength = len("-> ") + + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + + if paginationStartIndex >= 1: + print(term.center("{b} {uaf} {uaf}{uaf}{uaf} {ual} {b}".format( + b=specialChars[renderMode]["borderVertical"], + uaf=specialChars[renderMode]["upArrowFull"], + ual=specialChars[renderMode]["upArrowLine"] + ))) + else: + print(term.center(commonEmptyLine(renderMode))) + + for (index, menuItem) in enumerate(menu): # Menu loop + if index >= paginationStartIndex and index < paginationStartIndex + paginationSize: + lineText = generateLineText(menuItem[0], paddingBefore=paddingBefore) + + # Menu highlight logic + if index == selection: + formattedLineText = '-> {t.blue_on_green}{title}{t.normal} <-'.format(t=term, title=menuItem[0]) + paddedLineText = generateLineText(formattedLineText, textLength=len(menuItem[0]) + selectedTextLength, paddingBefore=paddingBefore - selectedTextLength) + toPrint = paddedLineText + else: + toPrint = '{title}{t.normal}'.format(t=term, title=lineText) + # ##### + + # Menu check render logic + if menuItem[1]["checked"]: + toPrint = " (X) " + toPrint + else: + toPrint = " ( ) " + toPrint + + toPrint = "{bv} {toPrint} {bv}".format(bv=specialChars[renderMode]["borderVertical"], toPrint=toPrint) # Generate border + toPrint = term.center(toPrint) # Center Text (All lines should have the same amount of printable characters) + # ##### + print(toPrint) + + if paginationStartIndex + paginationSize < len(menu): + print(term.center("{b} {daf} {daf}{daf}{daf} {dal} {b}".format( + b=specialChars[renderMode]["borderVertical"], + daf=specialChars[renderMode]["downArrowFull"], + dal=specialChars[renderMode]["downArrowLine"] + ))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + + + def mainRender(needsRender, menu, selection): + global paginationStartIndex + global paginationSize + term = Terminal() + + if selection >= paginationStartIndex + paginationSize: 
+ paginationStartIndex = selection - (paginationSize - 1) + 1 + needsRender = 1 + + if selection <= paginationStartIndex - 1: + paginationStartIndex = selection + needsRender = 1 + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack InfluxDB Password Options'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Password Option {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, needsRender, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + if term.height < 32: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Not enough vertical room to render controls help text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Space] to select option {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to build and save option {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to cancel changes {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + if len(mainMenuList[selection]) > 1 and isinstance(mainMenuList[selection][1], types.FunctionType): + mainMenuList[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(mainMenuList[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 1: + if "skip" in menu[index][1] and menu[index][1]["skip"] == True: + return False + return True + + def loadOptionsMenu(): + global mainMenuList + mainMenuList.append(["Use default database password for this build", { "checked": True }]) + mainMenuList.append(["Randomise database password for this build", { "checked": False }]) + mainMenuList.append(["Randomise database password every build", { "checked": False }]) + mainMenuList.append(["Do nothing", { "checked": False }]) + + def checkMenuItem(selection): + global mainMenuList + for (index, menuItem) in enumerate(mainMenuList): + mainMenuList[index][1]["checked"] = False + + mainMenuList[selection][1]["checked"] = True + + def saveOptions(): + try: + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + + if os.path.exists(buildSettings): + with open(r'%s' % buildSettings) as objBuildSettingsFile: + influxDbYamlBuildOptions = yaml.load(objBuildSettingsFile) + else: + influxDbYamlBuildOptions = { + "version": "1", + "application": "IOTstack", + "service": "influxdb", + "comment": "Build Settings", + } + + influxDbYamlBuildOptions["databasePasswordOption"] = "" + + for 
(index, menuOption) in enumerate(mainMenuList): + if menuOption[1]["checked"]: + influxDbYamlBuildOptions["databasePasswordOption"] = menuOption[0] + break + + with open(buildSettings, 'w') as outputFile: + yaml.dump(influxDbYamlBuildOptions, outputFile) + + except Exception as err: + print("Error saving InfluxDB Password options", currentServiceName) + print(err) + return False + return True + + def loadOptions(): + try: + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + + if os.path.exists(buildSettings): + with open(r'%s' % buildSettings) as objBuildSettingsFile: + influxDbYamlBuildOptions = yaml.load(objBuildSettingsFile) + + for (index, menuOption) in enumerate(mainMenuList): + if menuOption[0] == influxDbYamlBuildOptions["databasePasswordOption"]: + checkMenuItem(index) + break + + except Exception as err: + print("Error loading InfluxDB Password options", currentServiceName) + print(err) + return False + return True + + + if __name__ == 'builtins': + global signal + term = Terminal() + signal.signal(signal.SIGWINCH, onResize) + loadOptionsMenu() + loadOptions() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + menuSelectionInProgress = True + with term.cbreak(): + while menuSelectionInProgress: + menuNavigateDirection = 0 + + if not needsRender == 0: # Only rerender when changed to prevent flickering + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + if paginationSize == paginationToggle[0]: + paginationSize = paginationToggle[1] + else: + paginationSize = paginationToggle[0] + mainRender(1, mainMenuList, currentMenuItemIndex) + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_ENTER': + if saveOptions(): + return True + else: + print("Something went wrong. 
Try saving the list again.") + if key.name == 'KEY_ESCAPE': + menuSelectionInProgress = False + return True + elif key: + if key == ' ': # Space pressed + checkMenuItem(currentMenuItemIndex) # Update checked list + needsRender = 2 + elif key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, mainMenuList, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + needsRender = 2 + + while not isMenuItemSelectable(mainMenuList, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + return True + + return True + +originalSignalHandler = signal.getsignal(signal.SIGINT) +main() +signal.signal(signal.SIGWINCH, originalSignalHandler) diff --git a/.templates/influxdb/service.yml b/.templates/influxdb/service.yml index 3886ada52..34793cc6c 100644 --- a/.templates/influxdb/service.yml +++ b/.templates/influxdb/service.yml @@ -1,13 +1,27 @@ - influxdb: - container_name: influxdb - image: "influxdb:latest" - restart: unless-stopped - ports: - - 8086:8086 - - 8083:8083 - - 2003:2003 - env_file: - - ./services/influxdb/influxdb.env - volumes: - - ./volumes/influxdb/data:/var/lib/influxdb - - ./backups/influxdb/db:/var/lib/influxdb/backup +influxdb: + container_name: influxdb + image: "influxdb:1.8" + restart: unless-stopped + ports: + - "8086:8086" + environment: + - TZ=Etc/UTC + - INFLUXDB_HTTP_FLUX_ENABLED=false + - INFLUXDB_REPORTING_DISABLED=false + - INFLUXDB_HTTP_AUTH_ENABLED=false + - INFLUXDB_MONITOR_STORE_ENABLED=FALSE + # - INFLUX_USERNAME=dba + # - INFLUX_PASSWORD=supremo + # - INFLUXDB_UDP_ENABLED=false + # - INFLUXDB_UDP_BIND_ADDRESS=0.0.0.0:8086 + # - INFLUXDB_UDP_DATABASE=udp + volumes: + - ./volumes/influxdb/data:/var/lib/influxdb + - ./backups/influxdb/db:/var/lib/influxdb/backup + healthcheck: + test: ["CMD", "curl", "http://localhost:8086"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 30s + diff --git a/.templates/influxdb/terminal.sh b/.templates/influxdb/terminal.sh index 8e342496f..54c464e50 100755 --- a/.templates/influxdb/terminal.sh +++ b/.templates/influxdb/terminal.sh @@ -2,11 +2,14 @@ echo "You are about to enter the influxdb console:" echo "" +echo "IOTstack influxdb Documentation: https://sensorsiot.github.io/IOTstack/Containers/InfluxDB/" +echo "" echo "to create a db: CREATE DATABASE myname" echo "to show existing a databases: SHOW DATABASES" echo "to use a specific db: USE myname" echo "" echo "to exit type: EXIT" echo "" +echo "docker exec -it influxdb influx" -docker exec -it influxdb influx +docker exec -it influxdb influx "$@" diff --git a/.templates/influxdb2/service.yml b/.templates/influxdb2/service.yml new file mode 100644 index 000000000..a922be47b --- /dev/null +++ b/.templates/influxdb2/service.yml @@ -0,0 +1,26 @@ +influxdb2: + container_name: influxdb2 + image: "influxdb:latest" + restart: unless-stopped + environment: + - TZ=Etc/UTC + - DOCKER_INFLUXDB_INIT_USERNAME=me + - DOCKER_INFLUXDB_INIT_PASSWORD=mypassword + - DOCKER_INFLUXDB_INIT_ORG=myorg + - DOCKER_INFLUXDB_INIT_BUCKET=mybucket + - DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=my-super-secret-auth-token + - DOCKER_INFLUXDB_INIT_MODE=setup + # - DOCKER_INFLUXDB_INIT_MODE=upgrade + ports: + - "8087:8086" + volumes: + - ./volumes/influxdb2/data:/var/lib/influxdb2 + - 
./volumes/influxdb2/config:/etc/influxdb2 + - ./volumes/influxdb2/backup:/var/lib/backup + # - ./volumes/influxdb.migrate/data:/var/lib/influxdb:ro + healthcheck: + test: ["CMD", "influx", "ping"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 30s diff --git a/.templates/kapacitor/service.yml b/.templates/kapacitor/service.yml new file mode 100644 index 000000000..febba069e --- /dev/null +++ b/.templates/kapacitor/service.yml @@ -0,0 +1,20 @@ +kapacitor: + container_name: kapacitor + image: kapacitor:1.5 + restart: unless-stopped + environment: + - TZ=${TZ:-Etc/UTC} + # see https://docs.influxdata.com/kapacitor/v1.6/administration/configuration/#kapacitor-environment-variables + - KAPACITOR_INFLUXDB_0_URLS_0=http://influxdb:8086 + # - KAPACITOR_INFLUXDB_USERNAME= + # - KAPACITOR_INFLUXDB_PASSWORD= + # - KAPACITOR_HOSTNAME=kapacitor + # - KAPACITOR_LOGGING_LEVEL=INFO + # - KAPACITOR_REPORTING_ENABLED=false + ports: + - "9092:9092" + volumes: + - ./volumes/kapacitor:/var/lib/kapacitor + depends_on: + - influxdb + diff --git a/.templates/mariadb/Dockerfile b/.templates/mariadb/Dockerfile new file mode 100644 index 000000000..36c4fa7c1 --- /dev/null +++ b/.templates/mariadb/Dockerfile @@ -0,0 +1,31 @@ +# Download base image +FROM ghcr.io/linuxserver/mariadb + +# candidates for customisation are +ENV CANDIDATES="/defaults/my.cnf /defaults/custom.cnf" + +# apply stability patches recommended in +# +# https://discord.com/channels/638610460567928832/638610461109256194/825049573520965703 +# https://stackoverflow.com/questions/61809270/how-to-discover-why-mariadb-crashes +RUN for CNF in ${CANDIDATES} ; do [ -f ${CNF} ] && break ; done ; \ + sed -i.bak \ + -e "s/^thread_cache_size/# thread_cache_size/" \ + -e "s/^read_buffer_size/# read_buffer_size/" \ + ${CNF} + +# copy the health-check script into place +ENV HEALTHCHECK_SCRIPT "iotstack_healthcheck.sh" +COPY ${HEALTHCHECK_SCRIPT} /usr/local/bin/${HEALTHCHECK_SCRIPT} + +# define the health check +HEALTHCHECK \ + --start-period=30s \ + --interval=30s \ + --timeout=10s \ + --retries=3 \ + CMD ${HEALTHCHECK_SCRIPT} || exit 1 + +ENV CANDIDATES= + +# EOF diff --git a/.templates/mariadb/build.py b/.templates/mariadb/build.py new file mode 100755 index 000000000..e8893b6e9 --- /dev/null +++ b/.templates/mariadb/build.py @@ -0,0 +1,398 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. 
Required to make local vars work correctly +def main(): + import os + import time + import sys + import ruamel.yaml + import signal + import subprocess + from blessed import Terminal + + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine, padText + from deps.consts import servicesDirectory, templatesDirectory, servicesFileName, buildSettingsFileName + from deps.common_functions import getExternalPorts, checkPortConflicts, generateRandomString + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + global hideHelpText # Showing and hiding the help controls text + global serviceService + + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + buildSettings = serviceService + buildSettingsFileName + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + documentationHint = 'https://sensorsiot.github.io/IOTstack/Containers/MariaDB' + + # runtime vars + portConflicts = [] + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. 
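MariaDB's preBuild() below follows the same pattern as InfluxDB's, but it fills two placeholders, %randomAdminPassword% and %randomPassword%, and resets the one-shot "Randomise database password for this build" choice back to "Do nothing" once it has been applied. A condensed sketch of that decision, with make_password() standing in for generateRandomString() and the default credentials taken from the code that follows:

import secrets

DEFAULT_ADMIN_PASSWORD = "IOtSt4ckToorMariaDb"
DEFAULT_PASSWORD = "IOtSt4ckmariaDbPw"

def make_password():
    # Stand-in for generateRandomString().
    return secrets.token_urlsafe(16)

def resolve_passwords(option):
    """Map a databasePasswordOption value to (admin password, user password, next option)."""
    if option == "Use default password for this build":
        return DEFAULT_ADMIN_PASSWORD, DEFAULT_PASSWORD, option
    if option == "Randomise database password for this build":
        return make_password(), make_password(), "Do nothing"  # one-shot choice
    if option == "Randomise database password every build":
        return make_password(), make_password(), option
    return None, None, option  # "Do nothing": keep whatever the build cache holds

if __name__ == "__main__":
    print(resolve_passwords("Use default password for this build"))
    print(resolve_passwords("Randomise database password for this build"))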
+ def preBuild(): + # Multi-service: + with open((r'%s/' % serviceTemplate) + servicesFileName) as objServiceFile: + serviceYamlTemplate = yaml.load(objServiceFile) + + oldBuildCache = {} + try: + with open(r'%s' % buildCache) as objBuildCache: + oldBuildCache = yaml.load(objBuildCache) + except: + pass + + buildCacheServices = {} + if "services" in oldBuildCache: + buildCacheServices = oldBuildCache["services"] + + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + + if os.path.exists(buildSettings): + # Password randomisation + with open(r'%s' % buildSettings) as objBuildSettingsFile: + mariaDbYamlBuildOptions = yaml.load(objBuildSettingsFile) + if ( + mariaDbYamlBuildOptions["databasePasswordOption"] == "Randomise database password for this build" + or mariaDbYamlBuildOptions["databasePasswordOption"] == "Randomise database password every build" + or mariaDbYamlBuildOptions["databasePasswordOption"] == "Use default password for this build" + ): + if mariaDbYamlBuildOptions["databasePasswordOption"] == "Use default password for this build": + newAdminPassword = "IOtSt4ckToorMariaDb" + newPassword = "IOtSt4ckmariaDbPw" + else: + newAdminPassword = generateRandomString() + newPassword = generateRandomString() + for (index, serviceName) in enumerate(serviceYamlTemplate): + dockerComposeServicesYaml[serviceName] = serviceYamlTemplate[serviceName] + if "environment" in serviceYamlTemplate[serviceName]: + for (envIndex, envName) in enumerate(serviceYamlTemplate[serviceName]["environment"]): + envName = envName.replace("%randomAdminPassword%", newAdminPassword) + envName = envName.replace("%randomPassword%", newPassword) + dockerComposeServicesYaml[serviceName]["environment"][envIndex] = envName + + # Ensure you update the "Do nothing" and other 2 strings used for password settings in 'passwords.py' + if (mariaDbYamlBuildOptions["databasePasswordOption"] == "Randomise database password for this build"): + mariaDbYamlBuildOptions["databasePasswordOption"] = "Do nothing" + with open(buildSettings, 'w') as outputFile: + yaml.dump(mariaDbYamlBuildOptions, outputFile) + else: # Do nothing - don't change password + for (index, serviceName) in enumerate(buildCacheServices): + if serviceName in buildCacheServices: # Load service from cache if exists (to maintain password) + dockerComposeServicesYaml[serviceName] = buildCacheServices[serviceName] + else: + dockerComposeServicesYaml[serviceName] = serviceYamlTemplate[serviceName] + + else: + print("MariaDB Warning: Build settings file not found, using default password") + time.sleep(1) + newAdminPassword = "IOtSt4ckToorMariaDb" + newPassword = "IOtSt4ckmariaDbPw" + for (index, serviceName) in enumerate(serviceYamlTemplate): + dockerComposeServicesYaml[serviceName] = serviceYamlTemplate[serviceName] + if "environment" in serviceYamlTemplate[serviceName]: + for (envIndex, envName) in enumerate(serviceYamlTemplate[serviceName]["environment"]): + envName = envName.replace("%randomAdminPassword%", newAdminPassword) + envName = envName.replace("%randomPassword%", newPassword) + dockerComposeServicesYaml[serviceName]["environment"][envIndex] = envName + mariaDbYamlBuildOptions = { + "version": "1", + "application": "IOTstack", + "service": "MariaDB", + "comment": "MariaDB Build Options" + } + + mariaDbYamlBuildOptions["databasePasswordOption"] = "Do nothing" + with open(buildSettings, 'w') as outputFile: + yaml.dump(mariaDbYamlBuildOptions, outputFile) + + return True + + # ##################################### + # Supporting 
functions below + # ##################################### + + def checkForIssues(): + for (index, serviceName) in enumerate(dockerComposeServicesYaml): + if not currentServiceName == serviceName: # Skip self + currentServicePorts = getExternalPorts(currentServiceName, dockerComposeServicesYaml) + portConflicts = checkPortConflicts(serviceName, currentServicePorts, dockerComposeServicesYaml) + if (len(portConflicts) > 0): + issues["portConflicts"] = portConflicts + + # ##################################### + # End Supporting functions + # ##################################### + + ############################ + # Menu Logic + ############################ + + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + + selectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + needsRender = 1 + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + + def goBack(): + global selectionInProgress + global needsRender + selectionInProgress = False + needsRender = 1 + return True + + def setPasswordOptions(): + global needsRender + global hasRebuiltAddons + passwordOptionsMenuFilePath = "./.templates/{currentService}/passwords.py".format(currentService=currentServiceName) + with open(passwordOptionsMenuFilePath, "rb") as pythonDynamicImportFile: + code = compile(pythonDynamicImportFile.read(), passwordOptionsMenuFilePath, "exec") + execGlobals = { + "currentServiceName": currentServiceName, + "renderMode": renderMode + } + execLocals = {} + screenActive = False + exec(code, execGlobals, execLocals) + signal.signal(signal.SIGWINCH, onResize) + screenActive = True + needsRender = 1 + + def onResize(sig, action): + global mariaDbBuildOptions + global currentMenuItemIndex + mainRender(1, mariaDbBuildOptions, currentMenuItemIndex) + + mariaDbBuildOptions = [] + + def createMenu(): + global mariaDbBuildOptions + global serviceService + + mariaDbBuildOptions = [] + mariaDbBuildOptions.append([ + "MariaDB Password Options", + setPasswordOptions + ]) + + mariaDbBuildOptions.append(["Go back", goBack]) + + def runOptionsMenu(): + createMenu() + menuEntryPoint() + return True + + def renderHotZone(term, menu, selection, hotzoneLocation): + lineLengthAtTextStart = 71 + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + for (index, menuItem) in enumerate(menu): + toPrint = "" + if index == selection: + toPrint += ('{bv} -> {t.blue_on_green} {title} {t.normal} <-'.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + else: + toPrint += ('{bv} {t.normal} {title} '.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + + for i in range(lineLengthAtTextStart - len(menuItem[0])): + toPrint += " " + + toPrint += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + + toPrint = term.center(toPrint) + + print(toPrint) + + def mainRender(needsRender, menu, selection): + term = Terminal() + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack MariaDB Options'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Option to configure {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, menu, selection, hotzoneLocation) + + if needsRender == 1: + 
print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to run command or save input {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to go back to build stack menu {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + if len(documentationHint) > 1: + if len(documentationHint) > 56: + documentationAndPadding = padText(documentationHint, 71) + print(term.center("{bv} Documentation: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + else: + documentationAndPadding = padText(documentationHint, 56) + print(term.center("{bv} Documentation: {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + global mariaDbBuildOptions + if len(mariaDbBuildOptions[selection]) > 1 and isinstance(mariaDbBuildOptions[selection][1], types.FunctionType): + mariaDbBuildOptions[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(nodeRedBuildOptions[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 2: + if menu[index][2]["skip"] == True: + return False + return True + + def menuEntryPoint(): + # These need to be reglobalised due to eval() + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + global hideHelpText + global mariaDbBuildOptions + term = Terminal() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, mariaDbBuildOptions, currentMenuItemIndex) + selectionInProgress = True + with term.cbreak(): + while selectionInProgress: + menuNavigateDirection = 0 + + if needsRender: # Only rerender when changed to prevent flickering + mainRender(needsRender, mariaDbBuildOptions, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + menuNavigateDirection += 1 + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_LEFT': + goBack() + if key.name == 'KEY_ENTER': + runSelection(currentMenuItemIndex) + if key.name == 'KEY_ESCAPE': + return True + elif key: + if key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, mariaDbBuildOptions, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mariaDbBuildOptions) + needsRender = 2 + + while not isMenuItemSelectable(mariaDbBuildOptions, 
currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mariaDbBuildOptions) + return True + + #################### + # End menu section + #################### + + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'mariadb': + main() +else: + print("Error. '{}' Tried to run 'mariadb' config".format(currentServiceName)) diff --git a/.templates/mariadb/iotstack_healthcheck.sh b/.templates/mariadb/iotstack_healthcheck.sh new file mode 100755 index 000000000..980d31a78 --- /dev/null +++ b/.templates/mariadb/iotstack_healthcheck.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env sh + +# set a default for the port +# (refer https://mariadb.com/kb/en/mariadb-environment-variables/ ) +HEALTHCHECK_PORT="${MYSQL_TCP_PORT:-3306}" + +# the expected response is? +EXPECTED="mysqld is alive" + +# run the check +if [ -z "$MYSQL_ROOT_PASSWORD" ] ; then + RESPONSE=$(mysqladmin ping -h localhost) +else + # note - there is NO space between "-p" and the password. This is + # intentional. It is part of the mysql and mysqladmin API. + RESPONSE=$(mysqladmin -p${MYSQL_ROOT_PASSWORD} ping -h localhost) +fi + +# did the mysqladmin command succeed? +if [ $? -eq 0 ] ; then + + # yes! is the response as expected? + if [ "$RESPONSE" = "$EXPECTED" ] ; then + + # yes! this could still be a false positive so probe the port + if nc -w 1 localhost $HEALTHCHECK_PORT >/dev/null 2>&1; then + + # port responding - that defines healthy + exit 0 + + fi + + fi + +fi + +# otherwise the check fails +exit 1 diff --git a/.templates/mariadb/mariadb.env b/.templates/mariadb/mariadb.env deleted file mode 100644 index 67b93f0ab..000000000 --- a/.templates/mariadb/mariadb.env +++ /dev/null @@ -1,7 +0,0 @@ -TZ=Europe/London -PUID=1000 -PGID=1000 -MYSQL_ROOT_PASSWORD=ROOT_ACCESS_PASSWORD -#MYSQL_DATABASE=USER_DB_NAME -#MYSQL_USER=MYSQL_USER -#MYSQL_PASSWORD=DATABASE_PASSWORD \ No newline at end of file diff --git a/.templates/mariadb/passwords.py b/.templates/mariadb/passwords.py new file mode 100755 index 000000000..92f38fcf1 --- /dev/null +++ b/.templates/mariadb/passwords.py @@ -0,0 +1,326 @@ +#!/usr/bin/env python3 + +import signal + +def main(): + from blessed import Terminal + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine + from deps.consts import servicesDirectory, templatesDirectory, buildSettingsFileName + import time + import subprocess + import ruamel.yaml + import os + + global signal + global currentServiceName + global menuSelectionInProgress + global mainMenuList + global currentMenuItemIndex + global renderMode + global paginationSize + global paginationStartIndex + global hideHelpText + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + try: # If not already set, then set it. 
+ hideHelpText = hideHelpText + except: + hideHelpText = False + + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + paginationToggle = [10, term.height - 25] + paginationStartIndex = 0 + paginationSize = paginationToggle[0] + + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + buildSettings = serviceService + buildSettingsFileName + + def goBack(): + global menuSelectionInProgress + global needsRender + menuSelectionInProgress = False + needsRender = 1 + return True + + mainMenuList = [] + + hotzoneLocation = [((term.height // 16) + 6), 0] + + menuSelectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + + # Render Modes: + # 0 = No render needed + # 1 = Full render + # 2 = Hotzone only + needsRender = 1 + + def onResize(sig, action): + global mainMenuList + global currentMenuItemIndex + mainRender(1, mainMenuList, currentMenuItemIndex) + + def generateLineText(text, textLength=None, paddingBefore=0, lineLength=64): + result = "" + for i in range(paddingBefore): + result += " " + + textPrintableCharactersLength = textLength + + if (textPrintableCharactersLength) == None: + textPrintableCharactersLength = len(text) + + result += text + remainingSpace = lineLength - textPrintableCharactersLength + + for i in range(remainingSpace): + result += " " + + return result + + def renderHotZone(term, renderType, menu, selection, hotzoneLocation, paddingBefore = 4): + global paginationSize + selectedTextLength = len("-> ") + + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + + if paginationStartIndex >= 1: + print(term.center("{b} {uaf} {uaf}{uaf}{uaf} {ual} {b}".format( + b=specialChars[renderMode]["borderVertical"], + uaf=specialChars[renderMode]["upArrowFull"], + ual=specialChars[renderMode]["upArrowLine"] + ))) + else: + print(term.center(commonEmptyLine(renderMode))) + + for (index, menuItem) in enumerate(menu): # Menu loop + if index >= paginationStartIndex and index < paginationStartIndex + paginationSize: + lineText = generateLineText(menuItem[0], paddingBefore=paddingBefore) + + # Menu highlight logic + if index == selection: + formattedLineText = '-> {t.blue_on_green}{title}{t.normal} <-'.format(t=term, title=menuItem[0]) + paddedLineText = generateLineText(formattedLineText, textLength=len(menuItem[0]) + selectedTextLength, paddingBefore=paddingBefore - selectedTextLength) + toPrint = paddedLineText + else: + toPrint = '{title}{t.normal}'.format(t=term, title=lineText) + # ##### + + # Menu check render logic + if menuItem[1]["checked"]: + toPrint = " (X) " + toPrint + else: + toPrint = " ( ) " + toPrint + + toPrint = "{bv} {toPrint} {bv}".format(bv=specialChars[renderMode]["borderVertical"], toPrint=toPrint) # Generate border + toPrint = term.center(toPrint) # Center Text (All lines should have the same amount of printable characters) + # ##### + print(toPrint) + + if paginationStartIndex + paginationSize < len(menu): + print(term.center("{b} {daf} {daf}{daf}{daf} {dal} {b}".format( + b=specialChars[renderMode]["borderVertical"], + daf=specialChars[renderMode]["downArrowFull"], + dal=specialChars[renderMode]["downArrowLine"] + ))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + + + def mainRender(needsRender, menu, selection): + global paginationStartIndex + global paginationSize + term = Terminal() + + if selection >= paginationStartIndex + paginationSize: 
+ paginationStartIndex = selection - (paginationSize - 1) + 1 + needsRender = 1 + + if selection <= paginationStartIndex - 1: + paginationStartIndex = selection + needsRender = 1 + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack MariaDB Password Options'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Password Option {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, needsRender, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + if term.height < 32: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Not enough vertical room to render controls help text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Space] to select option {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to build and save option {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to cancel changes {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + if len(mainMenuList[selection]) > 1 and isinstance(mainMenuList[selection][1], types.FunctionType): + mainMenuList[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(mainMenuList[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 1: + if "skip" in menu[index][1] and menu[index][1]["skip"] == True: + return False + return True + + def loadOptionsMenu(): + global mainMenuList + mainMenuList.append(["Use default password for this build", { "checked": True }]) + mainMenuList.append(["Randomise database password for this build", { "checked": False }]) + mainMenuList.append(["Randomise database password every build", { "checked": False }]) + mainMenuList.append(["Do nothing", { "checked": False }]) + + def checkMenuItem(selection): + global mainMenuList + for (index, menuItem) in enumerate(mainMenuList): + mainMenuList[index][1]["checked"] = False + + mainMenuList[selection][1]["checked"] = True + + def saveOptions(): + try: + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + + if os.path.exists(buildSettings): + with open(r'%s' % buildSettings) as objBuildSettingsFile: + mariaDbYamlBuildOptions = yaml.load(objBuildSettingsFile) + else: + mariaDbYamlBuildOptions = { + "version": "1", + "application": "IOTstack", + "service": "mariadb", + "comment": "Build Settings", + } + + mariaDbYamlBuildOptions["databasePasswordOption"] = "" + + for (index, 
menuOption) in enumerate(mainMenuList): + if menuOption[1]["checked"]: + mariaDbYamlBuildOptions["databasePasswordOption"] = menuOption[0] + break + + with open(buildSettings, 'w') as outputFile: + yaml.dump(mariaDbYamlBuildOptions, outputFile) + + except Exception as err: + print("Error saving MariaDB Password options", currentServiceName) + print(err) + return False + return True + + def loadOptions(): + try: + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + + if os.path.exists(buildSettings): + with open(r'%s' % buildSettings) as objBuildSettingsFile: + mariaDbYamlBuildOptions = yaml.load(objBuildSettingsFile) + + for (index, menuOption) in enumerate(mainMenuList): + if menuOption[0] == mariaDbYamlBuildOptions["databasePasswordOption"]: + checkMenuItem(index) + break + + except Exception as err: + print("Error loading MariaDB Password options", currentServiceName) + print(err) + return False + return True + + + if __name__ == 'builtins': + global signal + term = Terminal() + signal.signal(signal.SIGWINCH, onResize) + loadOptionsMenu() + loadOptions() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + menuSelectionInProgress = True + with term.cbreak(): + while menuSelectionInProgress: + menuNavigateDirection = 0 + + if not needsRender == 0: # Only rerender when changed to prevent flickering + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + if paginationSize == paginationToggle[0]: + paginationSize = paginationToggle[1] + else: + paginationSize = paginationToggle[0] + mainRender(1, mainMenuList, currentMenuItemIndex) + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_ENTER': + if saveOptions(): + return True + else: + print("Something went wrong. Try saving the list again.") + if key.name == 'KEY_ESCAPE': + menuSelectionInProgress = False + return True + elif key: + if key == ' ': # Space pressed + checkMenuItem(currentMenuItemIndex) # Update checked list + needsRender = 2 + elif key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, mainMenuList, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + needsRender = 2 + + while not isMenuItemSelectable(mainMenuList, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + return True + + return True + +originalSignalHandler = signal.getsignal(signal.SIGINT) +main() +signal.signal(signal.SIGWINCH, originalSignalHandler) diff --git a/.templates/mariadb/service.yml b/.templates/mariadb/service.yml index bad5cdcfe..ac9bbbdd4 100644 --- a/.templates/mariadb/service.yml +++ b/.templates/mariadb/service.yml @@ -1,11 +1,17 @@ - mariadb: - image: linuxserver/mariadb - container_name: mariadb - env_file: - - ./services/mariadb/mariadb.env - volumes: - - ./volumes/mariadb/config:/config - ports: - - 3306:3306 - restart: unless-stopped - +mariadb: + build: ./.templates/mariadb/. 
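    # note: %randomAdminPassword% and %randomPassword% below are placeholders; the
    # menu's build.py replaces them at build time with either the documented defaults
    # or randomly generated strings, depending on the chosen "MariaDB Password Options"
    # setting.
    # a quick connection test once the container reports healthy (assuming the default
    # container name) is, for example:
    #   docker exec -it mariadb mysql -uroot -p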
+ container_name: mariadb + environment: + - TZ=${TZ:-Etc/UTC} + - PUID=1000 + - PGID=1000 + - MYSQL_ROOT_PASSWORD=%randomAdminPassword% + - MYSQL_DATABASE=default + - MYSQL_USER=mariadbuser + - MYSQL_PASSWORD=%randomPassword% + volumes: + - ./volumes/mariadb/config:/config + - ./volumes/mariadb/db_backup:/backup + ports: + - "3306:3306" + restart: unless-stopped diff --git a/.templates/mariadb/terminal.sh b/.templates/mariadb/terminal.sh index b7c282135..0aa87ed20 100755 --- a/.templates/mariadb/terminal.sh +++ b/.templates/mariadb/terminal.sh @@ -1,4 +1,9 @@ #!/bin/bash echo "run 'mysql -uroot -p' for terminal access" +echo "" +echo "IOTstack mariadb Documentation: https://sensorsiot.github.io/IOTstack/Containers/MariaDB/" +echo "" +echo "docker exec -it mariadb bash" + docker exec -it mariadb bash diff --git a/.templates/matterbridge/service.yml b/.templates/matterbridge/service.yml new file mode 100644 index 000000000..98fa8406b --- /dev/null +++ b/.templates/matterbridge/service.yml @@ -0,0 +1,8 @@ +matterbridge: + container_name: matterbridge + image: "luligu/matterbridge:1.6.7" + restart: unless-stopped + network_mode: host + volumes: + - ./volumes/matterbridge/data:/root/.matterbridge + - ./volumes/matterbridge/plugins:/root/Matterbridge diff --git a/.templates/mjpg-streamer/Dockerfile b/.templates/mjpg-streamer/Dockerfile new file mode 100644 index 000000000..c4603a0b0 --- /dev/null +++ b/.templates/mjpg-streamer/Dockerfile @@ -0,0 +1,68 @@ +# supported build argument +ARG DEBIAN_VARIANT=bullseye-slim + +# Download base image +FROM debian:${DEBIAN_VARIANT} + +# re-reference supported argument and copy to environment var +ARG DEBIAN_VARIANT +ENV DEBIAN_VARIANT=${DEBIAN_VARIANT} + +ARG DEBIAN_FRONTEND=noninteractive +ENV TZ=UTC + +# Refer https://github.com/jacksonliam/mjpg-streamer/issues/386 +# This assumes a Debian build and references: +# libjpeg62-turbo-dev +# For Ubuntu, replace with: +# libjpeg8-dev + +# acknowledgement: mostly borrowed from https://github.com/Cossey/docker +RUN apt-get -q -y update \ + && apt-get -q -y -o "DPkg::Options::=--force-confold" -o "DPkg::Options::=--force-confdef" install apt-utils \ + && rm -rf /etc/dpkg/dpkg.cfg.d/excludes \ + && apt-get -q -y -o "DPkg::Options::=--force-confold" -o "DPkg::Options::=--force-confdef" install \ + cmake \ + git \ + gcc \ + g++ \ + libjpeg62-turbo-dev \ + tzdata \ + uuid-runtime \ + && apt-get -q -y autoremove \ + && apt-get -q -y clean \ + && rm -rf /var/lib/apt/lists/* \ + && git clone https://github.com/jacksonliam/mjpg-streamer.git /usr/src/mjpg-streamer \ + && make -C /usr/src/mjpg-streamer/mjpg-streamer-experimental \ + && make -C /usr/src/mjpg-streamer/mjpg-streamer-experimental install \ + && rm -rf /usr/src/mjpg-streamer \ + && apt-get -q -y -o "DPkg::Options::=--force-confold" -o "DPkg::Options::=--force-confdef" purge \ + cmake \ + git \ + gcc \ + g++ + +# set up the container start point +ENV ENTRYPOINT_SCRIPT="docker-entrypoint.sh" +COPY docker-entrypoint.sh /usr/local/bin +RUN chmod 755 /usr/local/bin/docker-entrypoint.sh + +# starting point - self-repair (if ever needed) and launch +ENTRYPOINT ["docker-entrypoint.sh"] + +# the streamer invocation +CMD mjpg_streamer \ + -i "/usr/local/lib/mjpg-streamer/input_uvc.so -d ${MJPG_STREAMER_INTERNAL_DEVICE} -n -f ${MJPG_STREAMER_FPS} -r ${MJPG_STREAMER_SIZE}" \ + -o "/usr/local/lib/mjpg-streamer/output_http.so -p 80 -w /usr/local/share/mjpg-streamer/www ${MJPG_STREAMER_CREDENTIALS}" + +# set root's home directory as default (probably unnecessary) +WORKDIR 
/root + +# port +EXPOSE "80" + +# set container metadata +LABEL com.github.SensorsIot.IOTstack.Dockerfile.build-args="${DEBIAN_VARIANT}" +LABEL com.github.SensorsIot.IOTstack.Dockerfile.maintainer="Paraphraser <34226495+Paraphraser@users.noreply.github.com>" + +# EOF diff --git a/.templates/mjpg-streamer/docker-entrypoint.sh b/.templates/mjpg-streamer/docker-entrypoint.sh new file mode 100644 index 000000000..0a8ade102 --- /dev/null +++ b/.templates/mjpg-streamer/docker-entrypoint.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +set -e + +# sensible defaults for supported variables +export MJPG_STREAMER_SIZE=${MJPG_STREAMER_SIZE:-640x480} +export MJPG_STREAMER_FPS=${MJPG_STREAMER_FPS:-5} +export MJPG_STREAMER_INTERNAL_DEVICE=${MJPG_STREAMER_INTERNAL_DEVICE:-/dev/video0} + +# form credential string (if the user does not pass a username, the +# username will be the container name - change on each recreate; if +# the user does not pass a password, the password will be a uuid and +# will change on every launch). +MJPG_STREAMER_USERNAME=${MJPG_STREAMER_USERNAME:-$(hostname -s)} +MJPG_STREAMER_PASSWORD=${MJPG_STREAMER_PASSWORD:-$(uuidgen)} +export MJPG_STREAMER_CREDENTIALS="-c ${MJPG_STREAMER_USERNAME}:${MJPG_STREAMER_PASSWORD}" + +# are we running as root? +if [ "$(id -u)" = '0' ] ; then + + echo "MJPG Streamer launched at $(date)" + + # any self-repair code goes here - there is no persistent storage + # at the moment so this is irrelevant. + +fi + +# away we go +exec "$@" diff --git a/.templates/mjpg-streamer/service.yml b/.templates/mjpg-streamer/service.yml new file mode 100644 index 000000000..3e9f9a006 --- /dev/null +++ b/.templates/mjpg-streamer/service.yml @@ -0,0 +1,15 @@ +mjpg-streamer: + container_name: mjpg-streamer + build: ./.templates/mjpg-streamer/. + restart: unless-stopped + environment: + - TZ=${TZ:-Etc/UTC} + - MJPG_STREAMER_USERNAME=${MJPG_STREAMER_USERNAME:-} + - MJPG_STREAMER_PASSWORD=${MJPG_STREAMER_PASSWORD:-} + - MJPG_STREAMER_SIZE=${MJPG_STREAMER_SIZE:-} + - MJPG_STREAMER_FPS=${MJPG_STREAMER_FPS:-} + ports: + - "8980:80" + devices: + - "${MJPG_STREAMER_EXTERNAL_DEVICE:-/dev/video0}:/dev/video0" + diff --git a/.templates/mosquitto/Dockerfile b/.templates/mosquitto/Dockerfile new file mode 100644 index 000000000..9c22a7988 --- /dev/null +++ b/.templates/mosquitto/Dockerfile @@ -0,0 +1,48 @@ +# supported build argument +ARG MOSQUITTO_BASE=eclipse-mosquitto:latest + +# Download base image +FROM $MOSQUITTO_BASE + +# re-reference supported argument and copy to environment var +ARG MOSQUITTO_BASE +ENV MOSQUITTO_BASE=${MOSQUITTO_BASE} + +# see https://github.com/alpinelinux/docker-alpine/issues/98 +RUN sed -i 's/https/http/' /etc/apk/repositories + +# Add support tools +RUN apk update && apk add --no-cache rsync tzdata + +# where IOTstack template files are stored +ENV IOTSTACK_DEFAULTS_DIR="iotstack_defaults" + +# copy template files to image +COPY --chown=mosquitto:mosquitto ${IOTSTACK_DEFAULTS_DIR} /${IOTSTACK_DEFAULTS_DIR} + +# copy the health-check script into place +ENV HEALTHCHECK_SCRIPT "iotstack_healthcheck.sh" +COPY ${HEALTHCHECK_SCRIPT} /usr/local/bin/${HEALTHCHECK_SCRIPT} + +# define the health check +HEALTHCHECK \ + --start-period=30s \ + --interval=30s \ + --timeout=10s \ + --retries=3 \ + CMD ${HEALTHCHECK_SCRIPT} || exit 1 + +# replace the docker entry-point script +ENV IOTSTACK_ENTRY_POINT="docker-entrypoint.sh" +COPY ${IOTSTACK_ENTRY_POINT} /${IOTSTACK_ENTRY_POINT} +RUN chmod 755 /${IOTSTACK_ENTRY_POINT} +ENV IOTSTACK_ENTRY_POINT= + +# IOTstack also declares these paths 
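# (these correspond to the ./volumes/mosquitto/config and ./volumes/mosquitto/pwfile
# bind mounts in the service definition, so a fresh install is populated from
# iotstack_defaults by the entry-point script on first launch)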
+VOLUME ["/mosquitto/config", "/mosquitto/pwfile"] + +# set container metadata +LABEL com.github.SensorsIot.IOTstack.Dockerfile.build-args="${MOSQUITTO_BASE}" +LABEL com.github.SensorsIot.IOTstack.Dockerfile.based-on="https://github.com/eclipse/mosquitto" + +# EOF diff --git a/.templates/mosquitto/directoryfix.sh b/.templates/mosquitto/directoryfix.sh deleted file mode 100755 index b276c765c..000000000 --- a/.templates/mosquitto/directoryfix.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -[ -d ./volumes/mosquitto ] || sudo mkdir -p ./volumes/mosquitto - -#check user 1883 -if [ $(grep -c 'user: \"1883\"' ./services/mosquitto/service.yml) -eq 1 ]; then - echo "...found user 1883" - sudo mkdir -p ./volumes/mosquitto/data/ - sudo mkdir -p ./volumes/mosquitto/log/ - sudo chown -R 1883:1883 ./volumes/mosquitto/ -fi - -#check user 0 legacy test -if [ $(grep -c 'user: \"0\"' ./services/mosquitto/service.yml) -eq 1 ]; then - echo "...found user 0 setting ownership for old template" - sudo chown -R root:root ./volumes/mosquitto/ -fi diff --git a/.templates/mosquitto/docker-entrypoint.sh b/.templates/mosquitto/docker-entrypoint.sh new file mode 100644 index 000000000..e49638571 --- /dev/null +++ b/.templates/mosquitto/docker-entrypoint.sh @@ -0,0 +1,25 @@ +#!/bin/ash +set -e + +PWFILE="/mosquitto/pwfile/pwfile" + +# Set permissions +user="$(id -u)" +if [ "$user" = '0' -a -d "/mosquitto" ]; then + + echo "[IOTstack] begin self-repair" + + rsync -arpv --ignore-existing /${IOTSTACK_DEFAULTS_DIR}/ "/mosquitto" + + # general ownership assuming mode as set in template + chown -Rc mosquitto:mosquitto /mosquitto + + # specific requirements for the password file + chmod -c 600 "$PWFILE" + + echo "[IOTstack] end self-repair" + +fi + +exec "$@" + diff --git a/.templates/mosquitto/iotstack_defaults/config/filter.acl b/.templates/mosquitto/iotstack_defaults/config/filter.acl new file mode 100644 index 000000000..e16823110 --- /dev/null +++ b/.templates/mosquitto/iotstack_defaults/config/filter.acl @@ -0,0 +1,6 @@ +user admin +topic read # +topic write # + +pattern read # +pattern write # diff --git a/.templates/mosquitto/iotstack_defaults/config/mosquitto.conf b/.templates/mosquitto/iotstack_defaults/config/mosquitto.conf new file mode 100644 index 000000000..bdada3132 --- /dev/null +++ b/.templates/mosquitto/iotstack_defaults/config/mosquitto.conf @@ -0,0 +1,36 @@ +# required by https://mosquitto.org/documentation/migrating-to-2-0/ +# +listener 1883 + +# persistence enabled for remembering retain flag across restarts +# +persistence true +persistence_location /mosquitto/data + +# logging options: +# enable one of the following (stdout = less wear on SD cards but +# logs do not persist across restarts) +#log_dest file /mosquitto/log/mosquitto.log +log_dest stdout +log_timestamp_format %Y-%m-%dT%H:%M:%S + +# Reduce size and SD-card flash wear, safe to remove if using a SSD +connection_messages false + +# password handling: +# password_file commented-out allow_anonymous true = +# open access +# password_file commented-out allow_anonymous false = +# no access +# password_file activated allow_anonymous true = +# passwords omitted is permitted but +# passwords provided must match pwfile +# password_file activated allow_anonymous false = +# no access without passwords +# passwords provided must match pwfile +# +#password_file /mosquitto/pwfile/pwfile +allow_anonymous true + +# Uncomment to enable filters +#acl_file /mosquitto/config/filter.acl diff --git a/.templates/mosquitto/iotstack_defaults/pwfile/pwfile 
b/.templates/mosquitto/iotstack_defaults/pwfile/pwfile new file mode 100644 index 000000000..e69de29bb diff --git a/.templates/mosquitto/iotstack_healthcheck.sh b/.templates/mosquitto/iotstack_healthcheck.sh new file mode 100755 index 000000000..8f4d51308 --- /dev/null +++ b/.templates/mosquitto/iotstack_healthcheck.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env sh + +# assume the following environment variables, all of which may be null +# HEALTHCHECK_PORT +# HEALTHCHECK_USER +# HEALTHCHECK_PASSWORD +# HEALTHCHECK_TOPIC + +# set a default for the port +HEALTHCHECK_PORT="${HEALTHCHECK_PORT:-1883}" + +# strip any quotes from username and password +HEALTHCHECK_USER="$(eval echo $HEALTHCHECK_USER)" +HEALTHCHECK_PASSWORD="$(eval echo $HEALTHCHECK_PASSWORD)" + +# set a default for the topic +HEALTHCHECK_TOPIC="${HEALTHCHECK_TOPIC:-iotstack/mosquitto/healthcheck}" +HEALTHCHECK_TOPIC="$(eval echo $HEALTHCHECK_TOPIC)" + +# record the current date and time for the test payload +PUBLISH=$(date) + +# publish a retained message containing the timestamp +mosquitto_pub \ + -h localhost \ + -p "$HEALTHCHECK_PORT" \ + -t "$HEALTHCHECK_TOPIC" \ + -m "$PUBLISH" \ + -u "$HEALTHCHECK_USER" \ + -P "$HEALTHCHECK_PASSWORD" \ + -r + +# did that succeed? +if [ $? -eq 0 ] ; then + + # yes! now, subscribe to that same topic with a 2-second timeout + # plus returning on the first message + SUBSCRIBE=$(mosquitto_sub \ + -h localhost \ + -p "$HEALTHCHECK_PORT" \ + -t "$HEALTHCHECK_TOPIC" \ + -u "$HEALTHCHECK_USER" \ + -P "$HEALTHCHECK_PASSWORD" \ + -W 2 \ + -C 1 \ + ) + + # did the subscribe succeed? + if [ $? -eq 0 ] ; then + + # yes! do the publish and subscribe payloads compare equal? + if [ "$PUBLISH" = "$SUBSCRIBE" ] ; then + + # yes! return success + exit 0 + + fi + + fi + +fi + +# otherwise, return failure +exit 1 diff --git a/.templates/mosquitto/mosquitto.conf b/.templates/mosquitto/mosquitto.conf deleted file mode 100644 index 9f63ac19e..000000000 --- a/.templates/mosquitto/mosquitto.conf +++ /dev/null @@ -1,4 +0,0 @@ -persistence true -persistence_location /mosquitto/data/ -log_dest file /mosquitto/log/mosquitto.log -#password_file /mosquitto/config/pwfile diff --git a/.templates/mosquitto/service.yml b/.templates/mosquitto/service.yml index 8a62d7769..49467e1fe 100644 --- a/.templates/mosquitto/service.yml +++ b/.templates/mosquitto/service.yml @@ -1,13 +1,17 @@ - mosquitto: - container_name: mosquitto - image: eclipse-mosquitto - restart: unless-stopped - user: "1883" - ports: - - 1883:1883 - - 9001:9001 - volumes: - - ./volumes/mosquitto/data:/mosquitto/data - - ./volumes/mosquitto/log:/mosquitto/log - - ./services/mosquitto/mosquitto.conf:/mosquitto/config/mosquitto.conf +mosquitto: + container_name: mosquitto + build: + context: ./.templates/mosquitto/. 
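      # the MOSQUITTO_BASE build argument below selects the upstream image the local
      # Dockerfile builds on; pinning a tag here (e.g. eclipse-mosquitto:2) is one way
      # to hold the broker at a known version rather than tracking "latest".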
+ args: + - MOSQUITTO_BASE=eclipse-mosquitto:latest + restart: unless-stopped + environment: + - TZ=${TZ:-Etc/UTC} + ports: + - "1883:1883" + volumes: + - ./volumes/mosquitto/config:/mosquitto/config + - ./volumes/mosquitto/data:/mosquitto/data + - ./volumes/mosquitto/log:/mosquitto/log + - ./volumes/mosquitto/pwfile:/mosquitto/pwfile diff --git a/.templates/mosquitto/terminal.sh b/.templates/mosquitto/terminal.sh deleted file mode 100755 index 83c8a4128..000000000 --- a/.templates/mosquitto/terminal.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -echo "you are about to enter the shell for mosquitto" -echo "to add a password: mosquitto_passwd -c /mosquitto/config/pwfile MYUSER" -echo "the command will ask for you password and confirm" -echo "to exit: exit" - -docker exec -it mosquitto sh diff --git a/.templates/motioneye/build.py b/.templates/motioneye/build.py new file mode 100755 index 000000000..5d46dbae5 --- /dev/null +++ b/.templates/motioneye/build.py @@ -0,0 +1,332 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. Required to make local vars work correctly +def main(): + import os + import time + import ruamel.yaml + import signal + import sys + from blessed import Terminal + + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine, padText + from deps.consts import servicesDirectory, templatesDirectory + from deps.common_functions import getExternalPorts, getInternalPorts, checkPortConflicts, enterPortNumberWithWhiptail + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + global hideHelpText # Showing and hiding the help controls text + global serviceService + + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + documentationHint = 'https://sensorsiot.github.io/IOTstack/Containers/MotionEye' + + # runtime vars + portConflicts = [] + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. 
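    # The checkFor*Hook() helpers are how the menu discovers which optional stages this
    # template implements; callable(...) simply reports whether the named function
    # exists, so buildHooks ends up looking something like
    # {"options": True, "preBuildHook": True, "postBuildHook": True, "runChecksHook": True}.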
+ def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. + def preBuild(): + global dockerComposeServicesYaml + global currentServiceName + if os.path.exists('/dev/video0'): + dockerComposeServicesYaml[currentServiceName]["devices"] = [ + '/dev/video0' + ] + + # Setup service directory + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + os.makedirs(serviceService + '/etc_motioneye', exist_ok=True) + os.makedirs(serviceService + '/var_lib_motioneye', exist_ok=True) + return True + + # ##################################### + # Supporting functions below + # ##################################### + + def checkForIssues(): + for (index, serviceName) in enumerate(dockerComposeServicesYaml): + if not currentServiceName == serviceName: # Skip self + currentServicePorts = getExternalPorts(currentServiceName, dockerComposeServicesYaml) + portConflicts = checkPortConflicts(serviceName, currentServicePorts, dockerComposeServicesYaml) + if (len(portConflicts) > 0): + issues["portConflicts"] = portConflicts + + # ##################################### + # End Supporting functions + # ##################################### + + ############################ + # Menu Logic + ############################ + + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + + selectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + needsRender = 1 + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + + def goBack(): + global selectionInProgress + global needsRender + selectionInProgress = False + needsRender = 1 + return True + + def enterPortNumberExec(): + # global term + global needsRender + global dockerComposeServicesYaml + externalPort = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + internalPort = getInternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + newPortNumber = enterPortNumberWithWhiptail(term, dockerComposeServicesYaml, currentServiceName, hotzoneLocation, externalPort) + + if newPortNumber > 0: + dockerComposeServicesYaml[currentServiceName]["ports"][0] = "{newExtPort}:{oldIntPort}".format( + newExtPort = newPortNumber, + oldIntPort = internalPort + ) + createMenu() + needsRender = 1 + + def onResize(sig, action): + global motionEyeBuildOptions + global currentMenuItemIndex + mainRender(1, motionEyeBuildOptions, currentMenuItemIndex) + + motionEyeBuildOptions = [] + + def createMenu(): + global motionEyeBuildOptions + try: + motionEyeBuildOptions = [] + portNumber = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + motionEyeBuildOptions.append([ + "Change external WUI Port Number from: {port}".format(port=portNumber), + enterPortNumberExec + ]) + except: # Error getting port + pass + motionEyeBuildOptions.append(["Go back", goBack]) + + def runOptionsMenu(): + createMenu() + menuEntryPoint() + return True + + def renderHotZone(term, menu, selection, hotzoneLocation): + lineLengthAtTextStart = 
71 + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + for (index, menuItem) in enumerate(menu): + toPrint = "" + if index == selection: + toPrint += ('{bv} -> {t.blue_on_green} {title} {t.normal} <-'.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + else: + toPrint += ('{bv} {t.normal} {title} '.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + + for i in range(lineLengthAtTextStart - len(menuItem[0])): + toPrint += " " + + toPrint += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + + toPrint = term.center(toPrint) + + print(toPrint) + + def mainRender(needsRender, menu, selection): + term = Terminal() + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack MotionEye Options'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Option to configure {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to run command or save input {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to go back to build stack menu {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + if len(documentationHint) > 1: + if len(documentationHint) > 56: + documentationAndPadding = padText(documentationHint, 71) + print(term.center("{bv} Documentation: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + else: + documentationAndPadding = padText(documentationHint, 56) + print(term.center("{bv} Documentation: {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + global motionEyeBuildOptions + if len(motionEyeBuildOptions[selection]) > 1 and isinstance(motionEyeBuildOptions[selection][1], types.FunctionType): + motionEyeBuildOptions[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(nodeRedBuildOptions[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 2: + if menu[index][2]["skip"] == True: + return False + return True + + def menuEntryPoint(): + # These need to be reglobalised due to eval() + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + global hideHelpText + global motionEyeBuildOptions + term 
= Terminal() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, motionEyeBuildOptions, currentMenuItemIndex) + selectionInProgress = True + with term.cbreak(): + while selectionInProgress: + menuNavigateDirection = 0 + + if needsRender: # Only rerender when changed to prevent flickering + mainRender(needsRender, motionEyeBuildOptions, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + menuNavigateDirection += 1 + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_LEFT': + goBack() + if key.name == 'KEY_ENTER': + runSelection(currentMenuItemIndex) + if key.name == 'KEY_ESCAPE': + return True + elif key: + if key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, motionEyeBuildOptions, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(motionEyeBuildOptions) + needsRender = 2 + + while not isMenuItemSelectable(motionEyeBuildOptions, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(motionEyeBuildOptions) + return True + + #################### + # End menu section + #################### + + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'motioneye': + main() +else: + print("Error. '{}' Tried to run 'motioneye' config".format(currentServiceName)) diff --git a/.templates/motioneye/service.yml b/.templates/motioneye/service.yml index c6119a735..5ac86310b 100644 --- a/.templates/motioneye/service.yml +++ b/.templates/motioneye/service.yml @@ -1,14 +1,13 @@ - motioneye: - image: "ccrisan/motioneye:master-armhf" - container_name: "motioneye" - restart: unless-stopped - ports: - - 8765:8765 - - 8081:8081 - volumes: - - /etc/localtime:/etc/localtime:ro - - ./volumes/motioneye/etc_motioneye:/etc/motioneye - - ./volumes/motioneye/var_lib_motioneye:/var/lib/motioneye - #devices: - # - "/dev/video0:/dev/video0" +motioneye: + image: dontobi/motioneye.rpi:latest + container_name: "motioneye" + restart: unless-stopped + ports: + - "8765:8765" + - "8766:8081" + environment: + - TZ=${TZ:-Etc/UTC} + volumes: + - ./volumes/motioneye/etc_motioneye:/etc/motioneye + - ./volumes/motioneye/var_lib_motioneye:/var/lib/motioneye diff --git a/.templates/n8n/build.py b/.templates/n8n/build.py new file mode 100755 index 000000000..d717b0008 --- /dev/null +++ b/.templates/n8n/build.py @@ -0,0 +1,328 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. 
Required to make local vars work correctly +def main(): + import os + import time + import ruamel.yaml + import signal + import sys + from blessed import Terminal + + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine, padText + from deps.consts import servicesDirectory, templatesDirectory + from deps.common_functions import getExternalPorts, getInternalPorts, checkPortConflicts, enterPortNumberWithWhiptail + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + global hideHelpText # Showing and hiding the help controls text + global serviceService + + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + documentationHint = 'https://docs.n8n.io/quickstart/' + + # runtime vars + portConflicts = [] + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. 
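    # For n8n, preBuild() below only needs to ensure the per-service directories exist
    # (typically ./services/n8n/etc_n8n and ./services/n8n/var_lib_n8n); unlike the
    # MariaDB template, there is no password-substitution step.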
+ def preBuild(): + global dockerComposeServicesYaml + global currentServiceName + + # Setup service directory + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + os.makedirs(serviceService + '/etc_n8n', exist_ok=True) + os.makedirs(serviceService + '/var_lib_n8n', exist_ok=True) + return True + + # ##################################### + # Supporting functions below + # ##################################### + + def checkForIssues(): + for (index, serviceName) in enumerate(dockerComposeServicesYaml): + if not currentServiceName == serviceName: # Skip self + currentServicePorts = getExternalPorts(currentServiceName, dockerComposeServicesYaml) + portConflicts = checkPortConflicts(serviceName, currentServicePorts, dockerComposeServicesYaml) + if (len(portConflicts) > 0): + issues["portConflicts"] = portConflicts + + # ##################################### + # End Supporting functions + # ##################################### + + ############################ + # Menu Logic + ############################ + + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + + selectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + needsRender = 1 + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + + def goBack(): + global selectionInProgress + global needsRender + selectionInProgress = False + needsRender = 1 + return True + + def enterPortNumberExec(): + # global term + global needsRender + global dockerComposeServicesYaml + externalPort = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + internalPort = getInternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + newPortNumber = enterPortNumberWithWhiptail(term, dockerComposeServicesYaml, currentServiceName, hotzoneLocation, externalPort) + + if newPortNumber > 0: + dockerComposeServicesYaml[currentServiceName]["ports"][0] = "{newExtPort}:{oldIntPort}".format( + newExtPort = newPortNumber, + oldIntPort = internalPort + ) + createMenu() + needsRender = 1 + + def onResize(sig, action): + global n8nBuildOptions + global currentMenuItemIndex + mainRender(1, n8nBuildOptions, currentMenuItemIndex) + + n8nBuildOptions = [] + + def createMenu(): + global n8nBuildOptions + try: + n8nBuildOptions = [] + portNumber = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + n8nBuildOptions.append([ + "Change external WUI Port Number from: {port}".format(port=portNumber), + enterPortNumberExec + ]) + except: # Error getting port + pass + n8nBuildOptions.append(["Go back", goBack]) + + def runOptionsMenu(): + createMenu() + menuEntryPoint() + return True + + def renderHotZone(term, menu, selection, hotzoneLocation): + lineLengthAtTextStart = 71 + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + for (index, menuItem) in enumerate(menu): + toPrint = "" + if index == selection: + toPrint += ('{bv} -> {t.blue_on_green} {title} {t.normal} <-'.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + else: + toPrint += ('{bv} {t.normal} {title} '.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + + for i in range(lineLengthAtTextStart - len(menuItem[0])): + toPrint += " " + + toPrint += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + + toPrint = term.center(toPrint) + + print(toPrint) + + def mainRender(needsRender, menu, selection): + term = Terminal() + + if needsRender == 1: + print(term.clear()) + 
print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack n8n Options'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Option to configure {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to run command or save input {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to go back to build stack menu {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + if len(documentationHint) > 1: + if len(documentationHint) > 56: + documentationAndPadding = padText(documentationHint, 71) + print(term.center("{bv} Documentation: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + else: + documentationAndPadding = padText(documentationHint, 56) + print(term.center("{bv} Documentation: {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + global n8nBuildOptions + if len(n8nBuildOptions[selection]) > 1 and isinstance(n8nBuildOptions[selection][1], types.FunctionType): + n8nBuildOptions[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(nodeRedBuildOptions[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 2: + if menu[index][2]["skip"] == True: + return False + return True + + def menuEntryPoint(): + # These need to be reglobalised due to eval() + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + global hideHelpText + global n8nBuildOptions + term = Terminal() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, n8nBuildOptions, currentMenuItemIndex) + selectionInProgress = True + with term.cbreak(): + while selectionInProgress: + menuNavigateDirection = 0 + + if needsRender: # Only rerender when changed to prevent flickering + mainRender(needsRender, n8nBuildOptions, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + menuNavigateDirection += 1 + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_LEFT': + goBack() + if key.name == 'KEY_ENTER': + runSelection(currentMenuItemIndex) + if key.name == 'KEY_ESCAPE': + return True + elif key: 
+ if key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, n8nBuildOptions, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(n8nBuildOptions) + needsRender = 2 + + while not isMenuItemSelectable(n8nBuildOptions, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(n8nBuildOptions) + return True + + #################### + # End menu section + #################### + + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'n8n': + main() +else: + print("Error. '{}' Tried to run 'n8n' config".format(currentServiceName)) diff --git a/.templates/n8n/service.yml b/.templates/n8n/service.yml new file mode 100644 index 000000000..e3f8bdcc4 --- /dev/null +++ b/.templates/n8n/service.yml @@ -0,0 +1,28 @@ +n8n: + container_name: "n8n" + restart: unless-stopped + ports: + - "5678:5678" + image: n8nio/n8n + stdin_open: true + volumes: + - ./volumes/n8n:/home/node/.n8n + environment: + - TZ=${TZ:-Etc/UTC} + # - PGID=1000 + # - PUID=1000 + # - USBDEVICES=/dev/ttyAMA0 + # - PACKAGES=mc + # Optional DB and Timezone configs. + # - DB_TYPE=mysqldb + # - DB_MYSQLDB_DATABASE= + # - DB_MYSQLDB_HOST= + # - DB_MYSQLDB_PORT= + # - DB_MYSQLDB_USER= + # - DB_MYSQLDB_PASSWORD= + # - GENERIC_TIMEZONE=${TZ:-Etc/UTC} + # Optional to enable authentication + # - N8N_BASIC_AUTH_ACTIVE=true + # - N8N_BASIC_AUTH_USER= + # - N8N_BASIC_AUTH_PASSWORD= + diff --git a/.templates/nextcloud/build.py b/.templates/nextcloud/build.py new file mode 100755 index 000000000..cfb6bb54a --- /dev/null +++ b/.templates/nextcloud/build.py @@ -0,0 +1,431 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. Required to make local vars work correctly +def main(): + import os + import time + import ruamel.yaml + import signal + import sys + import subprocess + + from blessed import Terminal + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine, padText + from deps.consts import servicesDirectory, templatesDirectory, volumesDirectory, buildSettingsFileName, buildCache, servicesFileName + from deps.common_functions import getExternalPorts, getInternalPorts, checkPortConflicts, enterPortNumberWithWhiptail, generateRandomString + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + global hideHelpText # Showing and hiding the help controls text + global serviceService + + serviceVolume = volumesDirectory + currentServiceName + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + buildSettings = serviceService + buildSettingsFileName + + try: # If not already set, then set it. 
+ hideHelpText = hideHelpText + except: + hideHelpText = False + + documentationHint = 'https://sensorsiot.github.io/IOTstack/Containers/NextCloud' + + # runtime vars + portConflicts = [] + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + commandToRun = "chmod -R 0770 %s" % serviceVolume + '/html' + print('[Nextcloud::postBuild]: %s' % commandToRun) + subprocess.call(commandToRun, shell=True) + return True + + # This function is optional, and will run just before the build docker-compose.yml code. + def preBuild(): + global dockerComposeServicesYaml + # Setup service directory + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + + os.makedirs(serviceVolume, exist_ok=True) + os.makedirs(serviceVolume + '/html', exist_ok=True) + + # Multi-service: + with open((r'%s/' % serviceTemplate) + servicesFileName) as objServiceFile: + servicesListed = yaml.load(objServiceFile) + + oldBuildCache = {} + try: + with open(r'%s' % buildCache) as objBuildCache: + oldBuildCache = yaml.load(objBuildCache) + except: + pass + + buildCacheServices = {} + if "services" in oldBuildCache: + buildCacheServices = oldBuildCache["services"] + + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + + if os.path.exists(buildSettings): + + # Password randomisation + with open(r'%s' % buildSettings) as objBuildSettingsFile: + nextCloudYamlBuildOptions = yaml.load(objBuildSettingsFile) + if ( + nextCloudYamlBuildOptions["databasePasswordOption"] == "Randomise passwords for this build" + or nextCloudYamlBuildOptions["databasePasswordOption"] == "Randomise passwords every build" + or nextCloudYamlBuildOptions["databasePasswordOption"] == "Use default passwords for this build" + ): + if nextCloudYamlBuildOptions["databasePasswordOption"] == "Use default passwords for this build": + mySqlRootPassword = "IOtSt4ckToorMySqlDb" + mySqlPassword = "IOtSt4ckmySqlDbPw" + else: + mySqlPassword = generateRandomString() + mySqlRootPassword = generateRandomString() + + for (index, serviceName) in enumerate(servicesListed): + dockerComposeServicesYaml[serviceName] = servicesListed[serviceName] + if "environment" in servicesListed[serviceName]: + for (envIndex, envName) in enumerate(servicesListed[serviceName]["environment"]): + envName = envName.replace("%randomMySqlPassword%", mySqlPassword) + 
dockerComposeServicesYaml[serviceName]["environment"][envIndex] = envName.replace("%randomPassword%", mySqlRootPassword) + + # Ensure you update the "Do nothing" and other 2 strings used for password settings in 'passwords.py' + if (nextCloudYamlBuildOptions["databasePasswordOption"] == "Randomise passwords for this build"): + nextCloudYamlBuildOptions["databasePasswordOption"] = "Do nothing" + with open(buildSettings, 'w') as outputFile: + yaml.dump(nextCloudYamlBuildOptions, outputFile) + else: # Do nothing - don't change password + for (index, serviceName) in enumerate(buildCacheServices): + if serviceName in buildCacheServices: # Load service from cache if exists (to maintain password) + dockerComposeServicesYaml[serviceName] = buildCacheServices[serviceName] + else: + dockerComposeServicesYaml[serviceName] = servicesListed[serviceName] + + else: + print("NextCloud Warning: Build settings file not found, using default password") + time.sleep(1) + mySqlRootPassword = "IOtSt4ckToorMySqlDb" + mySqlPassword = "IOtSt4ckmySqlDbPw" + for (index, serviceName) in enumerate(servicesListed): + dockerComposeServicesYaml[serviceName] = servicesListed[serviceName] + if "environment" in servicesListed[serviceName]: + for (envIndex, envName) in enumerate(servicesListed[serviceName]["environment"]): + envName = envName.replace("%randomMySqlPassword%", mySqlPassword) + dockerComposeServicesYaml[serviceName]["environment"][envIndex] = envName.replace("%randomPassword%", mySqlRootPassword) + nextCloudYamlBuildOptions = { + "version": "1", + "application": "IOTstack", + "service": "NextCloud", + "comment": "NextCloud Build Options" + } + + nextCloudYamlBuildOptions["databasePasswordOption"] = "Do nothing" + with open(buildSettings, 'w') as outputFile: + yaml.dump(nextCloudYamlBuildOptions, outputFile) + + return True + + # ##################################### + # Supporting functions below + # ##################################### + + def checkForIssues(): + for (index, serviceName) in enumerate(dockerComposeServicesYaml): + if not currentServiceName == serviceName: # Skip self + currentServicePorts = getExternalPorts(currentServiceName, dockerComposeServicesYaml) + portConflicts = checkPortConflicts(serviceName, currentServicePorts, dockerComposeServicesYaml) + if (len(portConflicts) > 0): + issues["portConflicts"] = portConflicts + + # ##################################### + # End Supporting functions + # ##################################### + + ############################ + # Menu Logic + ############################ + + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + + selectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + needsRender = 1 + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + + def goBack(): + global selectionInProgress + global needsRender + selectionInProgress = False + needsRender = 1 + return True + + def enterPortNumberExec(): + # global term + global needsRender + global dockerComposeServicesYaml + externalPort = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + internalPort = getInternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + newPortNumber = enterPortNumberWithWhiptail(term, dockerComposeServicesYaml, currentServiceName, hotzoneLocation, externalPort) + + if newPortNumber > 0: + dockerComposeServicesYaml[currentServiceName]["ports"][0] = "{newExtPort}:{oldIntPort}".format( + newExtPort = newPortNumber, + oldIntPort = internalPort + 
) + createMenu() + needsRender = 1 + + def setPasswordOptions(): + global needsRender + global hasRebuiltAddons + passwordOptionsMenuFilePath = "./.templates/{currentService}/passwords.py".format(currentService=currentServiceName) + with open(passwordOptionsMenuFilePath, "rb") as pythonDynamicImportFile: + code = compile(pythonDynamicImportFile.read(), passwordOptionsMenuFilePath, "exec") + execGlobals = { + "currentServiceName": currentServiceName, + "renderMode": renderMode + } + execLocals = {} + screenActive = False + exec(code, execGlobals, execLocals) + signal.signal(signal.SIGWINCH, onResize) + screenActive = True + needsRender = 1 + + def onResize(sig, action): + global nextCloudBuildOptions + global currentMenuItemIndex + mainRender(1, nextCloudBuildOptions, currentMenuItemIndex) + + nextCloudBuildOptions = [] + + def createMenu(): + global nextCloudBuildOptions + try: + nextCloudBuildOptions = [] + portNumber = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + nextCloudBuildOptions.append([ + "Change external WUI Port Number from: {port}".format(port=portNumber), + enterPortNumberExec + ]) + except: # Error getting port + pass + nextCloudBuildOptions.append([ + "Database Password Options", + setPasswordOptions + ]) + nextCloudBuildOptions.append(["Go back", goBack]) + + def runOptionsMenu(): + createMenu() + menuEntryPoint() + return True + + def renderHotZone(term, menu, selection, hotzoneLocation): + lineLengthAtTextStart = 71 + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + for (index, menuItem) in enumerate(menu): + toPrint = "" + if index == selection: + toPrint += ('{bv} -> {t.blue_on_green} {title} {t.normal} <-'.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + else: + toPrint += ('{bv} {t.normal} {title} '.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + + for i in range(lineLengthAtTextStart - len(menuItem[0])): + toPrint += " " + + toPrint += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + + toPrint = term.center(toPrint) + + print(toPrint) + + def mainRender(needsRender, menu, selection): + term = Terminal() + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack Next Cloud Options'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Option to configure {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to run command or save input {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to go back to build stack menu {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + if 
len(documentationHint) > 1: + if len(documentationHint) > 56: + documentationAndPadding = padText(documentationHint, 71) + print(term.center("{bv} Documentation: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + else: + documentationAndPadding = padText(documentationHint, 56) + print(term.center("{bv} Documentation: {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + global nextCloudBuildOptions + if len(nextCloudBuildOptions[selection]) > 1 and isinstance(nextCloudBuildOptions[selection][1], types.FunctionType): + nextCloudBuildOptions[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(nodeRedBuildOptions[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 2: + if menu[index][2]["skip"] == True: + return False + return True + + def menuEntryPoint(): + # These need to be reglobalised due to eval() + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + global hideHelpText + global nextCloudBuildOptions + term = Terminal() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, nextCloudBuildOptions, currentMenuItemIndex) + selectionInProgress = True + with term.cbreak(): + while selectionInProgress: + menuNavigateDirection = 0 + + if needsRender: # Only rerender when changed to prevent flickering + mainRender(needsRender, nextCloudBuildOptions, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + menuNavigateDirection += 1 + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_LEFT': + goBack() + if key.name == 'KEY_ENTER': + runSelection(currentMenuItemIndex) + if key.name == 'KEY_ESCAPE': + return True + elif key: + if key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, nextCloudBuildOptions, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(nextCloudBuildOptions) + needsRender = 2 + + while not isMenuItemSelectable(nextCloudBuildOptions, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(nextCloudBuildOptions) + return True + + #################### + # End menu section + #################### + + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'nextcloud': + main() +else: + print("Error. 
'{}' Tried to run 'nextcloud' config".format(currentServiceName)) diff --git a/.templates/nextcloud/passwords.py b/.templates/nextcloud/passwords.py new file mode 100755 index 000000000..e54e09063 --- /dev/null +++ b/.templates/nextcloud/passwords.py @@ -0,0 +1,328 @@ +#!/usr/bin/env python3 + +import signal + +def main(): + from blessed import Terminal + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine + from deps.consts import servicesDirectory, templatesDirectory, buildSettingsFileName + import time + import subprocess + import ruamel.yaml + import os + + global signal + global currentServiceName + global menuSelectionInProgress + global mainMenuList + global currentMenuItemIndex + global renderMode + global paginationSize + global paginationStartIndex + global hideHelpText + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + paginationToggle = [10, term.height - 25] + paginationStartIndex = 0 + paginationSize = paginationToggle[0] + + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + buildSettings = serviceService + buildSettingsFileName + + def goBack(): + global menuSelectionInProgress + global needsRender + menuSelectionInProgress = False + needsRender = 1 + return True + + mainMenuList = [] + + hotzoneLocation = [((term.height // 16) + 6), 0] + + menuSelectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + + # Render Modes: + # 0 = No render needed + # 1 = Full render + # 2 = Hotzone only + needsRender = 1 + + def onResize(sig, action): + global mainMenuList + global currentMenuItemIndex + mainRender(1, mainMenuList, currentMenuItemIndex) + + def generateLineText(text, textLength=None, paddingBefore=0, lineLength=64): + result = "" + for i in range(paddingBefore): + result += " " + + textPrintableCharactersLength = textLength + + if (textPrintableCharactersLength) == None: + textPrintableCharactersLength = len(text) + + result += text + remainingSpace = lineLength - textPrintableCharactersLength + + for i in range(remainingSpace): + result += " " + + return result + + def renderHotZone(term, renderType, menu, selection, hotzoneLocation, paddingBefore = 4): + global paginationSize + selectedTextLength = len("-> ") + + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + + if paginationStartIndex >= 1: + print(term.center("{b} {uaf} {uaf}{uaf}{uaf} {ual} {b}".format( + b=specialChars[renderMode]["borderVertical"], + uaf=specialChars[renderMode]["upArrowFull"], + ual=specialChars[renderMode]["upArrowLine"] + ))) + else: + print(term.center(commonEmptyLine(renderMode))) + + for (index, menuItem) in enumerate(menu): # Menu loop + if index >= paginationStartIndex and index < paginationStartIndex + paginationSize: + lineText = generateLineText(menuItem[0], paddingBefore=paddingBefore) + + # Menu highlight logic + if index == selection: + formattedLineText = '-> {t.blue_on_green}{title}{t.normal} <-'.format(t=term, title=menuItem[0]) + paddedLineText = generateLineText(formattedLineText, textLength=len(menuItem[0]) + selectedTextLength, paddingBefore=paddingBefore - selectedTextLength) + toPrint = paddedLineText + else: + toPrint = '{title}{t.normal}'.format(t=term, title=lineText) + # ##### + + # Menu check render logic + if menuItem[1]["checked"]: + toPrint = " 
(X) " + toPrint + else: + toPrint = " ( ) " + toPrint + + toPrint = "{bv} {toPrint} {bv}".format(bv=specialChars[renderMode]["borderVertical"], toPrint=toPrint) # Generate border + toPrint = term.center(toPrint) # Center Text (All lines should have the same amount of printable characters) + # ##### + print(toPrint) + + if paginationStartIndex + paginationSize < len(menu): + print(term.center("{b} {daf} {daf}{daf}{daf} {dal} {b}".format( + b=specialChars[renderMode]["borderVertical"], + daf=specialChars[renderMode]["downArrowFull"], + dal=specialChars[renderMode]["downArrowLine"] + ))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + + + def mainRender(needsRender, menu, selection): + global paginationStartIndex + global paginationSize + term = Terminal() + + if selection >= paginationStartIndex + paginationSize: + paginationStartIndex = selection - (paginationSize - 1) + 1 + needsRender = 1 + + if selection <= paginationStartIndex - 1: + paginationStartIndex = selection + needsRender = 1 + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack NextCloud Password Options'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Password Option {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, needsRender, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + if term.height < 32: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Not enough vertical room to render controls help text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Space] to select option {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to build and save option {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to cancel changes {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + if len(mainMenuList[selection]) > 1 and isinstance(mainMenuList[selection][1], types.FunctionType): + mainMenuList[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(mainMenuList[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 1: + if "skip" in menu[index][1] and menu[index][1]["skip"] == True: + return False + return True + + def loadOptionsMenu(): + global mainMenuList + mainMenuList.append(["Use default passwords for this build", { "checked": True }]) + 
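# The option labels collected in this menu are compared verbatim by nextcloud/build.py's preBuild()
# earlier in this diff; saveOptions() below persists the chosen one into the service's
# build-settings file (its name comes from deps.consts.buildSettingsFileName, which is not shown
# here). Reconstructed from saveOptions(), the saved YAML looks roughly like this:
import sys
import ruamel.yaml

buildSettingsExample = {
    "version": "1",
    "application": "IOTstack",
    "service": "NextCloud",
    "comment": "NextCloud Build Options",
    # one of the four menu labels; "Randomise passwords for this build" is rewritten to
    # "Do nothing" by preBuild() after the passwords have been generated once
    "databasePasswordOption": "Randomise passwords for this build",
}
ruamel.yaml.YAML().dump(buildSettingsExample, sys.stdout)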
mainMenuList.append(["Randomise passwords for this build", { "checked": False }]) + mainMenuList.append(["Randomise passwords every build", { "checked": False }]) + mainMenuList.append(["Do nothing", { "checked": False }]) + + def checkMenuItem(selection): + global mainMenuList + for (index, menuItem) in enumerate(mainMenuList): + mainMenuList[index][1]["checked"] = False + + mainMenuList[selection][1]["checked"] = True + + def saveOptions(): + try: + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + + if os.path.exists(buildSettings): + with open(r'%s' % buildSettings) as objBuildSettingsFile: + nextCloudYamlBuildOptions = yaml.load(objBuildSettingsFile) + else: + nextCloudYamlBuildOptions = { + "version": "1", + "application": "IOTstack", + "service": "NextCloud", + "comment": "NextCloud Build Options" + } + + nextCloudYamlBuildOptions["databasePasswordOption"] = "" + + for (index, menuOption) in enumerate(mainMenuList): + if menuOption[1]["checked"]: + nextCloudYamlBuildOptions["databasePasswordOption"] = menuOption[0] + break + + with open(buildSettings, 'w') as outputFile: + yaml.dump(nextCloudYamlBuildOptions, outputFile) + + except Exception as err: + print("Error saving NextCloud Password options", currentServiceName) + print(err) + return False + global hasRebuiltHardwareSelection + hasRebuiltHardwareSelection = True + return True + + def loadOptions(): + try: + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + + if os.path.exists(buildSettings): + with open(r'%s' % buildSettings) as objBuildSettingsFile: + nextCloudYamlBuildOptions = yaml.load(objBuildSettingsFile) + + for (index, menuOption) in enumerate(mainMenuList): + if menuOption[0] == nextCloudYamlBuildOptions["databasePasswordOption"]: + checkMenuItem(index) + break + + except Exception as err: + print("Error loading NextCloud Password options", currentServiceName) + print(err) + return False + return True + + + if __name__ == 'builtins': + global signal + term = Terminal() + signal.signal(signal.SIGWINCH, onResize) + loadOptionsMenu() + loadOptions() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + menuSelectionInProgress = True + with term.cbreak(): + while menuSelectionInProgress: + menuNavigateDirection = 0 + + if not needsRender == 0: # Only rerender when changed to prevent flickering + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + if paginationSize == paginationToggle[0]: + paginationSize = paginationToggle[1] + else: + paginationSize = paginationToggle[0] + mainRender(1, mainMenuList, currentMenuItemIndex) + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_ENTER': + if saveOptions(): + return True + else: + print("Something went wrong. 
Try saving the list again.") + if key.name == 'KEY_ESCAPE': + menuSelectionInProgress = False + return True + elif key: + if key == ' ': # Space pressed + checkMenuItem(currentMenuItemIndex) # Update checked list + needsRender = 2 + elif key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, mainMenuList, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + needsRender = 2 + + while not isMenuItemSelectable(mainMenuList, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + return True + + return True + +originalSignalHandler = signal.getsignal(signal.SIGINT) +main() +signal.signal(signal.SIGWINCH, originalSignalHandler) diff --git a/.templates/nextcloud/service.yml b/.templates/nextcloud/service.yml index b7e976243..9ce380d75 100644 --- a/.templates/nextcloud/service.yml +++ b/.templates/nextcloud/service.yml @@ -1,25 +1,39 @@ - nextcloud: - image: nextcloud - container_name: nextcloud - ports: - - 9321:80 - volumes: - - ./volumes/nextcloud/html:/var/www/html - restart: unless-stopped - depends_on: - - nextcloud_db - links: - - nextcloud_db +nextcloud: + container_name: nextcloud + image: nextcloud + restart: unless-stopped + environment: + - TZ=${TZ:-Etc/UTC} + - MYSQL_HOST=nextcloud_db + - MYSQL_PASSWORD=%randomMySqlPassword% + - MYSQL_DATABASE=nextcloud + - MYSQL_USER=nextcloud + ports: + - "9321:80" + - "9343:443" + volumes: + - ./volumes/nextcloud/html:/var/www/html + depends_on: + - nextcloud_db + networks: + - default + - nextcloud - nextcloud_db: - image: linuxserver/mariadb - container_name: nextcloud_db - volumes: - - ./volumes/nextcloud/db:/config - environment: - - MYSQL_ROOT_PASSWORD=password - - MYSQL_PASSWORD=password - - MYSQL_DATABASE=nextcloud - - MYSQL_USER=nextcloud - restart: unless-stopped +nextcloud_db: + container_name: nextcloud_db + build: ./.templates/mariadb/. 
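# Note that %randomPassword% and %randomMySqlPassword% in the environment blocks above and below
# are placeholders, not values docker-compose can use directly: nextcloud/build.py's preBuild()
# (earlier in this diff) rewrites them before docker-compose.yml is written, using either the
# fixed defaults or generateRandomString() from deps.common_functions. A condensed sketch of that
# substitution, with secrets.token_hex standing in for the real random-string helper:
import secrets

def fillPasswordPlaceholders(serviceDefinition, mySqlPassword=None, mySqlRootPassword=None):
    mySqlPassword = mySqlPassword or secrets.token_hex(16)
    mySqlRootPassword = mySqlRootPassword or secrets.token_hex(16)
    for index, entry in enumerate(serviceDefinition.get("environment", [])):
        entry = entry.replace("%randomMySqlPassword%", mySqlPassword)
        serviceDefinition["environment"][index] = entry.replace("%randomPassword%", mySqlRootPassword)
    return serviceDefinition

# fillPasswordPlaceholders({"environment": ["MYSQL_PASSWORD=%randomMySqlPassword%"]})
# -> {"environment": ["MYSQL_PASSWORD=<32 hex chars>"]}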
+ restart: unless-stopped + environment: + - TZ=${TZ:-Etc/UTC} + - PUID=1000 + - PGID=1000 + - MYSQL_ROOT_PASSWORD=%randomPassword% + - MYSQL_PASSWORD=%randomMySqlPassword% + - MYSQL_DATABASE=nextcloud + - MYSQL_USER=nextcloud + volumes: + - ./volumes/nextcloud/db:/config + - ./volumes/nextcloud/db_backup:/backup + networks: + - nextcloud diff --git a/.templates/nodered/Dockerfile.template b/.templates/nodered/Dockerfile.template new file mode 100755 index 000000000..8b782c751 --- /dev/null +++ b/.templates/nodered/Dockerfile.template @@ -0,0 +1,21 @@ +# reference argument - omitted defaults to latest +ARG DOCKERHUB_TAG=latest + +# Download base image +FROM nodered/node-red:${DOCKERHUB_TAG} + +# reference argument - omitted defaults to null +ARG EXTRA_PACKAGES + +# default user is node-red - need to be root to install packages +USER root + +# install packages +RUN apk add --no-cache eudev-dev ${EXTRA_PACKAGES} + +# switch back to default user +USER node-red + +# add-on nodes follow + +%run npm install modules list% diff --git a/.templates/nodered/addons.py b/.templates/nodered/addons.py new file mode 100755 index 000000000..75571382c --- /dev/null +++ b/.templates/nodered/addons.py @@ -0,0 +1,349 @@ +#!/usr/bin/env python3 + +import signal + +def main(): + from blessed import Terminal + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine + from deps.consts import servicesDirectory, templatesDirectory + import time + import subprocess + import ruamel.yaml + import os + + global signal + global currentServiceName + global dockerCommandsSelectionInProgress + global mainMenuList + global currentMenuItemIndex + global renderMode + global paginationSize + global paginationStartIndex + global addonsFile + global hideHelpText + + global installCommand + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + try: # If not already set, then set it. 
+ hideHelpText = hideHelpText + except: + hideHelpText = False + + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + paginationToggle = [10, term.height - 25] + paginationStartIndex = 0 + paginationSize = paginationToggle[0] + + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + addonsFile = serviceTemplate + "/addons.yml" + + def goBack(): + global dockerCommandsSelectionInProgress + global needsRender + dockerCommandsSelectionInProgress = False + needsRender = 1 + return True + + mainMenuList = [] + + hotzoneLocation = [((term.height // 16) + 6), 0] + + dockerCommandsSelectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + + # Render Modes: + # 0 = No render needed + # 1 = Full render + # 2 = Hotzone only + needsRender = 1 + + def onResize(sig, action): + global mainMenuList + global currentMenuItemIndex + mainRender(1, mainMenuList, currentMenuItemIndex) + + def generateLineText(text, textLength=None, paddingBefore=0, lineLength=64): + result = "" + for i in range(paddingBefore): + result += " " + + textPrintableCharactersLength = textLength + + if (textPrintableCharactersLength) == None: + textPrintableCharactersLength = len(text) + + result += text + remainingSpace = lineLength - textPrintableCharactersLength + + for i in range(remainingSpace): + result += " " + + return result + + def renderHotZone(term, renderType, menu, selection, hotzoneLocation, paddingBefore = 4): + global paginationSize + selectedTextLength = len("-> ") + + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + + if paginationStartIndex >= 1: + print(term.center("{b} {uaf} {uaf}{uaf}{uaf} {ual} {b}".format( + b=specialChars[renderMode]["borderVertical"], + uaf=specialChars[renderMode]["upArrowFull"], + ual=specialChars[renderMode]["upArrowLine"] + ))) + else: + print(term.center(commonEmptyLine(renderMode))) + + for (index, menuItem) in enumerate(menu): # Menu loop + if index >= paginationStartIndex and index < paginationStartIndex + paginationSize: + lineText = generateLineText(menuItem[0], paddingBefore=paddingBefore) + + # Menu highlight logic + if index == selection: + formattedLineText = '-> {t.blue_on_green}{title}{t.normal} <-'.format(t=term, title=menuItem[0]) + paddedLineText = generateLineText(formattedLineText, textLength=len(menuItem[0]) + selectedTextLength, paddingBefore=paddingBefore - selectedTextLength) + toPrint = paddedLineText + else: + toPrint = '{title}{t.normal}'.format(t=term, title=lineText) + # ##### + + # Menu check render logic + if menuItem[1]["checked"]: + toPrint = " (X) " + toPrint + else: + toPrint = " ( ) " + toPrint + + toPrint = "{bv} {toPrint} {bv}".format(bv=specialChars[renderMode]["borderVertical"], toPrint=toPrint) # Generate border + toPrint = term.center(toPrint) # Center Text (All lines should have the same amount of printable characters) + # ##### + print(toPrint) + + if paginationStartIndex + paginationSize < len(menu): + print(term.center("{b} {daf} {daf}{daf}{daf} {dal} {b}".format( + b=specialChars[renderMode]["borderVertical"], + daf=specialChars[renderMode]["downArrowFull"], + dal=specialChars[renderMode]["downArrowLine"] + ))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + + + def mainRender(needsRender, menu, selection): + global paginationStartIndex + global paginationSize + term = Terminal() + + if selection >= 
paginationStartIndex + paginationSize: + paginationStartIndex = selection - (paginationSize - 1) + 1 + needsRender = 1 + + if selection <= paginationStartIndex - 1: + paginationStartIndex = selection + needsRender = 1 + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack Node Red Addons'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select NodeRed Addons (npm) to install on initial run {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, needsRender, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + if term.height < 32: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Not enough vertical room to render controls help text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} Note: After initial startup installation, you must use the Palettes menu {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} in the NodeRed WUI to add or remove addons from NodeRed. {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Space] to select or deselect addon {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Tab] Expand or collapse addon menu size {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [S] Switch between sorted by checked and sorted alphabetically {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to build and save addons list {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to cancel changes {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + if len(mainMenuList[selection]) > 1 and isinstance(mainMenuList[selection][1], types.FunctionType): + mainMenuList[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(mainMenuList[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 1: + if "skip" in menu[index][1] and menu[index][1]["skip"] == True: + return False + return True + + def loadAddonsMenu(): + global mainMenuList + global installCommand + if os.path.exists(addonsFile): + with open(r'%s' % addonsFile) as objAddonsFile: + addonsLoaded = yaml.load(objAddonsFile) + installCommand = 
addonsLoaded["dockerFileInstallCommand"] + defaultOnAddons = addonsLoaded["addons"]["default_on"] + defaultOffAddons = addonsLoaded["addons"]["default_off"] + if not os.path.exists(serviceService + '/addons_list.yml'): + defaultOnAddons.sort() + for (index, addonName) in enumerate(defaultOnAddons): + mainMenuList.append([addonName, { "checked": True }]) + + defaultOffAddons.sort() + for (index, addonName) in enumerate(defaultOffAddons): + mainMenuList.append([addonName, { "checked": False }]) + else: + with open(r'%s' % serviceService + '/addons_list.yml') as objSavedAddonsFile: + savedAddonsFile = yaml.load(objSavedAddonsFile) + savedAddons = savedAddonsFile["addons"] + savedAddons.sort() + for (index, addonName) in enumerate(savedAddons): + mainMenuList.append([addonName, { "checked": True }]) + + for (index, addonName) in enumerate(defaultOnAddons): + if not addonName in savedAddons: + mainMenuList.append([addonName, { "checked": False }]) + + for (index, addonName) in enumerate(defaultOffAddons): + if not addonName in savedAddons: + mainMenuList.append([addonName, { "checked": False }]) + sortBy = 0 + mainMenuList.sort(key=lambda x: (x[1]["checked"], x[0]), reverse=True) + + else: + print("Error: '{addonsFile}' file doesn't exist.".format(addonsFile=addonsFile)) + + def checkMenuItem(selection): + global mainMenuList + if mainMenuList[selection][1]["checked"] == True: + mainMenuList[selection][1]["checked"] = False + else: + mainMenuList[selection][1]["checked"] = True + + def saveAddonList(): + try: + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + nodeRedYamlAddonsList = { + "version": "1", + "application": "IOTstack", + "service": "nodered", + "comment": "Selected addons", + "dockerFileInstallCommand": installCommand, + "addons": [] + } + for (index, addon) in enumerate(mainMenuList): + if addon[1]["checked"]: + nodeRedYamlAddonsList["addons"].append(addon[0]) + + with open(r'%s/addons_list.yml' % serviceService, 'w') as outputFile: + yaml.dump(nodeRedYamlAddonsList, outputFile) + + except Exception as err: + print("Error saving NodeRed Addons list", currentServiceName) + print(err) + return False + global hasRebuiltAddons + hasRebuiltAddons = True + return True + + + if __name__ == 'builtins': + global signal + sortBy = 0 + term = Terminal() + signal.signal(signal.SIGWINCH, onResize) + loadAddonsMenu() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + dockerCommandsSelectionInProgress = True + with term.cbreak(): + while dockerCommandsSelectionInProgress: + menuNavigateDirection = 0 + + if not needsRender == 0: # Only rerender when changed to prevent flickering + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + if paginationSize == paginationToggle[0]: + paginationSize = paginationToggle[1] + else: + paginationSize = paginationToggle[0] + mainRender(1, mainMenuList, currentMenuItemIndex) + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_ENTER': + if saveAddonList(): + return True + else: + print("Something went wrong. 
Try saving the list again.") + if key.name == 'KEY_ESCAPE': + dockerCommandsSelectionInProgress = False + return True + elif key: + if key == ' ': # Space pressed + checkMenuItem(currentMenuItemIndex) # Update checked list + needsRender = 2 + elif key == 's': + if sortBy == 0: + sortBy = 1 + mainMenuList.sort(key=lambda x: x[0], reverse=False) + else: + sortBy = 0 + mainMenuList.sort(key=lambda x: (x[1]["checked"], x[0]), reverse=True) + needsRender = 2 + elif key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, mainMenuList, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + needsRender = 2 + + while not isMenuItemSelectable(mainMenuList, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + return True + + return True + +originalSignalHandler = signal.getsignal(signal.SIGINT) +main() +signal.signal(signal.SIGWINCH, originalSignalHandler) diff --git a/.templates/nodered/addons.yml b/.templates/nodered/addons.yml new file mode 100644 index 000000000..2f3ae9aed --- /dev/null +++ b/.templates/nodered/addons.yml @@ -0,0 +1,60 @@ +version: 1 +application: "IOTstack" +service: "nodered" +comment: "Addons available for NodeRed" +dockerFileInstallCommand: "RUN cd /usr/src/node-red && npm install --save " +addons: + default_on: + - "node-red-node-pi-gpiod" + - "node-red-contrib-influxdb" + - "node-red-contrib-boolean-logic" + - "node-red-configurable-ping" + - "node-red-dashboard" + default_off: + - "@flowfuse/node-red-dashboard" + - "node-red-node-openweathermap" + - "node-red-contrib-discord" + - "node-red-node-email" + - "node-red-node-google" + - "node-red-node-emoncms" + - "node-red-node-geofence" + - "node-red-node-ping" + - "node-red-node-random" + - "node-red-node-smooth" + - "node-red-node-darksky" + - "node-red-node-sqlite" + - "node-red-node-serialport@0.15.0" + - "node-red-contrib-config" + - "node-red-contrib-grove" + - "node-red-contrib-diode" + - "node-red-contrib-sunevents" + - "node-red-contrib-bigtimer" + - "node-red-contrib-esplogin" + - "node-red-contrib-timeout" + - "node-red-contrib-moment" + - "node-red-contrib-telegrambot" + - "node-red-contrib-particle" + - "node-red-contrib-web-worldmap" + - "node-red-contrib-ramp-thermostat" + - "node-red-contrib-isonline" + - "node-red-contrib-npm" + - "node-red-contrib-file-function" + - "node-red-contrib-home-assistant-websocket" + - "node-red-contrib-blynk-ws" + - "node-red-contrib-owntracks" + - "node-red-contrib-alexa-local" + - "node-red-contrib-heater-controller" + - "node-red-contrib-deconz" + - "node-red-contrib-generic-ble" + - "node-red-contrib-zigbee2mqtt" + - "node-red-contrib-vcgencmd" + - "@node-red-contrib-themes/theme-collection" + - "node-red-contrib-tf-function" + - "node-red-contrib-tf-model" + - "node-red-contrib-post-object-detection" + - "node-red-contrib-bert-tokenizer" + - "node-red-node-tail" + - "node-red-contrib-boolean-logic-ultimate" + - "node-red-contrib-chartjs" + - "node-red-contrib-md5" + - "node-red-contrib-pushsafer" diff --git a/.templates/nodered/build.py b/.templates/nodered/build.py new file mode 100755 index 000000000..705b64183 --- /dev/null +++ b/.templates/nodered/build.py @@ -0,0 +1,398 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # 
Options, and others hooks +haltOnErrors = True + +# Main wrapper function. Required to make local vars work correctly +def main(): + import os + import time + import ruamel.yaml + import signal + import sys + from blessed import Terminal + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine, padText + from deps.consts import servicesDirectory, templatesDirectory + from deps.common_functions import getExternalPorts, getInternalPorts, checkPortConflicts, enterPortNumberWithWhiptail + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global renderMode # For rendering fancy or basic ascii characters + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + global serviceService + global serviceTemplate + global addonsFile + global hideHelpText + global hasRebuiltAddons + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + documentationHint = 'https://sensorsiot.github.io/IOTstack/Containers/Node-RED' + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + # runtime vars + portConflicts = [] + hasRebuiltAddons = False + + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + addonsFile = serviceService + "/addons_list.yml" + + dockerfileTemplateReplace = "%run npm install modules list%" + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + global buildHooks + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. 
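# The preBuild() hook below consumes ./services/nodered/addons_list.yml, the file that addons.py
# (earlier in this diff) writes when a selection is saved. Reconstructed from saveAddonList(), its
# shape is roughly the following; each listed addon becomes one RUN line in the generated
# Dockerfile, with node-red-node-sqlite additionally getting the --unsafe-perm flag:
addonsListExample = {
    "version": "1",
    "application": "IOTstack",
    "service": "nodered",
    "comment": "Selected addons",
    "dockerFileInstallCommand": "RUN cd /usr/src/node-red && npm install --save ",
    "addons": [
        "node-red-contrib-influxdb",
        "node-red-dashboard",
        "node-red-node-pi-gpiod",
    ],
}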
+    def preBuild():
+        import time
+
+        # Setup service directory
+        if not os.path.exists(serviceService):
+            os.makedirs(serviceService, exist_ok=True)
+
+        # Other prebuild steps
+        print("Starting NodeRed Build script")
+        time.sleep(0.1)
+        with open(r'%s/Dockerfile.template' % serviceTemplate, 'r') as dockerTemplate:
+            templateData = dockerTemplate.read()
+
+        # Only read the addons list if it exists; otherwise the placeholder is simply blanked out.
+        addonsInstallCommands = ""
+        if os.path.exists(addonsFile):
+            with open(r'%s' % addonsFile) as objAddonsFile:
+                addonsSelected = yaml.load(objAddonsFile)
+            installCommand = addonsSelected["dockerFileInstallCommand"]
+            for (index, addonName) in enumerate(addonsSelected["addons"]):
+                if (addonName == 'node-red-node-sqlite'): # SQLite requires a special param
+                    addonsInstallCommands = addonsInstallCommands + "{installCommand} --unsafe-perm {addonName}\n".format(addonName=addonName, installCommand=installCommand)
+                else:
+                    addonsInstallCommands = addonsInstallCommands + "{installCommand} {addonName}\n".format(addonName=addonName, installCommand=installCommand)
+
+        templateData = templateData.replace(dockerfileTemplateReplace, addonsInstallCommands)
+
+        with open(r'%s/Dockerfile' % serviceService, 'w') as dockerTemplate:
+            dockerTemplate.write(templateData)
+        print("Finished NodeRed Build script")
+        time.sleep(0.3)
+        return True
+
+    # #####################################
+    # Supporting functions below
+    # #####################################
+
+    def checkForIssues():
+        fileIssues = checkFiles()
+        if (len(fileIssues) > 0):
+            issues["fileIssues"] = fileIssues
+        for (index, serviceName) in enumerate(dockerComposeServicesYaml):
+            if not currentServiceName == serviceName: # Skip self
+                currentServicePorts = getExternalPorts(currentServiceName, dockerComposeServicesYaml)
+                portConflicts = checkPortConflicts(serviceName, currentServicePorts, dockerComposeServicesYaml)
+                if (len(portConflicts) > 0):
+                    issues["portConflicts"] = portConflicts
+
+    def checkFiles():
+        fileIssues = []
+        if not os.path.exists(serviceService + '/addons_list.yml'):
+            fileIssues.append('/addons_list.yml does not exist. Build addons file in options to fix.
This is optional') + return fileIssues + + ############################ + # Menu Logic + ############################ + + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + + selectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + needsRender = 1 + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + + def goBack(): + global selectionInProgress + global needsRender + selectionInProgress = False + needsRender = 1 + return True + + def enterPortNumberExec(): + # global term + global needsRender + global dockerComposeServicesYaml + externalPort = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + internalPort = getInternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + newPortNumber = enterPortNumberWithWhiptail(term, dockerComposeServicesYaml, currentServiceName, hotzoneLocation, externalPort) + + if newPortNumber > 0: + dockerComposeServicesYaml[currentServiceName]["ports"][0] = "{newExtPort}:{oldIntPort}".format( + newExtPort = newPortNumber, + oldIntPort = internalPort + ) + createMenu() + needsRender = 1 + + def onResize(sig, action): + global nodeRedBuildOptions + global currentMenuItemIndex + mainRender(1, nodeRedBuildOptions, currentMenuItemIndex) + + def selectNodeRedAddons(): + global needsRender + global hasRebuiltAddons + dockerCommandsFilePath = "./.templates/nodered/addons.py" + with open(dockerCommandsFilePath, "rb") as pythonDynamicImportFile: + code = compile(pythonDynamicImportFile.read(), dockerCommandsFilePath, "exec") + # execGlobals = globals() + # execLocals = locals() + execGlobals = { + "currentServiceName": currentServiceName, + "renderMode": renderMode + } + execLocals = {} + screenActive = False + exec(code, execGlobals, execLocals) + signal.signal(signal.SIGWINCH, onResize) + try: + hasRebuiltAddons = execGlobals["hasRebuiltAddons"] + except: + hasRebuiltAddons = False + screenActive = True + needsRender = 1 + + nodeRedBuildOptions = [] + + def createMenu(): + global nodeRedBuildOptions + try: + nodeRedBuildOptions = [] + portNumber = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + nodeRedBuildOptions.append([ + "Change external WUI Port Number from: {port}".format(port=portNumber), + enterPortNumberExec + ]) + except: # Error getting port + pass + nodeRedBuildOptions.append(["Go back", goBack]) + + if os.path.exists(serviceService + '/addons_list.yml'): + nodeRedBuildOptions.insert(0, ["Select & overwrite addons list", selectNodeRedAddons]) + else: + nodeRedBuildOptions.insert(0, ["Select & build addons list", selectNodeRedAddons]) + + def runOptionsMenu(): + createMenu() + menuEntryPoint() + return True + + def renderHotZone(term, menu, selection, hotzoneLocation): + lineLengthAtTextStart = 71 + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + for (index, menuItem) in enumerate(menu): + toPrint = "" + if index == selection: + toPrint += ('{bv} -> {t.blue_on_green} {title} {t.normal} <-'.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + else: + toPrint += ('{bv} {t.normal} {title} '.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + + for i in range(lineLengthAtTextStart - len(menuItem[0])): + toPrint += " " + + toPrint += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + + toPrint = term.center(toPrint) + + print(toPrint) + + def mainRender(needsRender, menu, selection): + term = Terminal() + + if needsRender == 1: + 
print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack NodeRed Options'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Option to configure {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, menu, selection, hotzoneLocation) + + if needsRender == 1: + if os.path.exists(serviceService + '/addons_list.yml'): + if hasRebuiltAddons: + print(term.center(commonEmptyLine(renderMode))) + print(term.center('{bv} {t.grey_on_blue4} {text} {t.normal}{t.white_on_black}{t.normal} {bv}'.format(t=term, text="Addons list has been rebuilt: addons_list.yml", bv=specialChars[renderMode]["borderVertical"]))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center('{bv} {t.grey_on_blue4} {text} {t.normal}{t.white_on_black}{t.normal} {bv}'.format(t=term, text="Using existing addons_list.yml for addons installation", bv=specialChars[renderMode]["borderVertical"]))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to run command or save input {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to go back to build stack menu {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + if len(documentationHint) > 1: + if len(documentationHint) > 56: + documentationAndPadding = padText(documentationHint, 71) + print(term.center("{bv} Documentation: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + else: + documentationAndPadding = padText(documentationHint, 56) + print(term.center("{bv} Documentation: {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + global nodeRedBuildOptions + if len(nodeRedBuildOptions[selection]) > 1 and isinstance(nodeRedBuildOptions[selection][1], types.FunctionType): + nodeRedBuildOptions[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(nodeRedBuildOptions[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 2: + if menu[index][2]["skip"] == True: + return False + return True + + def menuEntryPoint(): + # These need to be reglobalised due to eval() + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + global hideHelpText + global nodeRedBuildOptions + term = Terminal() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, 
nodeRedBuildOptions, currentMenuItemIndex) + selectionInProgress = True + with term.cbreak(): + while selectionInProgress: + menuNavigateDirection = 0 + + if needsRender: # Only rerender when changed to prevent flickering + mainRender(needsRender, nodeRedBuildOptions, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + menuNavigateDirection += 1 + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_LEFT': + goBack() + if key.name == 'KEY_ENTER': + runSelection(currentMenuItemIndex) + if key.name == 'KEY_ESCAPE': + return True + elif key: + if key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, nodeRedBuildOptions, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(nodeRedBuildOptions) + needsRender = 2 + + while not isMenuItemSelectable(nodeRedBuildOptions, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(nodeRedBuildOptions) + return True + + #################### + # End menu section + #################### + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'nodered': + main() +else: + print("Error. '{}' Tried to run 'nodered' config".format(currentServiceName)) diff --git a/.templates/nodered/build.sh b/.templates/nodered/build.sh deleted file mode 100755 index ba5ab3748..000000000 --- a/.templates/nodered/build.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/bash - -# build Dockerfile for nodered - -node_selection=$(whiptail --title "Node-RED nodes" --checklist --separate-output \ - "Use the [SPACEBAR] to select the nodes you want preinstalled" 20 78 12 -- \ - "node-red-node-pi-gpiod" " " "ON" \ - "node-red-dashboard" " " "ON" \ - "node-red-node-openweathermap" " " "OFF" \ - "node-red-node-google" " " "OFF" \ - "node-red-node-emoncms" " " "OFF" \ - "node-red-node-geofence" " " "OFF" \ - "node-red-node-ping" " " "OFF" \ - "node-red-node-random" " " "OFF" \ - "node-red-node-smooth" " " "OFF" \ - "node-red-node-darksky" " " "OFF" \ - "node-red-node-sqlite" " " "OFF" \ - "node-red-contrib-influxdb" " " "ON" \ - "node-red-contrib-config" " " "OFF" \ - "node-red-contrib-grove" " " "OFF" \ - "node-red-contrib-diode" " " "OFF" \ - "node-red-contrib-bigtimer" " " "OFF" \ - "node-red-contrib-esplogin" " " "OFF" \ - "node-red-contrib-timeout" " " "OFF" \ - "node-red-contrib-moment" " " "OFF" \ - "node-red-contrib-particle" " " "OFF" \ - "node-red-contrib-web-worldmap" " " "OFF" \ - "node-red-contrib-ramp-thermostat" " " "OFF" \ - "node-red-contrib-isonline" " " "OFF" \ - "node-red-contrib-npm" " " "OFF" \ - "node-red-contrib-file-function" " " "OFF" \ - "node-red-contrib-boolean-logic" " " "OFF" \ - "node-red-contrib-home-assistant-websocket" " " "OFF" \ - "node-red-contrib-blynk-ws" " " "OFF" \ - "node-red-contrib-owntracks" " " "OFF" \ - "node-red-contrib-alexa-local" " " "OFF" \ - "node-red-contrib-heater-controller" " " "OFF" \ - 3>&1 1>&2 2>&3) - -##echo "$check_selection" -mapfile -t checked_nodes <<<"$node_selection" - -nr_dfile=./services/nodered/Dockerfile - 
-sqliteflag=0 - -touch $nr_dfile -echo "FROM nodered/node-red:latest" >$nr_dfile -#node red install script inspired from https://tech.scargill.net/the-script/ -echo "RUN for addonnodes in \\" >>$nr_dfile -for checked in "${checked_nodes[@]}"; do - #test to see if sqlite is selected and set flag, sqlite require additional flags - if [ "$checked" = "node-red-node-sqlite" ]; then - sqliteflag=1 - else - echo "$checked \\" >>$nr_dfile - fi -done -echo "; do \\" >>$nr_dfile -echo "npm install \${addonnodes} ;\\" >>$nr_dfile -echo "done;" >>$nr_dfile - -[ $sqliteflag = 1 ] && echo "RUN npm install --unsafe-perm node-red-node-sqlite" >>$nr_dfile diff --git a/.templates/nodered/nodered.env b/.templates/nodered/nodered.env deleted file mode 100644 index ca2e11f1f..000000000 --- a/.templates/nodered/nodered.env +++ /dev/null @@ -1 +0,0 @@ -#TZ=timezone \ No newline at end of file diff --git a/.templates/nodered/service.yml b/.templates/nodered/service.yml index ca5acad62..93238e7fb 100644 --- a/.templates/nodered/service.yml +++ b/.templates/nodered/service.yml @@ -1,11 +1,16 @@ - nodered: - container_name: nodered - build: ./services/nodered/. - restart: unless-stopped - user: "0" - privileged: true - env_file: ./services/nodered/nodered.env - ports: - - 1880:1880 - volumes: - - ./volumes/nodered/data:/data +nodered: + container_name: nodered + build: + context: ./services/nodered/. + args: + - DOCKERHUB_TAG=latest + - EXTRA_PACKAGES= + restart: unless-stopped + user: "0" + environment: + - TZ=${TZ:-Etc/UTC} + ports: + - "1880:1880" + volumes: + - ./volumes/nodered/data:/data + - ./volumes/nodered/ssh:/root/.ssh diff --git a/.templates/octoprint/service.yml b/.templates/octoprint/service.yml new file mode 100644 index 000000000..c452bf21c --- /dev/null +++ b/.templates/octoprint/service.yml @@ -0,0 +1,17 @@ +octoprint: + container_name: octoprint + image: octoprint/octoprint + restart: unless-stopped + environment: + - TZ=${TZ:-Etc/UTC} + # - ENABLE_MJPG_STREAMER=true + # - MJPG_STREAMER_INPUT=-r 640x480 -f 10 -y + # - CAMERA_DEV=/dev/video0 + ports: + - "9980:80" + devices: + - "${OCTOPRINT_DEVICE_PATH:?eg echo OCTOPRINT_DEVICE_PATH=/dev/serial0 >>~/IOTstack/.env}:/dev/ttyACM0" + # - /dev/video0:/dev/video0 + volumes: + - ./volumes/octoprint:/octoprint + diff --git a/.templates/openhab/build.py b/.templates/openhab/build.py new file mode 100755 index 000000000..fd9fd3932 --- /dev/null +++ b/.templates/openhab/build.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. Required to make local vars work correctly +def main(): + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. 
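+    # (Hook wiring, for anyone using this template as a reference: whatever
+    # loads this file sets the global `toRun` to the name of the hook-checker
+    # it wants, and the eval(toRun)() call at the bottom of main() invokes it.
+    # Each checkFor*Hook() simply records in buildHooks whether the
+    # corresponding optional function is defined in this template.)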
+ def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. + def preBuild(): + return True + + # ##################################### + # Supporting functions below + # ##################################### + + # None + + # ##################################### + # End Supporting functions + # ##################################### + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'openhab': + main() +else: + print("Error. '{}' Tried to run 'openhab' config".format(currentServiceName)) diff --git a/.templates/openhab/service.yml b/.templates/openhab/service.yml index 128cbc14f..53e8f4800 100644 --- a/.templates/openhab/service.yml +++ b/.templates/openhab/service.yml @@ -1,21 +1,20 @@ - openhab: - image: "openhab/openhab:2.4.0" - container_name: openhab - restart: unless-stopped - network_mode: host -# cap_add: -# - NET_ADMIN -# - NET_RAW - volumes: - - "/etc/localtime:/etc/localtime:ro" - - "/etc/timezone:/etc/timezone:ro" - - "./volumes/openhab/addons:/openhab/addons" - - "./volumes/openhab/conf:/openhab/conf" - - "./volumes/openhab/userdata:/openhab/userdata" - environment: - OPENHAB_HTTP_PORT: "8080" - OPENHAB_HTTPS_PORT: "8443" - EXTRA_JAVA_OPTS: "-Duser.timezone=Europe/Berlin" -# # The command node is very important. It overrides -# # the "gosu openhab tini -s ./start.sh" command from Dockerfile and runs as root! -# command: "tini -s ./start.sh server" +openhab: + container_name: openhab + image: "openhab/openhab:latest" + restart: unless-stopped + network_mode: host + volumes: + - "/etc/localtime:/etc/localtime:ro" + - "/etc/timezone:/etc/timezone:ro" + - "./volumes/openhab/addons:/openhab/addons" + - "./volumes/openhab/conf:/openhab/conf" + - "./volumes/openhab/userdata:/openhab/userdata" + environment: + - OPENHAB_HTTP_PORT=4050 + - OPENHAB_HTTPS_PORT=4051 + - EXTRA_JAVA_OPTS=-Duser.timezone=Etc/UTC + x-logging: + options: + max-size: "5m" + max-file: "3" + diff --git a/.templates/otbr/build.py b/.templates/otbr/build.py new file mode 100755 index 000000000..cecdf58c6 --- /dev/null +++ b/.templates/otbr/build.py @@ -0,0 +1,392 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. 
Required to make local vars work correctly +def main(): + import os + import time + import ruamel.yaml + import signal + import sys + from blessed import Terminal + + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine, padText + from deps.consts import servicesDirectory, templatesDirectory, buildSettingsFileName, buildCache, servicesFileName + from deps.common_functions import getExternalPorts, getInternalPorts, checkPortConflicts, enterPortNumberWithWhiptail + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + global hideHelpText # Showing and hiding the help controls text + global serviceService + global hasRebuiltHardwareSelection + + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + buildSettings = serviceService + buildSettingsFileName + + hasRebuiltHardwareSelection = False + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + documentationHint = 'https://openthread.io/guides/border-router/docker' + + # runtime vars + portConflicts = [] + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. 
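+    # For otbr this is where the selected Thread radio gets wired in: preBuild
+    # reads the saved build_settings.yml, takes the first entry on its
+    # "hardware" list, and rewrites this service's compose entry so that (for
+    # example, assuming /dev/ttyACM0 was selected) it ends up with:
+    #   command: --radio-url spinel+hdlc+uart:///dev/ttyACM0
+    #   volumes:
+    #     - /dev/ttyACM0:/dev/ttyACM0   # appended to the template's volumes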
+ def preBuild(): + global dockerComposeServicesYaml + global currentServiceName + with open("{serviceDir}{buildSettings}".format(serviceDir=serviceService, buildSettings=buildSettingsFileName)) as objHardwareListFile: + otbrYamlBuildOptions = yaml.load(objHardwareListFile) + + with open((r'%s/' % serviceTemplate) + servicesFileName) as objServiceFile: + serviceYamlTemplate = yaml.load(objServiceFile) + + oldBuildCache = {} + try: + with open(r'%s' % buildCache) as objBuildCache: + oldBuildCache = yaml.load(objBuildCache) + except: + pass + + + buildCacheServices = {} + if "services" in oldBuildCache: + buildCacheServices = oldBuildCache["services"] + + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + + try: + if currentServiceName in dockerComposeServicesYaml: + if otbrYamlBuildOptions["hardware"] and len(otbrYamlBuildOptions["hardware"]) == 1: + newCommand = "--radio-url spinel+hdlc+uart://" + otbrYamlBuildOptions["hardware"][0] + dockerComposeServicesYaml[currentServiceName]["command"] = newCommand + dockerComposeServicesYaml[currentServiceName]["volumes"].append(otbrYamlBuildOptions["hardware"][0] + ":" + otbrYamlBuildOptions["hardware"][0]) + + except Exception as err: + print("Error setting otbr hardware: ", err) + time.sleep(10) + return False + + + + + return True + + # ##################################### + # Supporting functions below + # ##################################### + + def checkForIssues(): + passed = True + try: + with open("{serviceDir}{buildSettings}".format(serviceDir=serviceService, buildSettings=buildSettingsFileName)) as objHardwareListFile: + otbrYamlBuildOptions = yaml.load(objHardwareListFile) + if not otbrYamlBuildOptions["hardware"] or len(otbrYamlBuildOptions["hardware"]) < 1: + issues["hardware"] = "No Thread radio selected." + passed = False + if otbrYamlBuildOptions["hardware"] and len(otbrYamlBuildOptions["hardware"]) > 1: + issues["hardware"] = "Two or more thread radios selected. The first listed one will be used" + passed = False + except Exception as err: + issues["hardware"] = "No Thread radio selected." 
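+        # Port-conflict sweep, same pattern as the other build scripts:
+        # compare this service's externally published ports against every
+        # other selected service and record any clashes in issues["portConflicts"].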
+ for (index, serviceName) in enumerate(dockerComposeServicesYaml): + if not currentServiceName == serviceName: # Skip self + currentServicePorts = getExternalPorts(currentServiceName, dockerComposeServicesYaml) + portConflicts = checkPortConflicts(serviceName, currentServicePorts, dockerComposeServicesYaml) + if (len(portConflicts) > 0): + issues["portConflicts"] = portConflicts + passed = False + + # ##################################### + # End Supporting functions + # ##################################### + + + ############################ + # Menu Logic + ############################ + + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + + selectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + needsRender = 1 + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + + def goBack(): + global selectionInProgress + global needsRender + selectionInProgress = False + needsRender = 1 + return True + + def selectMatterHardware(): + global needsRender + global hasRebuiltHardwareSelection + threadSelectHardwareFilePath = "./.templates/otbr/select_hardware.py" + with open(threadSelectHardwareFilePath, "rb") as pythonDynamicImportFile: + code = compile(pythonDynamicImportFile.read(), threadSelectHardwareFilePath, "exec") + # execGlobals = globals() + # execLocals = locals() + execGlobals = { + "currentServiceName": currentServiceName, + "renderMode": renderMode + } + execLocals = {} + screenActive = False + exec(code, execGlobals, execLocals) + signal.signal(signal.SIGWINCH, onResize) + try: + hasRebuiltHardwareSelection = execGlobals["hasRebuiltHardwareSelection"] + except: + hasRebuiltHardwareSelection = False + screenActive = True + needsRender = 1 + + def onResize(sig, action): + global threadBuildOptions + global currentMenuItemIndex + mainRender(1, threadBuildOptions, currentMenuItemIndex) + + threadBuildOptions = [] + + def createMenu(): + global threadBuildOptions + global serviceService + threadBuildOptions = [] + + + if os.path.exists("{buildSettings}".format(buildSettings=buildSettings)): + threadBuildOptions.insert(0, ["Change selected hardware", selectMatterHardware]) + else: + threadBuildOptions.insert(0, ["Select hardware", selectMatterHardware]) + + threadBuildOptions.append(["Go back", goBack]) + + def runOptionsMenu(): + createMenu() + menuEntryPoint() + return True + + def renderHotZone(term, menu, selection, hotzoneLocation): + lineLengthAtTextStart = 71 + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + for (index, menuItem) in enumerate(menu): + toPrint = "" + if index == selection: + toPrint += ('{bv} -> {t.blue_on_green} {title} {t.normal} <-'.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + else: + toPrint += ('{bv} {t.normal} {title} '.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + + for i in range(lineLengthAtTextStart - len(menuItem[0])): + toPrint += " " + + toPrint += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + + toPrint = term.center(toPrint) + + print(toPrint) + + def mainRender(needsRender, menu, selection): + global hasRebuiltHardwareSelection + term = Terminal() + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack Thread Options'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} 
Select Option to configure {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, menu, selection, hotzoneLocation) + + if needsRender == 1: + if os.path.exists("{buildSettings}".format(buildSettings=buildSettings)): + if hasRebuiltHardwareSelection: + print(term.center(commonEmptyLine(renderMode))) + print(term.center('{bv} {t.grey_on_blue4} {text} {t.normal}{t.white_on_black}{t.normal} {bv}'.format(t=term, text="Hardware list has been rebuilt: build_settings.yml", bv=specialChars[renderMode]["borderVertical"]))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center('{bv} {t.grey_on_blue4} {text} {t.normal}{t.white_on_black}{t.normal} {bv}'.format(t=term, text="Using existing build_settings.yml for hardware installation", bv=specialChars[renderMode]["borderVertical"]))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to run command or save input {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to go back to build stack menu {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} There are extra steps you need to do before Thread will work. Be sure {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} to read the documentation. IPv6 and flashing custon firmware on {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} a Thread ready USB radio. 
{bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + if len(documentationHint) > 1: + if len(documentationHint) > 56: + documentationAndPadding = padText(documentationHint, 71) + print(term.center("{bv} Documentation: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + else: + documentationAndPadding = padText(documentationHint, 56) + print(term.center("{bv} Documentation: {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + global threadBuildOptions + if len(threadBuildOptions[selection]) > 1 and isinstance(threadBuildOptions[selection][1], types.FunctionType): + threadBuildOptions[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(nodeRedBuildOptions[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 2: + if menu[index][2]["skip"] == True: + return False + return True + + def menuEntryPoint(): + # These need to be reglobalised due to eval() + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + global hideHelpText + global threadBuildOptions + term = Terminal() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, threadBuildOptions, currentMenuItemIndex) + selectionInProgress = True + with term.cbreak(): + while selectionInProgress: + menuNavigateDirection = 0 + + if needsRender: # Only rerender when changed to prevent flickering + mainRender(needsRender, threadBuildOptions, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + menuNavigateDirection += 1 + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_LEFT': + goBack() + if key.name == 'KEY_ENTER': + runSelection(currentMenuItemIndex) + if key.name == 'KEY_ESCAPE': + return True + elif key: + if key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, threadBuildOptions, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(threadBuildOptions) + needsRender = 2 + + while not isMenuItemSelectable(threadBuildOptions, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(threadBuildOptions) + return True + + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'otbr': + main() +else: + print("Error. 
'{}' Tried to run 'otbr' config".format(currentServiceName))
diff --git a/.templates/otbr/readme.md b/.templates/otbr/readme.md
new file mode 100644
index 000000000..c66443e10
--- /dev/null
+++ b/.templates/otbr/readme.md
@@ -0,0 +1,68 @@
+# OTBR (OpenThread Border Router)
+
+The container will fail to show the WUI until IPv6 is enabled on the RPi. You can enable it by running the following commands:
+```
+$ sudo modprobe ip6_tables
+$ sudo modprobe ip6table_filter
+```
+
+To make this persist across reboots:
+```
+$ echo "ip6_tables" | sudo tee -a /etc/modules
+$ echo "ip6table_filter" | sudo tee -a /etc/modules
+```
+
+Edit the Docker daemon config with `sudo nano /etc/docker/daemon.json` and add:
+```
+{
+  "ipv6": true,
+  "fixed-cidr-v6": "2001:db8:1::/64"
+}
+```
+
+Then restart Docker:
+```
+$ sudo systemctl restart docker
+```
+
+I have this working successfully with a MakerDiary nRF52840 USB Thread radio. It requires custom firmware to be flashed onto it.
+
+You can flash the USB radio with the `openthread/environment:latest` Docker image. You only need to flash the firmware once; the same radio has run on every platform I've tested with OTBR: bare-metal, Docker, IOTstack, and Kubernetes (containerd).
+
+Run the following commands inside the `openthread/environment` Docker container:
+```
+$ git clone https://github.com/openthread/ot-nrf528xx.git
+$ cd ot-nrf528xx/
+$ git submodule update --init
+$ ./script/build nrf52840 USB_trans -DOT_BOOTLOADER=USB
+```
+
+The next step depends on the type of nRF52840 you're using. For a MakerDiary, convert the built binary to a .hex file and then to a UF2 file, then mount the board as a drive and drag the UF2 file onto it:
+```
+$ arm-none-eabi-objcopy -O ihex build/bin/ot-cli-ftd ot-cli-ftd.hex
+$ pip install --pre -U git+https://github.com/makerdiary/uf2utils.git@main
+$ uf2conv -f 0xADA52840 -c -b 0x1000 -o build/bin/ot-cli-ftd.uf2 build/bin/ot-cli-ftd
+```
+
+Since I run Zigbee and Z-Wave on the same device, I mounted the nRF52840 this way in `compose-override.yml`:
+```
+services:
+  otbr:
+    volumes:
+      - ./volumes/otbr/data:/var/lib/otbr
+      - ./volumes/otbr/wpantund:/etc/wpantund.conf
+      - ./volumes/otbr/config:/etc/otbr
+      - /dev/serial/by-id/usb-Nordic_Semiconductor_nRF528xx_OpenThread_Device_XXXXXXXXXXX-if00:/dev/ttyACM0
+```
+
+Note that the device serial number has been replaced with Xs. You can find yours by running:
+```
+ls -ahl /dev
+```
+
+You need to have flashed the OTBR firmware before running this command, as the device will appear under a different name while running the stock firmware.
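+
+If you only want to see the serial devices, listing the stable `by-id` names (the same form used in the volume mapping above, which survives reboots and re-enumeration) also works:
+```
+ls -l /dev/serial/by-id/
+```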
+ +Links: +* https://openthread.io/guides/border-router/docker (OTBR running in docker) +* https://openthread.io/guides/build/index.md (Radio/Node/RCP binary compile and firmware flashing) +* https://openthread.io/guides/border-router/raspberry-pi (Running on RPi 3+ bare-metal) \ No newline at end of file diff --git a/.templates/otbr/select_hardware.py b/.templates/otbr/select_hardware.py new file mode 100755 index 000000000..08d3c49dd --- /dev/null +++ b/.templates/otbr/select_hardware.py @@ -0,0 +1,337 @@ +#!/usr/bin/env python3 + +import signal + +def main(): + from blessed import Terminal + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine + from deps.consts import servicesDirectory, templatesDirectory, buildSettingsFileName + import time + import subprocess + import ruamel.yaml + import os + + global signal + global currentServiceName + global dockerCommandsSelectionInProgress + global mainMenuList + global currentMenuItemIndex + global renderMode + global paginationSize + global paginationStartIndex + global hardwareFile + global hideHelpText + + global installCommand + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + paginationToggle = [10, term.height - 25] + paginationStartIndex = 0 + paginationSize = paginationToggle[0] + + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + hardwareFileSource = serviceTemplate + '/thread_hardware.yml' + + def goBack(): + global dockerCommandsSelectionInProgress + global needsRender + dockerCommandsSelectionInProgress = False + needsRender = 1 + return True + + mainMenuList = [] + + hotzoneLocation = [((term.height // 16) + 6), 0] + + dockerCommandsSelectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + + # Render Modes: + # 0 = No render needed + # 1 = Full render + # 2 = Hotzone only + needsRender = 1 + + def onResize(sig, action): + global mainMenuList + global currentMenuItemIndex + mainRender(1, mainMenuList, currentMenuItemIndex) + + def generateLineText(text, textLength=None, paddingBefore=0, lineLength=64): + result = "" + for i in range(paddingBefore): + result += " " + + textPrintableCharactersLength = textLength + + if (textPrintableCharactersLength) == None: + textPrintableCharactersLength = len(text) + + result += text + remainingSpace = lineLength - textPrintableCharactersLength + + for i in range(remainingSpace): + result += " " + + return result + + def renderHotZone(term, renderType, menu, selection, hotzoneLocation, paddingBefore = 4): + global paginationSize + selectedTextLength = len("-> ") + + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + + if paginationStartIndex >= 1: + print(term.center("{b} {uaf} {uaf}{uaf}{uaf} {ual} {b}".format( + b=specialChars[renderMode]["borderVertical"], + uaf=specialChars[renderMode]["upArrowFull"], + ual=specialChars[renderMode]["upArrowLine"] + ))) + else: + print(term.center(commonEmptyLine(renderMode))) + + for (index, menuItem) in enumerate(menu): # Menu loop + if index >= paginationStartIndex and index < paginationStartIndex + paginationSize: + lineText = generateLineText(menuItem[0], paddingBefore=paddingBefore) + + # Menu highlight logic + if index == selection: + formattedLineText = '-> {t.blue_on_green}{title}{t.normal} <-'.format(t=term, 
title=menuItem[0]) + paddedLineText = generateLineText(formattedLineText, textLength=len(menuItem[0]) + selectedTextLength, paddingBefore=paddingBefore - selectedTextLength) + toPrint = paddedLineText + else: + toPrint = '{title}{t.normal}'.format(t=term, title=lineText) + # ##### + + # Menu check render logic + if menuItem[1]["checked"]: + toPrint = " (X) " + toPrint + else: + toPrint = " ( ) " + toPrint + + toPrint = "{bv} {toPrint} {bv}".format(bv=specialChars[renderMode]["borderVertical"], toPrint=toPrint) # Generate border + toPrint = term.center(toPrint) # Center Text (All lines should have the same amount of printable characters) + # ##### + print(toPrint) + + if paginationStartIndex + paginationSize < len(menu): + print(term.center("{b} {daf} {daf}{daf}{daf} {dal} {b}".format( + b=specialChars[renderMode]["borderVertical"], + daf=specialChars[renderMode]["downArrowFull"], + dal=specialChars[renderMode]["downArrowLine"] + ))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + + + def mainRender(needsRender, menu, selection): + global paginationStartIndex + global paginationSize + term = Terminal() + + if selection >= paginationStartIndex + paginationSize: + paginationStartIndex = selection - (paginationSize - 1) + 1 + needsRender = 1 + + if selection <= paginationStartIndex - 1: + paginationStartIndex = selection + needsRender = 1 + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack Thread'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Thread hardware {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, needsRender, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + if term.height < 32: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Not enough vertical room to render controls help text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Space] to select or deselect hardware {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to build and save hardware list {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to cancel changes {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + if len(mainMenuList[selection]) > 1 and isinstance(mainMenuList[selection][1], types.FunctionType): + mainMenuList[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: 
"{}"'.format(mainMenuList[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 1: + if "skip" in menu[index][1] and menu[index][1]["skip"] == True: + return False + return True + + def loadAddonsMenu(): + global mainMenuList + if os.path.exists(hardwareFileSource): + with open(r'%s' % hardwareFileSource) as objExtrasListFile: + hardwareKnown = yaml.load(objExtrasListFile) + knownExtrasList = hardwareKnown["hardwareList"] + if os.path.exists("{serviceDir}{buildSettings}".format(serviceDir=serviceService, buildSettings=buildSettingsFileName)): + with open("{serviceDir}{buildSettings}".format(serviceDir=serviceService, buildSettings=buildSettingsFileName)) as objSavedExtrasListFile: + savedExtrasList = yaml.load(objSavedExtrasListFile) + savedExtras = [] + + try: + savedExtras = savedExtrasList["hardware"] + except: + print("Error: Loading saved hardware selection. Please resave your selection.") + input("Press Enter to continue...") + + for (index, hardwarePath) in enumerate(knownExtrasList): + if hardwarePath in savedExtras: + mainMenuList.append([hardwarePath, { "checked": True }]) + else: + mainMenuList.append([hardwarePath, { "checked": False }]) + + else: # No saved list + for (index, hardwarePath) in enumerate(knownExtrasList): + if os.path.exists(hardwarePath): + mainMenuList.append([hardwarePath, { "checked": True }]) + else: + mainMenuList.append([hardwarePath, { "checked": False }]) + + + else: + print("Error: '{hardwareFile}' file doesn't exist.".format(hardwareFile=hardwareFileSource)) + input("Press Enter to continue...") + + def checkMenuItem(selection): + global mainMenuList + if mainMenuList[selection][1]["checked"] == True: + mainMenuList[selection][1]["checked"] = False + else: + uncheckAllMenuItems() + mainMenuList[selection][1]["checked"] = True + + def uncheckAllMenuItems(): + global mainMenuList + for menuItem in mainMenuList: + menuItem[1]["checked"] = False + + + def saveAddonList(): + try: + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + threadYamlExtrasList = { + "version": "1", + "application": "IOTstack", + "service": "otbr", + "comment": "Build Settings", + "hardware": [] + } + for (index, addon) in enumerate(mainMenuList): + if addon[1]["checked"]: + threadYamlExtrasList["hardware"].append(addon[0]) + + with open("{serviceDir}{buildSettings}".format(serviceDir=serviceService, buildSettings=buildSettingsFileName), 'w') as outputFile: + yaml.dump(threadYamlExtrasList, outputFile) + + except Exception as err: + print("Error saving Thread hardware list", currentServiceName) + print(err) + return False + global hasRebuiltExtrasSelection + hasRebuiltExtrasSelection = True + return True + + + if __name__ == 'builtins': + global signal + term = Terminal() + signal.signal(signal.SIGWINCH, onResize) + loadAddonsMenu() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + dockerCommandsSelectionInProgress = True + with term.cbreak(): + while dockerCommandsSelectionInProgress: + menuNavigateDirection = 0 + + if not needsRender == 0: # Only rerender when changed to prevent flickering + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + if paginationSize == paginationToggle[0]: + paginationSize = paginationToggle[1] + else: + paginationSize = paginationToggle[0] + mainRender(1, mainMenuList, 
currentMenuItemIndex) + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_ENTER': + if saveAddonList(): + return True + else: + print("Something went wrong. Try saving the list again.") + if key.name == 'KEY_ESCAPE': + dockerCommandsSelectionInProgress = False + return True + elif key: + if key == ' ': # Space pressed + checkMenuItem(currentMenuItemIndex) # Update checked list + needsRender = 2 + elif key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, mainMenuList, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + needsRender = 2 + + while not isMenuItemSelectable(mainMenuList, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + return True + + return True + +originalSignalHandler = signal.getsignal(signal.SIGINT) +main() +signal.signal(signal.SIGWINCH, originalSignalHandler) diff --git a/.templates/otbr/service.yml b/.templates/otbr/service.yml new file mode 100644 index 000000000..a0ada6e67 --- /dev/null +++ b/.templates/otbr/service.yml @@ -0,0 +1,23 @@ +otbr: + container_name: otbr + image: openthread/otbr:latest + restart: unless-stopped + # network_mode: host + privileged: true + sysctls: + net.ipv6.conf.all.disable_ipv6: 0 + net.ipv4.conf.all.forwarding: 1 + net.ipv6.conf.all.forwarding: 1 + dns: + - 127.0.0.1 + stdin_open: true + tty: true + volumes: + - ./volumes/otbr/data:/var/lib/otbr + - ./volumes/otbr/wpantund:/etc/wpantund.conf + - ./volumes/otbr/config:/etc/otbr + ports: + - "8283:80" + command: > + --radio-url spinel+hdlc+uart:///dev/ttyX # Example + diff --git a/.templates/otbr/thread_hardware.yml b/.templates/otbr/thread_hardware.yml new file mode 100644 index 000000000..4a082e7ac --- /dev/null +++ b/.templates/otbr/thread_hardware.yml @@ -0,0 +1,11 @@ +version: 1 +application: "IOTstack" +service: "obtr" +comment: "Thread Hardware" +hardwareList: + - "/dev/ttyACM0" + - "/dev/ttyACM1" + - "/dev/ttyACM2" + - "/dev/ttyUSB0" + - "/dev/ttyUSB1" + - "/dev/ttyUSB2" diff --git a/.templates/pgadmin4/service.yml b/.templates/pgadmin4/service.yml new file mode 100644 index 000000000..8c8814f15 --- /dev/null +++ b/.templates/pgadmin4/service.yml @@ -0,0 +1,13 @@ +pgadmin4: + container_name: pgadmin4 + image: gpongelli/pgadmin4-arm:latest-armv7 + platform: linux/arm/v7 + # image: gpongelli/pgadmin4-arm:latest-armv8 + restart: unless-stopped + environment: + - TZ=${TZ:-Etc/UTC} + ports: + - "5050:5050" + volumes: + - ./volumes/pgadmin4:/pgadmin4 + diff --git a/.templates/pihole/pihole.env b/.templates/pihole/pihole.env deleted file mode 100644 index 2f7680704..000000000 --- a/.templates/pihole/pihole.env +++ /dev/null @@ -1,16 +0,0 @@ -#TZ=America/Chicago -WEBPASSWORD=pihole -#DNS1=8.8.8.8 -#DNS2=8.8.4.4 -#DNSSEC=false -#DNS_BOGUS_PRIV=True -#CONDITIONAL_FORWARDING=False -#CONDITIONAL_FORWARDING_IP=your_router_ip_here (only if CONDITIONAL_FORWARDING=ture) -#CONDITIONAL_FORWARDING_DOMAIN=optional -#CONDITIONAL_FORWARDING_REVERSE=optional -#ServerIP=your_Pi's_IP_here << recommended -#ServerIPv6= your_Pi's_ipv6_here << Required if using ipv6 -#VIRTUAL_HOST=$ServerIP -#IPv6=True -INTERFACE=eth0 -#DNSMASQ_LISTENING=local \ No newline at end of file diff --git 
a/.templates/pihole/service.yml b/.templates/pihole/service.yml index b9c1eb3b4..a6988812b 100644 --- a/.templates/pihole/service.yml +++ b/.templates/pihole/service.yml @@ -1,22 +1,26 @@ - pihole: - container_name: pihole - image: pihole/pihole:latest - ports: - - "53:53/tcp" - - "53:53/udp" - - "67:67/udp" - - "8089:80/tcp" - #- "443:443/tcp" - env_file: - - ./services/pihole/pihole.env - volumes: - - ./volumes/pihole/etc-pihole/:/etc/pihole/ - - ./volumes/pihole/etc-dnsmasq.d/:/etc/dnsmasq.d/ - dns: - - 127.0.0.1 - - 1.1.1.1 - # Recommended but not required (DHCP needs NET_ADMIN) - # https://github.com/pi-hole/docker-pi-hole#note-on-capabilities - cap_add: - - NET_ADMIN - restart: unless-stopped +pihole: + container_name: pihole + image: pihole/pihole:latest + ports: + - "8089:80/tcp" + - "53:53/tcp" + - "53:53/udp" + - "67:67/udp" + environment: + - TZ=${TZ:-Etc/UTC} + - WEBPASSWORD= + # see https://sensorsiot.github.io/IOTstack/Containers/Pi-hole/#adminPassword + - INTERFACE=eth0 + - FTLCONF_MAXDBDAYS=365 + - PIHOLE_DNS_=8.8.8.8;8.8.4.4 + # see https://github.com/pi-hole/docker-pi-hole#environment-variables + volumes: + - ./volumes/pihole/etc-pihole:/etc/pihole + - ./volumes/pihole/etc-dnsmasq.d:/etc/dnsmasq.d + dns: + - 127.0.0.1 + - 1.1.1.1 + cap_add: + - NET_ADMIN + restart: unless-stopped + diff --git a/.templates/plex/build.py b/.templates/plex/build.py new file mode 100755 index 000000000..1ea53f3a8 --- /dev/null +++ b/.templates/plex/build.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. Required to make local vars work correctly +def main(): + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + + # runtime vars + portConflicts = [] + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. 
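+    # For plex nothing needs to be generated at build time: preBuild and
+    # postBuild below are effectively no-ops and the template's service.yml is
+    # used unchanged. (plex runs with network_mode: host, so there are no
+    # published ports for the menu to check for conflicts either.)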
+ def preBuild(): + return True + + # ##################################### + # Supporting functions below + # ##################################### + + def checkForIssues(): + return True + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'plex': + main() +else: + print("Error. '{}' Tried to run 'plex' config".format(currentServiceName)) diff --git a/.templates/plex/service.yml b/.templates/plex/service.yml index c2ca47db4..2e642dc6e 100644 --- a/.templates/plex/service.yml +++ b/.templates/plex/service.yml @@ -1,15 +1,13 @@ - plex: - image: linuxserver/plex - container_name: plex - network_mode: host - environment: - - PUID=1000 - - PGID=1000 - - VERSION=docker - #- UMASK_SET=022 #optional - volumes: - - ./volumes/plex/config:/config - #- ~/mnt/HDD/tvseries:/tv - #- ~/mnt/HDD/movies:/movies - - ./volumes/plex/transcode:/transcode - restart: unless-stopped +plex: + image: linuxserver/plex + container_name: plex + network_mode: host + environment: + - PUID=1000 + - PGID=1000 + - VERSION=docker + volumes: + - ./volumes/plex/config:/config + - ./volumes/plex/transcode:/transcode + restart: unless-stopped + diff --git a/.templates/portainer-ce/build.py b/.templates/portainer-ce/build.py new file mode 100755 index 000000000..475562973 --- /dev/null +++ b/.templates/portainer-ce/build.py @@ -0,0 +1,316 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. Required to make local vars work correctly +def main(): + import os + import time + import ruamel.yaml + import signal + import sys + from blessed import Terminal + + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine, padText + from deps.consts import servicesDirectory, templatesDirectory + from deps.common_functions import getExternalPorts, getInternalPorts, checkPortConflicts, enterPortNumberWithWhiptail + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + global hideHelpText # Showing and hiding the help controls text + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + documentationHint = 'https://sensorsiot.github.io/IOTstack/Containers/Portainer-ce' + + # runtime vars + portConflicts = [] + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. 
+ def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. + def preBuild(): + return True + + # ##################################### + # Supporting functions below + # ##################################### + + def checkForIssues(): + for (index, serviceName) in enumerate(dockerComposeServicesYaml): + if not currentServiceName == serviceName: # Skip self + currentServicePorts = getExternalPorts(currentServiceName, dockerComposeServicesYaml) + portConflicts = checkPortConflicts(serviceName, currentServicePorts, dockerComposeServicesYaml) + if (len(portConflicts) > 0): + issues["portConflicts"] = portConflicts + + # ##################################### + # End Supporting functions + # ##################################### + + ############################ + # Menu Logic + ############################ + + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + + selectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + needsRender = 1 + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + + def goBack(): + global selectionInProgress + global needsRender + selectionInProgress = False + needsRender = 1 + return True + + def enterPortNumberExec(): + # global term + global needsRender + global dockerComposeServicesYaml + externalPort = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + internalPort = getInternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + newPortNumber = enterPortNumberWithWhiptail(term, dockerComposeServicesYaml, currentServiceName, hotzoneLocation, externalPort) + + if newPortNumber > 0: + dockerComposeServicesYaml[currentServiceName]["ports"][0] = "{newExtPort}:{oldIntPort}".format( + newExtPort = newPortNumber, + oldIntPort = internalPort + ) + createMenu() + needsRender = 1 + + def onResize(sig, action): + global portainerCeBuildOptions + global currentMenuItemIndex + mainRender(1, portainerCeBuildOptions, currentMenuItemIndex) + + portainerCeBuildOptions = [] + + def createMenu(): + global portainerCeBuildOptions + try: + portainerCeBuildOptions = [] + portNumber = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + portainerCeBuildOptions.append([ + "Change external WUI Port Number from: {port}".format(port=portNumber), + enterPortNumberExec + ]) + except: # Error getting port + pass + portainerCeBuildOptions.append(["Go back", goBack]) + + def runOptionsMenu(): + createMenu() + menuEntryPoint() + return True + + def renderHotZone(term, menu, selection, hotzoneLocation): + lineLengthAtTextStart = 71 + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + for (index, menuItem) in enumerate(menu): + toPrint = "" + if index == selection: + toPrint += ('{bv} -> {t.blue_on_green} {title} {t.normal} <-'.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + else: + toPrint += ('{bv} {t.normal} {title} '.format(t=term, title=menuItem[0], 
bv=specialChars[renderMode]["borderVertical"])) + + for i in range(lineLengthAtTextStart - len(menuItem[0])): + toPrint += " " + + toPrint += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + + toPrint = term.center(toPrint) + + print(toPrint) + + def mainRender(needsRender, menu, selection): + term = Terminal() + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack Portainer-CE Options'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Option to configure {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to run command or save input {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to go back to build stack menu {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + if len(documentationHint) > 1: + if len(documentationHint) > 56: + documentationAndPadding = padText(documentationHint, 71) + print(term.center("{bv} Documentation: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + else: + documentationAndPadding = padText(documentationHint, 56) + print(term.center("{bv} Documentation: {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + global portainerCeBuildOptions + if len(portainerCeBuildOptions[selection]) > 1 and isinstance(portainerCeBuildOptions[selection][1], types.FunctionType): + portainerCeBuildOptions[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(nodeRedBuildOptions[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 2: + if menu[index][2]["skip"] == True: + return False + return True + + def menuEntryPoint(): + # These need to be reglobalised due to eval() + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + global hideHelpText + global portainerCeBuildOptions + term = Terminal() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, portainerCeBuildOptions, currentMenuItemIndex) + selectionInProgress = True + with term.cbreak(): + while selectionInProgress: + menuNavigateDirection = 0 + + if needsRender: # Only rerender when changed to prevent flickering + mainRender(needsRender, 
portainerCeBuildOptions, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + menuNavigateDirection += 1 + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_LEFT': + goBack() + if key.name == 'KEY_ENTER': + runSelection(currentMenuItemIndex) + if key.name == 'KEY_ESCAPE': + return True + elif key: + if key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, portainerCeBuildOptions, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(portainerCeBuildOptions) + needsRender = 2 + + while not isMenuItemSelectable(portainerCeBuildOptions, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(portainerCeBuildOptions) + return True + + #################### + # End menu section + #################### + + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'portainer-ce': + main() +else: + print("Error. '{}' Tried to run 'portainer-ce' config".format(currentServiceName)) diff --git a/.templates/portainer-ce/service.yml b/.templates/portainer-ce/service.yml new file mode 100644 index 000000000..841f4a7a8 --- /dev/null +++ b/.templates/portainer-ce/service.yml @@ -0,0 +1,13 @@ +portainer-ce: + container_name: portainer-ce + image: portainer/portainer-ce + restart: unless-stopped + ports: + - "8000:8000" + - "9000:9000" + # HTTPS + - "9443:9443" + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - ./volumes/portainer-ce/data:/data + diff --git a/.templates/portainer/service.yml b/.templates/portainer/service.yml deleted file mode 100644 index b7bd194a4..000000000 --- a/.templates/portainer/service.yml +++ /dev/null @@ -1,9 +0,0 @@ - portainer: - container_name: portainer - image: portainer/portainer - restart: unless-stopped - ports: - - 9000:9000 - volumes: - - /var/run/docker.sock:/var/run/docker.sock - - ./volumes/portainer/data:/data diff --git a/.templates/portainer_agent/service.yml b/.templates/portainer_agent/service.yml new file mode 100644 index 000000000..d1b346b22 --- /dev/null +++ b/.templates/portainer_agent/service.yml @@ -0,0 +1,10 @@ +portainer_agent: + image: portainer/agent + container_name: portainer-agent + ports: + - "9001:9001" + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - /var/lib/docker/volumes:/var/lib/docker/volumes + restart: unless-stopped + diff --git a/.templates/postgres/postgres.env b/.templates/postgres/postgres.env deleted file mode 100644 index b492b3118..000000000 --- a/.templates/postgres/postgres.env +++ /dev/null @@ -1,3 +0,0 @@ -POSTGRES_USER=postuser -POSTGRES_PASSWORD=postpassword -POSTGRES_DB=postdb \ No newline at end of file diff --git a/.templates/postgres/service.yml b/.templates/postgres/service.yml index df7821a3d..c765e3a20 100644 --- a/.templates/postgres/service.yml +++ b/.templates/postgres/service.yml @@ -1,10 +1,15 @@ - postgres: - container_name: postgres - image: postgres - restart: unless-stopped - env_file: - - ./services/postgres/postgres.env - ports: - - 5432:5432 - volumes: - - 
./volumes/postgres/data:/var/lib/postgresql/data +postgres: + container_name: postgres + image: postgres + restart: unless-stopped + environment: + - TZ=${TZ:-Etc/UTC} + - POSTGRES_USER=${POSTGRES_USER:-postuser} + - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-IOtSt4ckpostgresDbPw} + - POSTGRES_DB=${POSTGRES_DB:-postdb} + ports: + - "5432:5432" + volumes: + - ./volumes/postgres/data:/var/lib/postgresql/data + - ./volumes/postgres/db_backup:/backup + diff --git a/.templates/postgres/terminal.sh b/.templates/postgres/terminal.sh index 2e179ac94..82c5a3246 100644 --- a/.templates/postgres/terminal.sh +++ b/.templates/postgres/terminal.sh @@ -2,5 +2,9 @@ echo 'Use the command "psql DATABASE USER" to enter your database, replace DATABASE and USER with your values' echo "Remember to end queries with a semicolon ;" +echo "" +echo "IOTstack postgres Documentation: https://sensorsiot.github.io/IOTstack/Containers/PostgreSQL/" +echo "" +echo "docker exec -it postgres bash" -docker exec -it postgres bash \ No newline at end of file +docker exec -it postgres bash diff --git a/.templates/prometheus-cadvisor/service.yml b/.templates/prometheus-cadvisor/service.yml new file mode 100644 index 000000000..66c53af83 --- /dev/null +++ b/.templates/prometheus-cadvisor/service.yml @@ -0,0 +1,12 @@ +prometheus-cadvisor: + container_name: prometheus-cadvisor + image: zcube/cadvisor:latest + restart: unless-stopped + ports: + - "8082:8080" + volumes: + - /:/rootfs:ro + - /var/run:/var/run:rw + - /sys:/sys:ro + - /var/lib/docker/:/var/lib/docker:ro + diff --git a/.templates/prometheus-nodeexporter/service.yml b/.templates/prometheus-nodeexporter/service.yml new file mode 100644 index 000000000..517fbea58 --- /dev/null +++ b/.templates/prometheus-nodeexporter/service.yml @@ -0,0 +1,7 @@ +prometheus-nodeexporter: + container_name: prometheus-nodeexporter + image: prom/node-exporter:latest + restart: unless-stopped + expose: + - "9100" + diff --git a/.templates/prometheus/Dockerfile b/.templates/prometheus/Dockerfile new file mode 100644 index 000000000..e39404b43 --- /dev/null +++ b/.templates/prometheus/Dockerfile @@ -0,0 +1,25 @@ +# Download base image +FROM prom/prometheus:latest + +USER root + +# where IOTstack template files are stored +ENV IOTSTACK_DEFAULTS_DIR="iotstack_defaults" +ENV IOTSTACK_CONFIG_DIR="/prometheus/config/" + +# copy template files to image +COPY --chown=nobody:nobody ${IOTSTACK_DEFAULTS_DIR} /${IOTSTACK_DEFAULTS_DIR} + +# add default config from image to template +RUN cp /etc/prometheus/prometheus.yml /${IOTSTACK_DEFAULTS_DIR} + +# replace the docker entry-point script +ENV IOTSTACK_ENTRY_POINT="docker-entrypoint.sh" +COPY ${IOTSTACK_ENTRY_POINT} /${IOTSTACK_ENTRY_POINT} +RUN chmod 755 /${IOTSTACK_ENTRY_POINT} +ENTRYPOINT ["/docker-entrypoint.sh"] +ENV IOTSTACK_ENTRY_POINT= + +USER nobody + +# EOF diff --git a/.templates/prometheus/docker-entrypoint.sh b/.templates/prometheus/docker-entrypoint.sh new file mode 100644 index 000000000..3c9cc7c91 --- /dev/null +++ b/.templates/prometheus/docker-entrypoint.sh @@ -0,0 +1,33 @@ +#!/bin/ash +set -e + +# set defaults for config structure ownership +UID="${IOTSTACK_UID:-nobody}" +GID="${IOTSTACK_GID:-nobody}" + +# were we launched as root? +if [ "$(id -u)" = "0" -a -d /"$IOTSTACK_DEFAULTS_DIR" ]; then + + # yes! ensure that the IOTSTACK_CONFIG_DIR exists + mkdir -p "$IOTSTACK_CONFIG_DIR" + + # populate runtime directory from the defaults + for P in /"$IOTSTACK_DEFAULTS_DIR"/* ; do + + C=$(basename "$P") + + if [ ! 
-e "$IOTSTACK_CONFIG_DIR/$C" ] ; then + + cp -a "$P" "$IOTSTACK_CONFIG_DIR/$C" + + fi + + done + + # enforce correct ownership + chown -R "$UID":"$GID" "$IOTSTACK_CONFIG_DIR" + +fi + +# launch prometheus with supplied arguments +exec /bin/prometheus "$@" diff --git a/.templates/prometheus/iotstack_defaults/config.yml b/.templates/prometheus/iotstack_defaults/config.yml new file mode 100644 index 000000000..989e5d6a4 --- /dev/null +++ b/.templates/prometheus/iotstack_defaults/config.yml @@ -0,0 +1,12 @@ +global: + scrape_interval: 10s + evaluation_interval: 10s + +scrape_configs: + - job_name: "iotstack" + static_configs: + - targets: + - localhost:9090 + - prometheus-cadvisor:8080 + - prometheus-nodeexporter:9100 + diff --git a/.templates/prometheus/service.yml b/.templates/prometheus/service.yml new file mode 100644 index 000000000..702678c8f --- /dev/null +++ b/.templates/prometheus/service.yml @@ -0,0 +1,23 @@ +prometheus: + container_name: prometheus + build: ./.templates/prometheus/. + restart: unless-stopped + user: "0" + ports: + - "9090:9090" + environment: + - IOTSTACK_UID=1000 + - IOTSTACK_GID=1000 + volumes: + - ./volumes/prometheus/data:/prometheus + command: + - '--config.file=/prometheus/config/config.yml' + # defaults are: + # - --config.file=/etc/prometheus/prometheus.yml + # - --storage.tsdb.path=/prometheus + # - --web.console.libraries=/usr/share/prometheus/console_libraries + # - --web.console.templates=/usr/share/prometheus/consoles + depends_on: + - prometheus-cadvisor + - prometheus-nodeexporter + diff --git a/.templates/python-matter-server/build.py b/.templates/python-matter-server/build.py new file mode 100755 index 000000000..261aceb8b --- /dev/null +++ b/.templates/python-matter-server/build.py @@ -0,0 +1,414 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. Required to make local vars work correctly +def main(): + import os + import time + import ruamel.yaml + import signal + import sys + from blessed import Terminal + + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine, padText + from deps.common_functions import getExternalPorts, getInternalPorts, checkPortConflicts, enterPortNumberWithWhiptail + from deps.consts import servicesDirectory, templatesDirectory, buildSettingsFileName, buildCache, servicesFileName + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + global hideHelpText # Showing and hiding the help controls text + global serviceService + global hasRebuiltExtrasSelection + + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + buildSettings = serviceService + buildSettingsFileName + + hasRebuiltExtrasSelection = False + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + documentationHint = 'https://github.com/home-assistant-libs/python-matter-server' + + # runtime vars + portConflicts = [] + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. 
+ def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. + def preBuild(): + global dockerComposeServicesYaml + global currentServiceName + try: + with open("{serviceDir}{buildSettings}".format(serviceDir=serviceService, buildSettings=buildSettingsFileName)) as objExtrasListFile: + pythonMatterServerYamlBuildOptions = yaml.load(objExtrasListFile) + + with open((r'%s/' % serviceTemplate) + servicesFileName) as objServiceFile: + serviceYamlTemplate = yaml.load(objServiceFile) + + oldBuildCache = {} + try: + with open(r'%s' % buildCache) as objBuildCache: + oldBuildCache = yaml.load(objBuildCache) + except: + pass + + buildCacheServices = {} + if "services" in oldBuildCache: + buildCacheServices = oldBuildCache["services"] + + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + + try: + if currentServiceName in dockerComposeServicesYaml: + if pythonMatterServerYamlBuildOptions["extras"]: + if "Mount Bluetooth: /run/dbus" in pythonMatterServerYamlBuildOptions["extras"]: + if not "/run/dbus:/run/dbus:ro" in dockerComposeServicesYaml[currentServiceName]["volumes"]: + dockerComposeServicesYaml[currentServiceName]["volumes"].append("/run/dbus:/run/dbus:ro") + + currentCommand = dockerComposeServicesYaml[currentServiceName]["command"] + if not "--bluetooth-adapter 0\n" in currentCommand: + newCommand = currentCommand + "--bluetooth-adapter 0\n" + dockerComposeServicesYaml[currentServiceName]["command"] = newCommand + else: + if "/run/dbus:/run/dbus:ro" in dockerComposeServicesYaml[currentServiceName]["volumes"]: + dockerComposeServicesYaml[currentServiceName]["volumes"].remove("/run/dbus:/run/dbus:ro") + + currentCommand = dockerComposeServicesYaml[currentServiceName]["command"] + if "--bluetooth-adapter 0\n" in currentCommand: + newCommand = currentCommand.replace("--bluetooth-adapter 0\n", "") + dockerComposeServicesYaml[currentServiceName]["command"] = newCommand + + if "Enabled Root Certificates" in pythonMatterServerYamlBuildOptions["extras"]: + currentCommand = dockerComposeServicesYaml[currentServiceName]["command"] + if not "--paa-root-cert-dir /data/credentials\n" in currentCommand: + newCommand = currentCommand + "--paa-root-cert-dir /data/credentials\n" + dockerComposeServicesYaml[currentServiceName]["command"] = newCommand + else: + currentCommand = dockerComposeServicesYaml[currentServiceName]["command"] + if 
"--paa-root-cert-dir /data/credentials\n" in currentCommand: + newCommand = currentCommand.replace("--paa-root-cert-dir /data/credentials\n", "") + dockerComposeServicesYaml[currentServiceName]["command"] = newCommand + else: + currentCommand = dockerComposeServicesYaml[currentServiceName]["command"] + if "--paa-root-cert-dir /data/credentials\n" in currentCommand: + newCommand = currentCommand.replace("--paa-root-cert-dir /data/credentials\n", "") + dockerComposeServicesYaml[currentServiceName]["command"] = newCommand + + if "/run/dbus:/run/dbus:ro" in dockerComposeServicesYaml[currentServiceName]["volumes"]: + dockerComposeServicesYaml[currentServiceName]["volumes"].remove("/run/dbus:/run/dbus:ro") + + currentCommand = dockerComposeServicesYaml[currentServiceName]["command"] + if "--bluetooth-adapter 0\n" in currentCommand: + newCommand = currentCommand.replace("--bluetooth-adapter 0\n", "") + dockerComposeServicesYaml[currentServiceName]["command"] = newCommand + + except Exception as err: + print("Error setting pythonMatterServer extras: ", err) + time.sleep(10) + return False + except: + pass + + + + + return True + + # ##################################### + # Supporting functions below + # ##################################### + + def checkForIssues(): + for (index, serviceName) in enumerate(dockerComposeServicesYaml): + if not currentServiceName == serviceName: # Skip self + currentServicePorts = getExternalPorts(currentServiceName, dockerComposeServicesYaml) + portConflicts = checkPortConflicts(serviceName, currentServicePorts, dockerComposeServicesYaml) + if (len(portConflicts) > 0): + issues["portConflicts"] = portConflicts + + # ##################################### + # End Supporting functions + # ##################################### + + + ############################ + # Menu Logic + ############################ + + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + + selectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + needsRender = 1 + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + + def goBack(): + global selectionInProgress + global needsRender + selectionInProgress = False + needsRender = 1 + return True + + def selectMatterExtras(): + global needsRender + global hasRebuiltExtrasSelection + matterSelectHardwareFilePath = "./.templates/python-matter-server/select_extras.py" + with open(matterSelectHardwareFilePath, "rb") as pythonDynamicImportFile: + code = compile(pythonDynamicImportFile.read(), matterSelectHardwareFilePath, "exec") + # execGlobals = globals() + # execLocals = locals() + execGlobals = { + "currentServiceName": currentServiceName, + "renderMode": renderMode + } + execLocals = {} + screenActive = False + exec(code, execGlobals, execLocals) + signal.signal(signal.SIGWINCH, onResize) + try: + hasRebuiltExtrasSelection = execGlobals["hasRebuiltExtrasSelection"] + except: + hasRebuiltExtrasSelection = False + screenActive = True + needsRender = 1 + + def onResize(sig, action): + global matterBuildOptions + global currentMenuItemIndex + mainRender(1, matterBuildOptions, currentMenuItemIndex) + + matterBuildOptions = [] + + def createMenu(): + global matterBuildOptions + global serviceService + matterBuildOptions = [] + + + if os.path.exists("{buildSettings}".format(buildSettings=buildSettings)): + matterBuildOptions.insert(0, ["Change selected extras", selectMatterExtras]) + else: + matterBuildOptions.insert(0, ["Select extras", 
selectMatterExtras]) + + matterBuildOptions.append(["Go back", goBack]) + + def runOptionsMenu(): + createMenu() + menuEntryPoint() + return True + + def renderHotZone(term, menu, selection, hotzoneLocation): + lineLengthAtTextStart = 71 + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + for (index, menuItem) in enumerate(menu): + toPrint = "" + if index == selection: + toPrint += ('{bv} -> {t.blue_on_green} {title} {t.normal} <-'.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + else: + toPrint += ('{bv} {t.normal} {title} '.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + + for i in range(lineLengthAtTextStart - len(menuItem[0])): + toPrint += " " + + toPrint += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + + toPrint = term.center(toPrint) + + print(toPrint) + + def mainRender(needsRender, menu, selection): + global hasRebuiltExtrasSelection + term = Terminal() + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack Python Matter Server Options'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Option to configure {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, menu, selection, hotzoneLocation) + + if needsRender == 1: + if os.path.exists("{buildSettings}".format(buildSettings=buildSettings)): + if hasRebuiltExtrasSelection: + print(term.center(commonEmptyLine(renderMode))) + print(term.center('{bv} {t.grey_on_blue4} {text} {t.normal}{t.white_on_black}{t.normal} {bv}'.format(t=term, text="Extras list has been rebuilt: build_settings.yml", bv=specialChars[renderMode]["borderVertical"]))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center('{bv} {t.grey_on_blue4} {text} {t.normal}{t.white_on_black}{t.normal} {bv}'.format(t=term, text="Using existing build_settings.yml for hardware installation", bv=specialChars[renderMode]["borderVertical"]))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to run command or save input {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to go back to build stack menu {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + if len(documentationHint) > 1: + if len(documentationHint) > 56: + documentationAndPadding = padText(documentationHint, 71) + print(term.center("{bv} Documentation: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + else: + documentationAndPadding = padText(documentationHint, 56) + print(term.center("{bv} Documentation: {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], 
dap=documentationAndPadding))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + global matterBuildOptions + if len(matterBuildOptions[selection]) > 1 and isinstance(matterBuildOptions[selection][1], types.FunctionType): + matterBuildOptions[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(nodeRedBuildOptions[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 2: + if menu[index][2]["skip"] == True: + return False + return True + + def menuEntryPoint(): + # These need to be reglobalised due to eval() + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + global hideHelpText + global matterBuildOptions + term = Terminal() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, matterBuildOptions, currentMenuItemIndex) + selectionInProgress = True + with term.cbreak(): + while selectionInProgress: + menuNavigateDirection = 0 + + if needsRender: # Only rerender when changed to prevent flickering + mainRender(needsRender, matterBuildOptions, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + menuNavigateDirection += 1 + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_LEFT': + goBack() + if key.name == 'KEY_ENTER': + runSelection(currentMenuItemIndex) + if key.name == 'KEY_ESCAPE': + return True + elif key: + if key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, matterBuildOptions, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(matterBuildOptions) + needsRender = 2 + + while not isMenuItemSelectable(matterBuildOptions, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(matterBuildOptions) + return True + + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'python-matter-server': + main() +else: + print("Error. 
'{}' Tried to run 'python-matter-server' config".format(currentServiceName)) diff --git a/.templates/python-matter-server/matter_extras.yml b/.templates/python-matter-server/matter_extras.yml new file mode 100644 index 000000000..468ba1cd3 --- /dev/null +++ b/.templates/python-matter-server/matter_extras.yml @@ -0,0 +1,7 @@ +version: 1 +application: "IOTstack" +service: "python-matter-server" +comment: "Python Matter Server Extras" +extrasList: + - "Mount Bluetooth: /run/dbus" + - "Enabled Root Certificates" diff --git a/.templates/python-matter-server/select_extras.py b/.templates/python-matter-server/select_extras.py new file mode 100755 index 000000000..c07d15c9b --- /dev/null +++ b/.templates/python-matter-server/select_extras.py @@ -0,0 +1,330 @@ +#!/usr/bin/env python3 + +import signal + +def main(): + from blessed import Terminal + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine + from deps.consts import servicesDirectory, templatesDirectory, buildSettingsFileName + import time + import subprocess + import ruamel.yaml + import os + + global signal + global currentServiceName + global dockerCommandsSelectionInProgress + global mainMenuList + global currentMenuItemIndex + global renderMode + global paginationSize + global paginationStartIndex + global extrasFile + global hideHelpText + + global installCommand + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + paginationToggle = [10, term.height - 25] + paginationStartIndex = 0 + paginationSize = paginationToggle[0] + + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + extrasFileSource = serviceTemplate + '/matter_extras.yml' + + def goBack(): + global dockerCommandsSelectionInProgress + global needsRender + dockerCommandsSelectionInProgress = False + needsRender = 1 + return True + + mainMenuList = [] + + hotzoneLocation = [((term.height // 16) + 6), 0] + + dockerCommandsSelectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + + # Render Modes: + # 0 = No render needed + # 1 = Full render + # 2 = Hotzone only + needsRender = 1 + + def onResize(sig, action): + global mainMenuList + global currentMenuItemIndex + mainRender(1, mainMenuList, currentMenuItemIndex) + + def generateLineText(text, textLength=None, paddingBefore=0, lineLength=64): + result = "" + for i in range(paddingBefore): + result += " " + + textPrintableCharactersLength = textLength + + if (textPrintableCharactersLength) == None: + textPrintableCharactersLength = len(text) + + result += text + remainingSpace = lineLength - textPrintableCharactersLength + + for i in range(remainingSpace): + result += " " + + return result + + def renderHotZone(term, renderType, menu, selection, hotzoneLocation, paddingBefore = 4): + global paginationSize + selectedTextLength = len("-> ") + + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + + if paginationStartIndex >= 1: + print(term.center("{b} {uaf} {uaf}{uaf}{uaf} {ual} {b}".format( + b=specialChars[renderMode]["borderVertical"], + uaf=specialChars[renderMode]["upArrowFull"], + ual=specialChars[renderMode]["upArrowLine"] + ))) + else: + print(term.center(commonEmptyLine(renderMode))) + + for (index, menuItem) in enumerate(menu): # Menu loop + if index >= paginationStartIndex and index < 
paginationStartIndex + paginationSize: + lineText = generateLineText(menuItem[0], paddingBefore=paddingBefore) + + # Menu highlight logic + if index == selection: + formattedLineText = '-> {t.blue_on_green}{title}{t.normal} <-'.format(t=term, title=menuItem[0]) + paddedLineText = generateLineText(formattedLineText, textLength=len(menuItem[0]) + selectedTextLength, paddingBefore=paddingBefore - selectedTextLength) + toPrint = paddedLineText + else: + toPrint = '{title}{t.normal}'.format(t=term, title=lineText) + # ##### + + # Menu check render logic + if menuItem[1]["checked"]: + toPrint = " (X) " + toPrint + else: + toPrint = " ( ) " + toPrint + + toPrint = "{bv} {toPrint} {bv}".format(bv=specialChars[renderMode]["borderVertical"], toPrint=toPrint) # Generate border + toPrint = term.center(toPrint) # Center Text (All lines should have the same amount of printable characters) + # ##### + print(toPrint) + + if paginationStartIndex + paginationSize < len(menu): + print(term.center("{b} {daf} {daf}{daf}{daf} {dal} {b}".format( + b=specialChars[renderMode]["borderVertical"], + daf=specialChars[renderMode]["downArrowFull"], + dal=specialChars[renderMode]["downArrowLine"] + ))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + + + def mainRender(needsRender, menu, selection): + global paginationStartIndex + global paginationSize + term = Terminal() + + if selection >= paginationStartIndex + paginationSize: + paginationStartIndex = selection - (paginationSize - 1) + 1 + needsRender = 1 + + if selection <= paginationStartIndex - 1: + paginationStartIndex = selection + needsRender = 1 + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack Python Matter Server'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Python Matter Server {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, needsRender, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + if term.height < 32: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Not enough vertical room to render controls help text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Space] to select or deselect extras {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to build and save extras list {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to cancel changes {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def 
runSelection(selection): + import types + if len(mainMenuList[selection]) > 1 and isinstance(mainMenuList[selection][1], types.FunctionType): + mainMenuList[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(mainMenuList[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 1: + if "skip" in menu[index][1] and menu[index][1]["skip"] == True: + return False + return True + + def loadAddonsMenu(): + global mainMenuList + if os.path.exists(extrasFileSource): + with open(r'%s' % extrasFileSource) as objExtrasListFile: + extrasKnown = yaml.load(objExtrasListFile) + knownExtrasList = extrasKnown["extrasList"] + if os.path.exists("{serviceDir}{buildSettings}".format(serviceDir=serviceService, buildSettings=buildSettingsFileName)): + with open("{serviceDir}{buildSettings}".format(serviceDir=serviceService, buildSettings=buildSettingsFileName)) as objSavedExtrasListFile: + savedExtrasList = yaml.load(objSavedExtrasListFile) + savedExtras = [] + + try: + savedExtras = savedExtrasList["extras"] + except: + print("Error: Loading saved extras selection. Please resave your selection.") + input("Press Enter to continue...") + + for (index, extrasPath) in enumerate(knownExtrasList): + if extrasPath in savedExtras: + mainMenuList.append([extrasPath, { "checked": True }]) + else: + mainMenuList.append([extrasPath, { "checked": False }]) + + else: # No saved list + for (index, extrasPath) in enumerate(knownExtrasList): + if os.path.exists(extrasPath): + mainMenuList.append([extrasPath, { "checked": True }]) + else: + mainMenuList.append([extrasPath, { "checked": False }]) + + + else: + print("Error: '{extrasFile}' file doesn't exist.".format(extrasFile=extrasFileSource)) + input("Press Enter to continue...") + + def checkMenuItem(selection): + global mainMenuList + if mainMenuList[selection][1]["checked"] == True: + mainMenuList[selection][1]["checked"] = False + else: + mainMenuList[selection][1]["checked"] = True + + def saveAddonList(): + try: + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + pythonMatterServerYamlExtrasList = { + "version": "1", + "application": "IOTstack", + "service": "pythonMatterServer", + "comment": "Build Settings", + "extras": [] + } + for (index, addon) in enumerate(mainMenuList): + if addon[1]["checked"]: + pythonMatterServerYamlExtrasList["extras"].append(addon[0]) + + with open("{serviceDir}{buildSettings}".format(serviceDir=serviceService, buildSettings=buildSettingsFileName), 'w') as outputFile: + yaml.dump(pythonMatterServerYamlExtrasList, outputFile) + + except Exception as err: + print("Error saving Python Matter Server list", currentServiceName) + print(err) + return False + global hasRebuiltExtrasSelection + hasRebuiltExtrasSelection = True + return True + + + if __name__ == 'builtins': + global signal + term = Terminal() + signal.signal(signal.SIGWINCH, onResize) + loadAddonsMenu() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + dockerCommandsSelectionInProgress = True + with term.cbreak(): + while dockerCommandsSelectionInProgress: + menuNavigateDirection = 0 + + if not needsRender == 0: # Only rerender when changed to prevent flickering + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + if paginationSize == paginationToggle[0]: 
+ paginationSize = paginationToggle[1] + else: + paginationSize = paginationToggle[0] + mainRender(1, mainMenuList, currentMenuItemIndex) + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_ENTER': + if saveAddonList(): + return True + else: + print("Something went wrong. Try saving the list again.") + if key.name == 'KEY_ESCAPE': + dockerCommandsSelectionInProgress = False + return True + elif key: + if key == ' ': # Space pressed + checkMenuItem(currentMenuItemIndex) # Update checked list + needsRender = 2 + elif key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, mainMenuList, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + needsRender = 2 + + while not isMenuItemSelectable(mainMenuList, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + return True + + return True + +originalSignalHandler = signal.getsignal(signal.SIGINT) +main() +signal.signal(signal.SIGWINCH, originalSignalHandler) diff --git a/.templates/python-matter-server/service.yml b/.templates/python-matter-server/service.yml new file mode 100644 index 000000000..576965952 --- /dev/null +++ b/.templates/python-matter-server/service.yml @@ -0,0 +1,15 @@ +python-matter-server: + container_name: python-matter-server + image: ghcr.io/home-assistant-libs/python-matter-server:stable + restart: unless-stopped + network_mode: host + security_opt: + - apparmor=unconfined + volumes: + - ./volumes/python-matter-server/data:/data + # ports: # For reference only. Matter requires these ports. 
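  # Because this service runs with network_mode: host, docker-compose discards
  # any ports: mapping, which is presumably why the list below is kept purely
  # as commented reference values. A hedged sketch, only relevant if host
  # networking were not needed (Matter commissioning normally depends on it
  # for mDNS):
  #
  #   python-matter-server:
  #     ports:
  #       - "5580:5580"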
+ # - "5580:5580" + # - "5080:5080" + command: > + --storage-path /data + diff --git a/.templates/python/Dockerfile b/.templates/python/Dockerfile index f06fda813..c46ada4fb 100644 --- a/.templates/python/Dockerfile +++ b/.templates/python/Dockerfile @@ -1,9 +1,48 @@ FROM python:3 -WORKDIR /usr/src/app +# user+group ID defaults for IOTstack +ENV IOTSTACK_UID=1000 +ENV IOTSTACK_GID=1000 -COPY requirements.txt ./ +# the template folder is named +ENV TEMPLATE="app" -RUN pip install --no-cache-dir -r requirements.txt +# where IOTstack template files are stored +ENV PYTHON_DEFAULTS="/iotstack_defaults/${TEMPLATE}" -CMD [ "python", "./app.py" ] \ No newline at end of file +# ensure the defaults directory exists +RUN mkdir -p ${PYTHON_DEFAULTS} + +# copy template files to defaults directory in image +COPY ${TEMPLATE} ${PYTHON_DEFAULTS} + +# the requirements file (if it exists) is +ENV REQUIREMENTS="${PYTHON_DEFAULTS}/requirements.txt" + +# set up requirements (if the file exists) +RUN if [ -e ${REQUIREMENTS} ] ; then pip3 install --no-cache-dir -r ${REQUIREMENTS} ; fi + +# the python working directory is +ENV PYTHON_WORKDIR="/usr/src/${TEMPLATE}" + +# ensure the working directory exists +RUN mkdir -p ${PYTHON_WORKDIR} + +# add an entry-point script +ENV ENTRY_POINT="docker-entrypoint.sh" +COPY ${ENTRY_POINT} /${ENTRY_POINT} +RUN chmod 755 /${ENTRY_POINT} + +# unset variables that are not needed +ENV TEMPLATE= +ENV REQUIREMENTS= +ENV ENTRY_POINT= + +# set the working directory +WORKDIR ${PYTHON_WORKDIR} + +# away we go +ENTRYPOINT ["/docker-entrypoint.sh"] +CMD ["/usr/local/bin/python", "./app.py"] + +# EOF diff --git a/.templates/python/app/app.py b/.templates/python/app/app.py new file mode 100755 index 000000000..38d3e6eb1 --- /dev/null +++ b/.templates/python/app/app.py @@ -0,0 +1,8 @@ +import time + +print("The world is born. Hello World.", flush=True) + +while True: + + time.sleep(10) + print("The world is re-born. Hello World.", flush=True) diff --git a/.templates/python/build.py b/.templates/python/build.py new file mode 100755 index 000000000..9284091a0 --- /dev/null +++ b/.templates/python/build.py @@ -0,0 +1,139 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. Required to make local vars work correctly +def main(): + import os + import time + import shutil + import subprocess + import sys + + from deps.consts import servicesDirectory, templatesDirectory, volumesDirectory + from deps.common_functions import getExternalPorts, getInternalPorts, checkPortConflicts + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + global hideHelpText # Showing and hiding the help controls text + global serviceService + + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + serviceVolume = volumesDirectory + currentServiceName + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + # runtime vars + portConflicts = [] + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. 
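The hook functions below repeat the pattern used by the other build scripts; what is new in this template is the runtime behaviour wired up by the Dockerfile above and the docker-entrypoint.sh added further down: the entrypoint re-seeds any missing files from the baked-in defaults (cp -an), fixes ownership, then exec's the CMD, and service.yml mounts ./volumes/python/app over the working directory so user code lives on the host and survives rebuilds. A hedged usage sketch follows; my_script.py is a hypothetical file placed in that folder.

    # edit the persistent copy of the app, then rebuild and restart the service
    nano ./volumes/python/app/app.py
    docker-compose up -d --build python

    # one-off run of a different script with the same image; the entrypoint
    # still runs first and simply exec's whatever command is supplied
    docker-compose run --rm python python ./my_script.py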
+ def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. + def preBuild(): + # Setup service directory + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + + # copy supporting files + shutil.copy(r'%s/Dockerfile' % serviceTemplate, r'%s/Dockerfile' % serviceService) + shutil.copy(r'%s/docker-entrypoint.sh' % serviceTemplate, r'%s/docker-entrypoint.sh' % serviceService) + shutil.copytree(r'%s/app' % serviceTemplate, r'%s/app' % serviceService) + + return True + + # ##################################### + # Supporting functions below + # ##################################### + + def checkForIssues(): + envFileIssues = checkEnvFiles() + if (len(envFileIssues) > 0): + issues["envFileIssues"] = envFileIssues + for (index, serviceName) in enumerate(dockerComposeServicesYaml): + if not currentServiceName == serviceName: # Skip self + currentServicePorts = getExternalPorts(currentServiceName, dockerComposeServicesYaml) + portConflicts = checkPortConflicts(serviceName, currentServicePorts, dockerComposeServicesYaml) + if (len(portConflicts) > 0): + issues["portConflicts"] = portConflicts + + def checkEnvFiles(): + envFileIssues = [] + if not os.path.exists(serviceTemplate + '/app/requirements.txt'): + envFileIssues.append(serviceTemplate + '/app/requirements.txt does not exist') + if not os.path.exists(serviceTemplate + '/app/app.py'): + envFileIssues.append(serviceTemplate + '/app/app.py does not exist') + return envFileIssues + + # ##################################### + # End Supporting functions + # ##################################### + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'python': + main() +else: + print("Error. '{}' Tried to run 'python' config".format(currentServiceName)) diff --git a/.templates/python/directoryfix.sh b/.templates/python/directoryfix.sh deleted file mode 100755 index 437d4b45d..000000000 --- a/.templates/python/directoryfix.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -# Directoryfix for python - -if [ ! 
-d ./volumes/python/app ]; then - sudo mkdir -p ./volumes/python/app - sudo chown -R pi:pi ./volumes/python - echo 'print("hello world")' >./volumes/python/app/app.py - -fi diff --git a/.templates/python/docker-entrypoint.sh b/.templates/python/docker-entrypoint.sh new file mode 100755 index 000000000..85f44e39d --- /dev/null +++ b/.templates/python/docker-entrypoint.sh @@ -0,0 +1,21 @@ +#!/bin/bash +set -e + +# does the working directory exist (something is badly wrong if it does not) +if [ -d "$PYTHON_WORKDIR" ] ; then + + # are self-healing defaults available? + if [ -d "$PYTHON_DEFAULTS" ] ; then + + # yes! replace anything that has gone missing + cp -an "$PYTHON_DEFAULTS"/* "$PYTHON_WORKDIR" + + fi + + # set appropriate ownership throughout + chown -R "$IOTSTACK_UID:$IOTSTACK_GID" "$PYTHON_WORKDIR" + +fi + +# start python +exec "$@" diff --git a/.templates/python/service.yml b/.templates/python/service.yml index bcae78778..166bb220d 100644 --- a/.templates/python/service.yml +++ b/.templates/python/service.yml @@ -1,7 +1,13 @@ - python: - container_name: python - build: ./services/python/. - restart: unless-stopped - network_mode: host - volumes: - - ./volumes/python/app:/usr/src/app +python: + container_name: python + build: ./services/python/. + restart: unless-stopped + environment: + - TZ=Etc/UTC + - IOTSTACK_UID=1000 + - IOTSTACK_GID=1000 + x-ports: + - "external:internal" + volumes: + - ./volumes/python/app:/usr/src/app + diff --git a/.templates/qbittorrent/service.yml b/.templates/qbittorrent/service.yml new file mode 100644 index 000000000..55620b784 --- /dev/null +++ b/.templates/qbittorrent/service.yml @@ -0,0 +1,17 @@ +qbittorrent: + image: linuxserver/qbittorrent + container_name: qbittorrent + environment: + - PUID=1000 + - PGID=1000 + - UMASK_SET=022 + - WEBUI_PORT=15080 + volumes: + - ./volumes/qbittorrent/config:/config + - ./volumes/qbittorrent/downloads:/downloads + ports: + - "6881:6881" + - "6881:6881/udp" + - "15080:15080" + - "1080:1080" + diff --git a/.templates/ring-mqtt/service.yml b/.templates/ring-mqtt/service.yml new file mode 100644 index 000000000..7841f930f --- /dev/null +++ b/.templates/ring-mqtt/service.yml @@ -0,0 +1,17 @@ +ring-mqtt: + container_name: ring-mqtt + image: tsightler/ring-mqtt + restart: unless-stopped + environment: + - TZ=${TZ:-Etc/UTC} + - DEBUG=ring-* + ports: + - "8554:8554" + - "55123:55123" + volumes: + - ./volumes/ring-mqtt/data:/data + logging: + options: + max-size: 10m + max-file: "3" + diff --git a/.templates/rtl_433/Dockerfile b/.templates/rtl_433/Dockerfile index 8feebfec0..bab136671 100644 --- a/.templates/rtl_433/Dockerfile +++ b/.templates/rtl_433/Dockerfile @@ -5,6 +5,7 @@ ENV MQTT_PORT 1883 ENV MQTT_USER "" ENV MQTT_PASSWORD "" ENV MQTT_TOPIC RTL_433 +ENV RTL_PARAMS "-C si" RUN apt-get update && apt-get install -y git libtool libusb-1.0.0-dev librtlsdr-dev rtl-sdr cmake automake && \ git clone https://github.com/merbanan/rtl_433.git /tmp/rtl_433 && \ @@ -15,4 +16,4 @@ RUN apt-get update && apt-get install -y git libtool libusb-1.0.0-dev librtlsdr make && \ make install -CMD ["sh", "-c", "rtl_433 -F mqtt://${MQTT_ADDRESS}:${MQTT_PORT},events=${MQTT_TOPIC},user=${MQTT_USER},pass=${MQTT_PASSWORD}"] +CMD ["sh", "-c", "rtl_433 ${RTL_PARAMS} -F mqtt://${MQTT_ADDRESS}:${MQTT_PORT},events=${MQTT_TOPIC},user=${MQTT_USER},pass=${MQTT_PASSWORD}"] diff --git a/.templates/rtl_433/build.py b/.templates/rtl_433/build.py new file mode 100755 index 000000000..70cc4ee75 --- /dev/null +++ b/.templates/rtl_433/build.py @@ -0,0 +1,126 @@ 
+#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. Required to make local vars work correctly +def main(): + import os + import time + import shutil + import sys + + from deps.consts import servicesDirectory, templatesDirectory + from deps.common_functions import getExternalPorts, getInternalPorts, checkPortConflicts, checkDependsOn + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + global hideHelpText # Showing and hiding the help controls text + global serviceService + + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + # runtime vars + portConflicts = [] + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. 
+ def preBuild(): + # Setup service directory + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + + # Files copy + shutil.copy(r'%s/Dockerfile' % serviceTemplate, r'%s/Dockerfile' % serviceService) + return True + + # ##################################### + # Supporting functions below + # ##################################### + + def checkForIssues(): + dependsOnListMissing = checkDependsOn(currentServiceName, dockerComposeServicesYaml) + if (len(dependsOnListMissing) > 0): + issues["dependsOn"] = dependsOnListMissing + for (index, serviceName) in enumerate(dockerComposeServicesYaml): + if not currentServiceName == serviceName: # Skip self + currentServicePorts = getExternalPorts(currentServiceName, dockerComposeServicesYaml) + portConflicts = checkPortConflicts(serviceName, currentServicePorts, dockerComposeServicesYaml) + if (len(portConflicts) > 0): + issues["portConflicts"] = portConflicts + + # ##################################### + # End Supporting functions + # ##################################### + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'rtl_433': + main() +else: + print("Error. '{}' Tried to run 'rtl_433' config".format(currentServiceName)) diff --git a/.templates/rtl_433/rtl_433.env b/.templates/rtl_433/rtl_433.env deleted file mode 100644 index 0f9a66d70..000000000 --- a/.templates/rtl_433/rtl_433.env +++ /dev/null @@ -1,6 +0,0 @@ -TZ=Africa/Johannesburg -MQTT_ADDRESS=mosquitto -MQTT_PORT=1883 -#MQTT_USER="" -#MQTT_PASSWORD="" -MQTT_TOPIC=RTL_433 \ No newline at end of file diff --git a/.templates/rtl_433/service.yml b/.templates/rtl_433/service.yml index 56bc7c21d..bab5f1931 100644 --- a/.templates/rtl_433/service.yml +++ b/.templates/rtl_433/service.yml @@ -1,9 +1,14 @@ - rtl_433: - container_name: rtl_433 - build: ./services/rtl_433/. - env_file: - - ./services/rtl_433/rtl_433.env - devices: - - /dev/bus/usb - restart: unless-stopped - \ No newline at end of file +rtl_433: + container_name: rtl_433 + build: ./services/rtl_433/. 
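The old env_file is gone from this template; besides the variables listed below, the image's Dockerfile (above) also understands MQTT_USER, MQTT_PASSWORD and the new RTL_PARAMS, which defaults to "-C si". A hedged sketch of extending the environment block; the credential values and the extra frequency flag are placeholders, not part of this patch:

  #   environment:
  #     - MQTT_USER=myuser
  #     - MQTT_PASSWORD=mypassword
  #     - RTL_PARAMS=-C si -f 433920000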
+ depends_on: + - mosquitto + environment: + - TZ=${TZ:-Etc/UTC} + - MQTT_ADDRESS=mosquitto + - MQTT_PORT=1883 + - MQTT_TOPIC=RTL_433 + devices: + - /dev/bus/usb + restart: unless-stopped + diff --git a/.templates/scrypted/service.yml b/.templates/scrypted/service.yml new file mode 100644 index 000000000..9dfa59a00 --- /dev/null +++ b/.templates/scrypted/service.yml @@ -0,0 +1,18 @@ +scrypted: + container_name: scrypted + image: koush/scrypted + restart: unless-stopped + environment: + - SCRYPTED_WEBHOOK_UPDATE_AUTHORIZATION=Bearer ${SCRYPTED_WEBHOOK_UPDATE_AUTHORIZATION:?see instructions for generating a token} + - SCRYPTED_WEBHOOK_UPDATE=http://localhost:10444/v1/update + network_mode: host + x-ports: + - "10443:10443" + volumes: + - ./volumes/scrypted:/server/volume + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "10" + diff --git a/.templates/syncthing/service.yml b/.templates/syncthing/service.yml new file mode 100644 index 000000000..9929d0eed --- /dev/null +++ b/.templates/syncthing/service.yml @@ -0,0 +1,19 @@ +syncthing: + image: linuxserver/syncthing:latest + container_name: syncthing + hostname: raspberrypi # optional + environment: + - PUID=1000 + - PGID=1000 + - HOME=/app + - TZ=${TZ:-Etc/UTC} + volumes: + - ./volumes/syncthing/config:/config + - ./volumes/syncthing/data:/app + x-ports: + - "8384:8384" # Web UI + - "22000:22000/tcp" # TCP file transfers + - "22000:22000/udp" # QUIC file transfers + - "21027:21027/udp" # Receive local discovery broadcasts + network_mode: host + diff --git a/.templates/tasmoadmin/service.yml b/.templates/tasmoadmin/service.yml index 8d26348a9..2c5f9371c 100644 --- a/.templates/tasmoadmin/service.yml +++ b/.templates/tasmoadmin/service.yml @@ -1,9 +1,11 @@ - tasmoadmin: - container_name: tasmoadmin - image: raymondmm/tasmoadmin - restart: unless-stopped - ports: - - "8088:80" - volumes: - - ./volumes/tasmoadmin/data:/data - +tasmoadmin: + container_name: tasmoadmin + image: ghcr.io/tasmoadmin/tasmoadmin:latest + restart: unless-stopped + environment: + - TZ=Etc/UTC + ports: + - "8088:80" + volumes: + - ./volumes/tasmoadmin/data:/data + diff --git a/.templates/telegraf/Dockerfile b/.templates/telegraf/Dockerfile new file mode 100755 index 000000000..0f0f6b3cf --- /dev/null +++ b/.templates/telegraf/Dockerfile @@ -0,0 +1,44 @@ +# Download base image +FROM telegraf:latest + +# Add support tool +RUN apt update && apt install -y rsync + +# where IOTstack template files are stored +ENV IOTSTACK_DEFAULTS_DIR="iotstack_defaults" +ENV BASELINE_CONFIG=/${IOTSTACK_DEFAULTS_DIR}/telegraf-reference.conf +ENV IOTSTACK_CONFIG=/${IOTSTACK_DEFAULTS_DIR}/telegraf.conf +ENV IOTSTACK_ENTRY_POINT="entrypoint.sh" +ENV BASELINE_ENTRY_POINT="entrypoint-reference.sh" + +# copy template files to image +COPY ${IOTSTACK_DEFAULTS_DIR} /${IOTSTACK_DEFAULTS_DIR} + +# 1. copy the default configuration file that ships with the image as +# a baseline reference for the user, and make it read-only. +# 2. strip comment lines and blank lines from the baseline reference to +# use as the starting point for the IOTstack default configuration. +# 3. append auto-inclusions which, among other things, sets up the +# the appropriate URL for influxdb running in another container in +# the same stack. 
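# For example (a hedged illustration of step 2 above), a baseline fragment like
#       # Configuration for telegraf agent
#       [agent]
#         interval = "10s"
#         ## Rounds collection interval to 'interval'
#         round_interval = true
# is reduced by the grep in the RUN below to just the active settings:
#       [agent]
#         interval = "10s"
#         round_interval = true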
+RUN cp /etc/telegraf/telegraf.conf ${BASELINE_CONFIG} && \
+    cat /${IOTSTACK_DEFAULTS_DIR}/auto_include/*.conf >> ${BASELINE_CONFIG} && \
+    rm -r /${IOTSTACK_DEFAULTS_DIR}/auto_include && \
+    chmod 444 ${BASELINE_CONFIG} && \
+    grep -v -e "^[ ]*#" -e "^[ ]*$" ${BASELINE_CONFIG} >${IOTSTACK_CONFIG}
+
+# replace the docker entry-point script with a self-repairing version
+RUN cp /${IOTSTACK_ENTRY_POINT} /${BASELINE_ENTRY_POINT}
+COPY ${IOTSTACK_ENTRY_POINT} /${IOTSTACK_ENTRY_POINT}
+RUN chmod 755 /${IOTSTACK_ENTRY_POINT}
+
+# undefine variables not needed at runtime
+ENV BASELINE_CONFIG=
+ENV IOTSTACK_CONFIG=
+ENV IOTSTACK_ENTRY_POINT=
+ENV BASELINE_ENTRY_POINT=
+
+# IOTstack declares this path for persistent storage
+VOLUME ["/etc/telegraf"]
+
+# EOF
diff --git a/.templates/telegraf/entrypoint.sh b/.templates/telegraf/entrypoint.sh
new file mode 100755
index 000000000..84d33f8ec
--- /dev/null
+++ b/.templates/telegraf/entrypoint.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+set -e
+
+if [ "${1:0:1}" = '-' ]; then
+    set -- telegraf "$@"
+fi
+
+# perform IOTstack self-repair
+U="$(id -u)"
+T="/etc/telegraf"
+if [ "$U" = '0' -a -d "$T" ]; then
+    echo "Performing IOTstack self repair"
+    rsync -arp --ignore-existing /${IOTSTACK_DEFAULTS_DIR}/ "$T"
+    chown -R "$U:$U" "$T"
+fi
+
+if [ $EUID -eq 0 ]; then
+
+    # Allow telegraf to send ICMP packets and bind to privileged ports
+    setcap cap_net_raw,cap_net_bind_service+ep /usr/bin/telegraf || echo "Failed to set additional capabilities on /usr/bin/telegraf"
+
+    # note: at this point, the default version of this file runs:
+    #
+    #   exec setpriv --reuid telegraf --init-groups "$@"
+    #
+    # Inside the container, user "telegraf" is userID 999, which
+    # isn't a member of the "docker" group outside container-space
+    # so the practical effect of downgrading privileges in this
+    # way is to deny access to /var/run/docker.sock, and then you
+    # get a mess. It's not clear whether the setcap is necessary
+    # on a Raspberry Pi but it has been left in place in case it
+    # turns out to be useful in other Docker environments.
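    # A hedged note: the inputs.docker plugin shipped in iotstack_defaults (and
    # the /var/run/docker.sock mount in service.yml) is why the container stays
    # root here. If that plugin and the socket mount are removed, the upstream
    # behaviour quoted above can be restored by replacing the final exec with:
    #
    #   exec setpriv --reuid telegraf --init-groups "$@"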
+ +fi + +exec "$@" diff --git a/.templates/telegraf/iotstack_defaults/additions/inputs.mqtt_consumer.conf b/.templates/telegraf/iotstack_defaults/additions/inputs.mqtt_consumer.conf new file mode 100644 index 000000000..306fe3144 --- /dev/null +++ b/.templates/telegraf/iotstack_defaults/additions/inputs.mqtt_consumer.conf @@ -0,0 +1,10 @@ +# Read metrics from MQTT topic(s) +# Credit: https://github.com/gcgarner/IOTstack/blob/master/.templates/telegraf/telegraf.conf +[[inputs.mqtt_consumer]] + servers = ["tcp://mosquitto:1883"] + topics = [ + "telegraf/host01/cpu", + "telegraf/+/mem", + "sensors/#", + ] + data_format = "json" diff --git a/.templates/telegraf/iotstack_defaults/auto_include/inputs.cpu_temp.conf b/.templates/telegraf/iotstack_defaults/auto_include/inputs.cpu_temp.conf new file mode 100644 index 000000000..cf39f5eaf --- /dev/null +++ b/.templates/telegraf/iotstack_defaults/auto_include/inputs.cpu_temp.conf @@ -0,0 +1,5 @@ +[[inputs.file]] + files = ["/sys/class/thermal/thermal_zone0/temp"] + name_override = "cpu_temperature" + data_format = "value" + data_type = "integer" diff --git a/.templates/telegraf/iotstack_defaults/auto_include/inputs.docker.conf b/.templates/telegraf/iotstack_defaults/auto_include/inputs.docker.conf new file mode 100644 index 000000000..359f0472c --- /dev/null +++ b/.templates/telegraf/iotstack_defaults/auto_include/inputs.docker.conf @@ -0,0 +1,14 @@ +# Read metrics about docker containers +# Credit: @tablatronix +[[inputs.docker]] + endpoint = "unix:///var/run/docker.sock" + gather_services = false + source_tag = false + container_name_include = [] + container_name_exclude = [] + timeout = "5s" + perdevice = false + total_include = ["cpu", "blkio", "network"] + docker_label_include = [] + docker_label_exclude = [] + tag_env = ["HEAP_SIZE"] diff --git a/.templates/telegraf/iotstack_defaults/auto_include/outputs.influxdb.conf b/.templates/telegraf/iotstack_defaults/auto_include/outputs.influxdb.conf new file mode 100755 index 000000000..b894ea3fb --- /dev/null +++ b/.templates/telegraf/iotstack_defaults/auto_include/outputs.influxdb.conf @@ -0,0 +1,3 @@ +[[outputs.influxdb]] +urls = ["http://influxdb:8086"] + diff --git a/.templates/telegraf/service.yml b/.templates/telegraf/service.yml index e130ad4ce..dc0a7c799 100644 --- a/.templates/telegraf/service.yml +++ b/.templates/telegraf/service.yml @@ -1,8 +1,18 @@ - telegraf: - container_name: telegraf - image: telegraf - volumes: - - ./services/telegraf/telegraf.conf:/etc/telegraf/telegraf.conf:ro - depends_on: - - influxdb - - mosquitto +telegraf: + container_name: telegraf + build: ./.templates/telegraf/. + hostname: iotstack # optional + restart: unless-stopped + environment: + - TZ=${TZ:-Etc/UTC} + ports: + - "8092:8092/udp" + - "8094:8094/tcp" + - "8125:8125/udp" + volumes: + - ./volumes/telegraf:/etc/telegraf + - /var/run/docker.sock:/var/run/docker.sock:ro + depends_on: + - influxdb + - mosquitto + diff --git a/.templates/telegraf/telegraf.conf b/.templates/telegraf/telegraf.conf deleted file mode 100644 index f377c5a3f..000000000 --- a/.templates/telegraf/telegraf.conf +++ /dev/null @@ -1,237 +0,0 @@ -# Telegraf Configuration -# -# Telegraf is entirely plugin driven. All metrics are gathered from the -# declared inputs, and sent to the declared outputs. -# -# Plugins must be declared in here to be active. -# To deactivate a plugin, comment out the name and any variables. -# -# Use 'telegraf -config telegraf.conf -test' to see what metrics a config -# file would generate. 
-# -# Environment variables can be used anywhere in this config file, simply surround -# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"), -# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR}) - - -# Global tags can be specified here in key="value" format. -[global_tags] - # dc = "us-east-1" # will tag all metrics with dc=us-east-1 - # rack = "1a" - ## Environment variables can be used as tags, and throughout the config file - # user = "$USER" - - -# Configuration for telegraf agent -[agent] - ## Default data collection interval for all inputs - interval = "10s" - ## Rounds collection interval to 'interval' - ## ie, if interval="10s" then always collect on :00, :10, :20, etc. - round_interval = true - - ## Telegraf will send metrics to outputs in batches of at most - ## metric_batch_size metrics. - ## This controls the size of writes that Telegraf sends to output plugins. - metric_batch_size = 1000 - - ## Maximum number of unwritten metrics per output. - metric_buffer_limit = 10000 - - ## Collection jitter is used to jitter the collection by a random amount. - ## Each plugin will sleep for a random time within jitter before collecting. - ## This can be used to avoid many plugins querying things like sysfs at the - ## same time, which can have a measurable effect on the system. - collection_jitter = "0s" - - ## Default flushing interval for all outputs. Maximum flush_interval will be - ## flush_interval + flush_jitter - flush_interval = "10s" - ## Jitter the flush interval by a random amount. This is primarily to avoid - ## large write spikes for users running a large number of telegraf instances. - ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - flush_jitter = "0s" - - ## By default or when set to "0s", precision will be set to the same - ## timestamp order as the collection interval, with the maximum being 1s. - ## ie, when interval = "10s", precision will be "1s" - ## when interval = "250ms", precision will be "1ms" - ## Precision will NOT be used for service inputs. It is up to each individual - ## service input to set the timestamp at the appropriate precision. - ## Valid time units are "ns", "us" (or "µs"), "ms", "s". - precision = "" - - ## Log at debug level. - # debug = false - ## Log only error level messages. - # quiet = false - - ## Log file name, the empty string means to log to stderr. - # logfile = "" - - ## The logfile will be rotated after the time interval specified. When set - ## to 0 no time based rotation is performed. Logs are rotated only when - ## written to, if there is no log activity rotation may be delayed. - # logfile_rotation_interval = "0d" - - ## The logfile will be rotated when it becomes larger than the specified - ## size. When set to 0 no size based rotation is performed. - # logfile_rotation_max_size = "0MB" - - ## Maximum number of rotated archives to keep, any older logs are deleted. - ## If set to -1, no archives are removed. - # logfile_rotation_max_archives = 5 - - ## Override default hostname, if empty use os.Hostname() - hostname = "" - ## If set to true, do no set the "host" tag in the telegraf agent. - omit_hostname = false - - -############################################################################### -# OUTPUT PLUGINS # -############################################################################### - - -# Configuration for sending metrics to InfluxDB -[[outputs.influxdb]] - # The full HTTP or UDP URL for your InfluxDB instance. 
- # - # Multiple URLs can be specified for a single cluster, only ONE of the - # urls will be written to each interval. - # urls = ["unix:///var/run/influxdb.sock"] - # urls = ["udp://influxdb:8089"] - urls = ["http://influxdb:8086"] - - # The target database for metrics; will be created as needed. - # For UDP url endpoint database needs to be configured on server side. - database = "telegraf" - - # The value of this tag will be used to determine the database. If this - # tag is not set the 'database' option is used as the default. - database_tag = "" - - # If true, the database tag will not be added to the metric. - exclude_database_tag = false - - # If true, no CREATE DATABASE queries will be sent. Set to true when using - # Telegraf with a user without permissions to create databases or when the - # database already exists. - skip_database_creation = false - - # Name of existing retention policy to write to. Empty string writes to - # the default retention policy. Only takes effect when using HTTP. - retention_policy = "" - - # Write consistency (clusters only), can be: "any", "one", "quorum", "all". - # Only takes effect when using HTTP. - write_consistency = "any" - - # Timeout for HTTP messages. - timeout = "5s" - - # HTTP Basic Auth - username = "telegraf" - password = "metricsmetricsmetricsmetrics" - - # HTTP User-Agent - user_agent = "telegraf" - - # UDP payload size is the maximum packet size to send. - udp_payload = "512B" - - ## Optional TLS Config for use on HTTP connections. - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - # - ## HTTP Proxy override, if unset values the standard proxy environment - ## variables are consulted to determine which proxy, if any, should be used. - # http_proxy = "http://corporate.proxy:3128" - - ## Additional HTTP headers - # http_headers = {"X-Special-Header" = "Special-Value"} - - ## HTTP Content-Encoding for write request body, can be set to "gzip" to - ## compress body or "identity" to apply no encoding. - # content_encoding = "identity" - - ## When true, Telegraf will output unsigned integers as unsigned values, - ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned - ## integer values. Enabling this option will result in field type errors if - ## existing data has been written. - # influx_uint_support = false - - # Read metrics from MQTT topic(s) - [[inputs.mqtt_consumer]] - ## MQTT broker URLs to be used. The format should be scheme://host:port, - ## schema can be tcp, ssl, or ws. - servers = ["tcp://mosquitto:1883"] - - ## Topics that will be subscribed to. - topics = [ - "telegraf/host01/cpu", - "telegraf/+/mem", - "sensors/#", - ] - - ## The message topic will be stored in a tag specified by this value. If set - ## to the empty string no topic tag will be created. - # topic_tag = "topic" - - ## QoS policy for messages - ## 0 = at most once - ## 1 = at least once - ## 2 = exactly once - ## - ## When using a QoS of 1 or 2, you should enable persistent_session to allow - ## resuming unacknowledged messages. - # qos = 0 - - ## Connection timeout for initial connection in seconds - # connection_timeout = "30s" - - ## Maximum messages to read from the broker that have not been written by an - ## output. For best throughput set based on the number of metrics within - ## each message and the size of the output's metric_batch_size. 
- ## - ## For example, if each message from the queue contains 10 metrics and the - ## output metric_batch_size is 1000, setting this to 100 will ensure that a - ## full batch is collected and the write is triggered immediately without - ## waiting until the next flush_interval. - max_undelivered_messages = 1000 - - ## Persistent session disables clearing of the client session on connection. - ## In order for this option to work you must also set client_id to identity - ## the client. To receive messages that arrived while the client is offline, - ## also set the qos option to 1 or 2 and don't forget to also set the QoS when - ## publishing. - # persistent_session = false - - ## If unset, a random client ID will be generated. - # client_id = "" - - ## Username and password to connect MQTT server. - # username = "telegraf" - # password = "metricsmetricsmetricsmetrics" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - - #data_format = "influx" - data_format = "json" - #tag_keys = [ - # "temperature", - # "humidity" - #] \ No newline at end of file diff --git a/.templates/timescaledb/service.yml b/.templates/timescaledb/service.yml new file mode 100644 index 000000000..5232bc844 --- /dev/null +++ b/.templates/timescaledb/service.yml @@ -0,0 +1,13 @@ +timescaledb: + container_name: timescaledb + image: timescale/timescaledb:latest-pg12 + restart: unless-stopped + environment: + - POSTGRES_USER=${IOTSTACK_TIMESCALEDB_USER:-postgres} + - POSTGRES_PASSWORD=${IOTSTACK_TIMESCALEDB_INITIAL_PASSWORD:-IOtSt4ckTim3Scale} + - POSTGRES_DB=postdb + ports: + - "${IOTSTACK_TIMESCALEDB_PORT_INT:-5433}:5432" + volumes: + - ./volumes/timescaledb/data:/var/lib/postgresql/data + diff --git a/.templates/transmission/build.py b/.templates/transmission/build.py new file mode 100755 index 000000000..74c6d63a6 --- /dev/null +++ b/.templates/transmission/build.py @@ -0,0 +1,337 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. Required to make local vars work correctly +def main(): + import os + import time + from blessed import Terminal + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine, padText + from deps.consts import servicesDirectory, templatesDirectory, volumesDirectory + from deps.common_functions import getExternalPorts, getInternalPorts, checkPortConflicts, enterPortNumberWithWhiptail + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + + # runtime vars + portConflicts = [] + serviceVolume = volumesDirectory + currentServiceName + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + + try: # If not already set, then set it. 
+ hideHelpText = hideHelpText + except: + hideHelpText = False + + documentationHint = 'https://sensorsiot.github.io/IOTstack/' + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + if not os.path.exists(serviceVolume): + try: + os.makedirs(serviceVolume, exist_ok=True) + print("Created", serviceVolume, "for", currentServiceName) + except Exception as err: + print("Error creating directory", currentServiceName) + print(err) + if not os.path.exists(serviceVolume + "/downloads"): + try: + os.mkdir(serviceVolume + "/downloads") + print("Created", serviceVolume + "/downloads", "for", currentServiceName) + except Exception as err: + print("Error creating downloads directory", currentServiceName) + print(err) + + if not os.path.exists(serviceVolume + "/watch"): + try: + os.makedirs(serviceVolume + "/watch", exist_ok=True) + print("Created", serviceVolume + "/watch", "for", currentServiceName) + except Exception as err: + print("Error creating watch directory", currentServiceName) + print(err) + + if not os.path.exists(serviceVolume + "/config"): + try: + os.makedirs(serviceVolume + "/config", exist_ok=True) + print("Created", serviceVolume + "/config", "for", currentServiceName) + except Exception as err: + print("Error creating config directory", currentServiceName) + print(err) + + return True + + # This function is optional, and will run just before the build docker-compose.yml code. 
+ def preBuild(): + return True + + # ##################################### + # Supporting functions below + # ##################################### + + def checkForIssues(): + for (index, serviceName) in enumerate(dockerComposeServicesYaml): + if not currentServiceName == serviceName: # Skip self + currentServicePorts = getExternalPorts(currentServiceName, dockerComposeServicesYaml) + portConflicts = checkPortConflicts(serviceName, currentServicePorts, dockerComposeServicesYaml) + if (len(portConflicts) > 0): + issues["portConflicts"] = portConflicts + + ############################ + # Menu Logic + ############################ + + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + + selectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + needsRender = 1 + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + + def goBack(): + global selectionInProgress + global needsRender + selectionInProgress = False + needsRender = 1 + return True + + def enterPortNumberExec(): + # global term + global needsRender + global dockerComposeServicesYaml + externalPort = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + internalPort = getInternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + newPortNumber = enterPortNumberWithWhiptail(term, dockerComposeServicesYaml, currentServiceName, hotzoneLocation, externalPort) + + if newPortNumber > 0: + dockerComposeServicesYaml[currentServiceName]["ports"][0] = "{newExtPort}:{oldIntPort}".format( + newExtPort = newPortNumber, + oldIntPort = internalPort + ) + createMenu() + needsRender = 1 + + def onResize(sig, action): + global transmissionBuildOptions + global currentMenuItemIndex + mainRender(1, transmissionBuildOptions, currentMenuItemIndex) + + transmissionBuildOptions = [] + + def createMenu(): + global transmissionBuildOptions + try: + transmissionBuildOptions = [] + portNumber = getExternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + transmissionBuildOptions.append([ + "Change external WUI Port Number from: {port}".format(port=portNumber), + enterPortNumberExec + ]) + except: # Error getting port + pass + transmissionBuildOptions.append(["Go back", goBack]) + + def runOptionsMenu(): + createMenu() + menuEntryPoint() + return True + + def renderHotZone(term, menu, selection, hotzoneLocation): + lineLengthAtTextStart = 71 + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + for (index, menuItem) in enumerate(menu): + toPrint = "" + if index == selection: + toPrint += ('{bv} -> {t.blue_on_green} {title} {t.normal} <-'.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + else: + toPrint += ('{bv} {t.normal} {title} '.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + + for i in range(lineLengthAtTextStart - len(menuItem[0])): + toPrint += " " + + toPrint += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + + toPrint = term.center(toPrint) + + print(toPrint) + + def mainRender(needsRender, menu, selection): + term = Terminal() + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack Transmission Options'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Option to configure {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + 
print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to run command or save input {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to go back to build stack menu {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + if len(documentationHint) > 1: + if len(documentationHint) > 56: + documentationAndPadding = padText(documentationHint, 71) + print(term.center("{bv} Documentation: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + else: + documentationAndPadding = padText(documentationHint, 56) + print(term.center("{bv} Documentation: {dap} {bv}".format(bv=specialChars[renderMode]["borderVertical"], dap=documentationAndPadding))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + global transmissionBuildOptions + if len(transmissionBuildOptions[selection]) > 1 and isinstance(transmissionBuildOptions[selection][1], types.FunctionType): + transmissionBuildOptions[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(nodeRedBuildOptions[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 2: + if menu[index][2]["skip"] == True: + return False + return True + + def menuEntryPoint(): + # These need to be reglobalised due to eval() + global currentMenuItemIndex + global selectionInProgress + global menuNavigateDirection + global needsRender + global hideHelpText + global transmissionBuildOptions + term = Terminal() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, transmissionBuildOptions, currentMenuItemIndex) + selectionInProgress = True + with term.cbreak(): + while selectionInProgress: + menuNavigateDirection = 0 + + if needsRender: # Only rerender when changed to prevent flickering + mainRender(needsRender, transmissionBuildOptions, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + menuNavigateDirection += 1 + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_LEFT': + goBack() + if key.name == 'KEY_ENTER': + runSelection(currentMenuItemIndex) + if key.name == 'KEY_ESCAPE': + return True + elif key: + if key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, transmissionBuildOptions, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + 
currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(transmissionBuildOptions) + needsRender = 2 + + while not isMenuItemSelectable(transmissionBuildOptions, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(transmissionBuildOptions) + return True + + #################### + # End menu section + #################### + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'transmission': + main() +else: + print("Error. '{}' Tried to run 'transmission' config".format(currentServiceName)) diff --git a/.templates/transmission/service.yml b/.templates/transmission/service.yml new file mode 100644 index 000000000..d179123da --- /dev/null +++ b/.templates/transmission/service.yml @@ -0,0 +1,17 @@ +transmission: + image: linuxserver/transmission + container_name: transmission + environment: + - PUID=1000 + - PGID=1000 + - TZ=Etc/UTC + volumes: + - ./volumes/transmission/config:/config + - ./volumes/transmission/downloads:/downloads + - ./volumes/transmission/watch:/watch + ports: + - "9091:9091" + - "51413:51413" + - "51413:51413/udp" + restart: unless-stopped + diff --git a/.templates/webthings_gateway/service.yml b/.templates/webthings_gateway/service.yml deleted file mode 100644 index 9bba05b72..000000000 --- a/.templates/webthings_gateway/service.yml +++ /dev/null @@ -1,13 +0,0 @@ - webthings_gateway: - image: mozillaiot/gateway:arm - container_name: webthings_gateway - network_mode: host - #ports: - # - 8080:8080 - # - 4443:4443 - #devices: - # - /dev/ttyACM0:/dev/ttyACM0 - volumes: - - ./volumes/webthings_gateway/share:/home/node/.mozilla-iot - - \ No newline at end of file diff --git a/.templates/webthingsio_gateway/build.py b/.templates/webthingsio_gateway/build.py new file mode 100755 index 000000000..0be4ae8c8 --- /dev/null +++ b/.templates/webthingsio_gateway/build.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. Required to make local vars work correctly +def main(): + import os + import time + import shutil + import sys + + from deps.consts import servicesDirectory, templatesDirectory, volumesDirectory + from deps.common_functions import getExternalPorts, getInternalPorts, checkPortConflicts + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + global hideHelpText # Showing and hiding the help controls text + global serviceService + + serviceVolume = volumesDirectory + currentServiceName + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + # runtime vars + portConflicts = [] + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. 
+ def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. + def preBuild(): + # Setup service directory + if not os.path.exists(serviceVolume): + os.makedirs(serviceVolume, exist_ok=True) + os.makedirs(serviceVolume + '/share', exist_ok=True) + os.makedirs(serviceVolume + '/share/config', exist_ok=True) + + # Files copy + shutil.copy(r'%s/local.json' % serviceTemplate, r'%s/share/config/local.json' % serviceVolume) + return True + + # ##################################### + # Supporting functions below + # ##################################### + + + def checkForIssues(): + envFileIssues = checkEnvFiles() + if (len(envFileIssues) > 0): + issues["envFileIssues"] = envFileIssues + + for (index, serviceName) in enumerate(dockerComposeServicesYaml): + if not currentServiceName == serviceName: # Skip self + currentServicePorts = getExternalPorts(currentServiceName, dockerComposeServicesYaml) + portConflicts = checkPortConflicts(serviceName, currentServicePorts, dockerComposeServicesYaml) + if (len(portConflicts) > 0): + issues["portConflicts"] = portConflicts + + def checkEnvFiles(): + envFileIssues = [] + if not os.path.exists(serviceTemplate + '/local.json'): + envFileIssues.append(serviceTemplate + '/local.json does not exist') + return envFileIssues + + + # ##################################### + # End Supporting functions + # ##################################### + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'webthingsio_gateway': + main() +else: + print("Error. 
'{}' Tried to run 'webthingsio_gateway' config".format(currentServiceName)) diff --git a/.templates/webthingsio_gateway/local.json b/.templates/webthingsio_gateway/local.json new file mode 100644 index 000000000..479d3d162 --- /dev/null +++ b/.templates/webthingsio_gateway/local.json @@ -0,0 +1,6 @@ +{ + "ports": { + "https": 4061, + "http": 4060 + } +} diff --git a/.templates/webthingsio_gateway/service.yml b/.templates/webthingsio_gateway/service.yml new file mode 100644 index 000000000..ef026bf7e --- /dev/null +++ b/.templates/webthingsio_gateway/service.yml @@ -0,0 +1,10 @@ +webthingsio_gateway: + image: webthingsio/gateway:latest + container_name: webthingsio_gateway + network_mode: host + x-ports: + - "4060:4060" + - "4061:4061" + volumes: + - ./volumes/webthingsio_gateway/share:/home/node/.mozilla-iot + diff --git a/.templates/wireguard/service.yml b/.templates/wireguard/service.yml new file mode 100644 index 000000000..761d007b7 --- /dev/null +++ b/.templates/wireguard/service.yml @@ -0,0 +1,23 @@ +wireguard: + container_name: wireguard + image: ghcr.io/linuxserver/wireguard + restart: unless-stopped + environment: + - PUID=1000 + - PGID=1000 + - TZ=${TZ:-Etc/UTC} + - SERVERURL=your.dynamic.dns.name + - SERVERPORT=51820 + - PEERS=laptop,phone,tablet + - PEERDNS=auto + - ALLOWEDIPS=0.0.0.0/0 + ports: + - "51820:51820/udp" + volumes: + - ./volumes/wireguard/config:/config + - ./volumes/wireguard/custom-cont-init.d:/custom-cont-init.d + - ./volumes/wireguard/custom-services.d:/custom-services.d + cap_add: + - NET_ADMIN + sysctls: + - net.ipv4.conf.all.src_valid_mark=1 diff --git a/.templates/wireguard/use-container-dns.sh b/.templates/wireguard/use-container-dns.sh new file mode 100644 index 000000000..7f13f219b --- /dev/null +++ b/.templates/wireguard/use-container-dns.sh @@ -0,0 +1,17 @@ +# Forward DNS requests from remote WireGuard clients to the default +# gateway on the internal bridged network that the WireGuard container +# is attached to. The gateway routes queries out from the bridged network to +# the host's network. This results in queries being sent to any daemon or +# container that is listening on host port 53 (eg PiHole, AdGuardHome, dnsmasq +# or bind9). +# +# Acknowledgement: @ukkopahis + +GW=$(ip route list default | head -1 | cut -d " " -f 3) +echo Creating Corefile to use DNS at $GW +echo "# Generated by use-container-dns.sh +. { + loop + forward . dns://${GW} +}" > /config/coredns/Corefile + diff --git a/.templates/wordpress/service.yml b/.templates/wordpress/service.yml new file mode 100644 index 000000000..36a9ab45e --- /dev/null +++ b/.templates/wordpress/service.yml @@ -0,0 +1,36 @@ +wordpress: + container_name: wordpress + image: wordpress + restart: unless-stopped + environment: + TZ: ${TZ:-Etc/UTC} + WORDPRESS_DB_HOST: wordpress_db + WORDPRESS_DB_USER: ${WORDPRESS_DB_USER:-wordpress} + WORDPRESS_DB_PASSWORD: ${WORDPRESS_DB_PASSWORD:?eg echo WORDPRESS_DB_PASSWORD=userPassword >>~/IOTstack/.env} + WORDPRESS_DB_NAME: ${WORDPRESS_DB_NAME:-wordpress} + volumes: + - ./volumes/wordpress/html:/var/www/html + ports: + - "8084:80" + hostname: ${WORDPRESS_HOSTNAME:?eg echo WORDPRESS_HOSTNAME=hostname >>~/IOTstack/.env} + networks: + - default + - nextcloud + depends_on: + - wordpress_db + +wordpress_db: + container_name: wordpress_db + build: ./.templates/mariadb/. 
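+  # wordpress_db is built from IOTstack's local MariaDB template
+  # (./.templates/mariadb) rather than pulling a stock mariadb image;
+  # its data and backups live in the /config and /backup mappings below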
+ restart: unless-stopped + environment: + TZ: ${TZ:-Etc/UTC} + MYSQL_ROOT_PASSWORD: ${WORDPRESS_ROOT_PASSWORD:?eg echo WORDPRESS_ROOT_PASSWORD=rootPassword >>~/IOTstack/.env} + MYSQL_USER: ${WORDPRESS_DB_USER:-wordpress} + MYSQL_PASSWORD: ${WORDPRESS_DB_PASSWORD:?eg echo WORDPRESS_DB_PASSWORD=userPassword >>~/IOTstack/.env} + MYSQL_DATABASE: ${WORDPRESS_DB_NAME:-wordpress} + volumes: + - ./volumes/wordpress/db:/config + - ./volumes/wordpress/db_backup:/backup + networks: + - nextcloud diff --git a/.templates/zerotier-client/service.yml b/.templates/zerotier-client/service.yml new file mode 100644 index 000000000..ee53dce3f --- /dev/null +++ b/.templates/zerotier-client/service.yml @@ -0,0 +1,13 @@ +zerotier-client: + container_name: zerotier + image: "zyclonite/zerotier" + restart: unless-stopped + network_mode: host + volumes: + - ./volumes/zerotier-one:/var/lib/zerotier-one + devices: + - "/dev/net/tun:/dev/net/tun" + cap_add: + - NET_ADMIN + - SYS_ADMIN + diff --git a/.templates/zerotier-router/service.yml b/.templates/zerotier-router/service.yml new file mode 100644 index 000000000..5f59d103c --- /dev/null +++ b/.templates/zerotier-router/service.yml @@ -0,0 +1,24 @@ +zerotier-router: + container_name: zerotier + image: "zyclonite/zerotier:router" + restart: unless-stopped + environment: + - TZ=${TZ:-Etc/UTC} + - PUID=1000 + - PGID=1000 + # - ZEROTIER_ONE_NETWORK_IDS=yourNetworkID + - ZEROTIER_ONE_LOCAL_PHYS=eth0 wlan0 + - ZEROTIER_ONE_USE_IPTABLES_NFT=true + - ZEROTIER_ONE_GATEWAY_MODE=both + network_mode: host + x-ports: + - "9993:9993" + volumes: + - ./volumes/zerotier-one:/var/lib/zerotier-one + devices: + - "/dev/net/tun:/dev/net/tun" + cap_add: + - NET_ADMIN + - SYS_ADMIN + - NET_RAW + diff --git a/.templates/zigbee2mqtt/Dockerfile b/.templates/zigbee2mqtt/Dockerfile new file mode 100644 index 000000000..740602985 --- /dev/null +++ b/.templates/zigbee2mqtt/Dockerfile @@ -0,0 +1,19 @@ +# This file is deprecated. It is being retained for backwards +# compatibility with existing docker-compose.yml files but will +# be removed, eventually. + +# Download base image +FROM koenkk/zigbee2mqtt + +# change default configuration.yaml to be IOTstack-friendly +# 1. expect the MQTT service to be in the Mosquitto container +# 2. enable the web front end on port 8080 +RUN sed -i.bak \ + -e 's/mqtt:\/\/localhost/mqtt:\/\/mosquitto/' \ + -e '$s/$/\n\nfrontend:\n port: 8080\n# auth_token: PASSWORD\n/' \ + /app/configuration.yaml + +RUN echo "*** DEPRECATION NOTICE: Please read IOTstack Zigbee2MQTT documentation:" +RUN echo "*** https://sensorsiot.github.io/IOTstack/Containers/Zigbee2MQTT/" + +# EOF diff --git a/.templates/zigbee2mqtt/build.py b/.templates/zigbee2mqtt/build.py new file mode 100755 index 000000000..729a3bd7a --- /dev/null +++ b/.templates/zigbee2mqtt/build.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. 
Required to make local vars work correctly +def main(): + import os + import time + import sys + + from deps.consts import servicesDirectory, templatesDirectory + from deps.common_functions import getExternalPorts, getInternalPorts, checkPortConflicts + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + global hideHelpText # Showing and hiding the help controls text + global serviceService + + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + # runtime vars + portConflicts = [] + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. + def preBuild(): + return True + + # ##################################### + # Supporting functions below + # ##################################### + + # None + + # ##################################### + # End Supporting functions + # ##################################### + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'zigbee2mqtt': + main() +else: + print("Error. 
'{}' Tried to run 'zigbee2mqtt' config".format(currentServiceName)) diff --git a/.templates/zigbee2mqtt/service.yml b/.templates/zigbee2mqtt/service.yml index 9bcf16d6f..110b68244 100644 --- a/.templates/zigbee2mqtt/service.yml +++ b/.templates/zigbee2mqtt/service.yml @@ -1,10 +1,22 @@ - zigbee2mqtt: - container_name: zigbee2mqtt - image: koenkk/zigbee2mqtt - volumes: - - ./volumes/zigbee2mqtt/data:/app/data - devices: - - /dev/ttyAMA0:/dev/ttyACM0 - #- /dev/ttyACM0:/dev/ttyACM0 - restart: unless-stopped - network_mode: host +zigbee2mqtt: + container_name: zigbee2mqtt + image: koenkk/zigbee2mqtt:latest + environment: + - TZ=${TZ:-Etc/UTC} + - ZIGBEE2MQTT_CONFIG_SERIAL_PORT=/dev/ttyACM0 + - ZIGBEE2MQTT_CONFIG_SERIAL_ADAPTER=zstack + - ZIGBEE2MQTT_CONFIG_MQTT_SERVER=mqtt://mosquitto:1883 + # only enable the next line for Zigbee2MQTT v1 + # - ZIGBEE2MQTT_CONFIG_FRONTEND=true + - ZIGBEE2MQTT_CONFIG_FRONTEND_ENABLED=true + - ZIGBEE2MQTT_CONFIG_ADVANCED_LOG_SYMLINK_CURRENT=true + # - DEBUG=zigbee-herdsman* + ports: + - "8080:8080" + volumes: + - ./volumes/zigbee2mqtt/data:/app/data + devices: + - "${ZIGBEE2MQTT_DEVICE_PATH:?eg echo ZIGBEE2MQTT_DEVICE_PATH=/dev/ttyACM0 >>~/IOTstack/.env}:/dev/ttyACM0" + restart: unless-stopped + depends_on: + - mosquitto diff --git a/.templates/zigbee2mqtt_assistant/build.py b/.templates/zigbee2mqtt_assistant/build.py new file mode 100755 index 000000000..b05ddf7e1 --- /dev/null +++ b/.templates/zigbee2mqtt_assistant/build.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. Required to make local vars work correctly +def main(): + import os + import time + import sys + + from deps.consts import servicesDirectory, templatesDirectory + from deps.common_functions import getExternalPorts, getInternalPorts, checkPortConflicts + + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + global hideHelpText # Showing and hiding the help controls text + global serviceService + + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + # runtime vars + portConflicts = [] + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. 
+ def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. + def preBuild(): + return True + + # ##################################### + # Supporting functions below + # ##################################### + + def checkForIssues(): + for (index, serviceName) in enumerate(dockerComposeServicesYaml): + if not currentServiceName == serviceName: # Skip self + currentServicePorts = getExternalPorts(currentServiceName, dockerComposeServicesYaml) + portConflicts = checkPortConflicts(serviceName, currentServicePorts, dockerComposeServicesYaml) + if (len(portConflicts) > 0): + issues["portConflicts"] = portConflicts + + # ##################################### + # End Supporting functions + # ##################################### + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'zigbee2mqtt_assistant': + main() +else: + print("Error. '{}' Tried to run 'zigbee2mqtt_assistant' config".format(currentServiceName)) diff --git a/.templates/zigbee2mqtt_assistant/service.yml b/.templates/zigbee2mqtt_assistant/service.yml new file mode 100644 index 000000000..401c5df25 --- /dev/null +++ b/.templates/zigbee2mqtt_assistant/service.yml @@ -0,0 +1,11 @@ +zigbee2mqtt_assistant: + container_name: zigbee2mqtt_assistant + image: carldebilly/zigbee2mqttassistant + restart: unless-stopped + ports: + - "8880:80" + environment: + - VIRTUAL_HOST=~^zigbee2mqtt_assistant\..*\.xip\.io + - Z2MA_SETTINGS__MQTTSERVER=mosquitto + - VIRTUAL_PORT=8880 + diff --git a/.templates/zigbee2mqtt_assistant/zigbee2mqtt_assistant.env b/.templates/zigbee2mqtt_assistant/zigbee2mqtt_assistant.env new file mode 100755 index 000000000..e7145a1e0 --- /dev/null +++ b/.templates/zigbee2mqtt_assistant/zigbee2mqtt_assistant.env @@ -0,0 +1,4 @@ +#TZ=Etc/UTC +Z2MA_SETTINGS__MQTTSERVER=mosquitto +#Z2MA_SETTINGS__MQTTUSERNAME=MQTTUSER +#Z2MA_SETTINGS__MQTTPASSWORD=MQTTPASS diff --git a/.tmp/.gitkeep b/.tmp/.gitkeep new file mode 100755 index 000000000..e69de29bb diff --git a/.yamllint.yaml b/.yamllint.yaml new file mode 100644 index 000000000..9cf9912de --- /dev/null +++ b/.yamllint.yaml @@ -0,0 +1,11 @@ +--- + +extends: default + +rules: + empty-lines: + max: 2 + max-start: 0 + max-end: 1 + line-length: + max: 200 diff --git a/README.md b/README.md index b59ba57e3..3e9e61b1a 100644 --- a/README.md +++ b/README.md @@ -1,198 +1,59 @@ -# IOTStack +# IOTstack -IOTstack is a builder for docker-compose to easily make and maintain IoT stacks on the Raspberry Pi +IOTstack is a builder for docker-compose to easily make and maintain IoT stacks on the Raspberry Pi. -## Announcements +## introduction to IOTstack - videos -The bulk of the README has moved to the Wiki. Please check it out [here](https://github.com/gcgarner/IOTstack/wiki) +IOTstack changes a fair bit so YouTube videos only reflect a moment in time and tend to date quickly. 
These videos contain excellent overviews but you should rely more on the written documentation than anything you see in the videos. -* 2019-12-19 Added python container, tweaked update script -* 2019-12-12 modified zigbee2mqtt template file -* 2019-12-12 Added Function to add custom containers to the stack -* 2019-12-12 PR cmskedgell: Added Homebridge -* 2019-12-12 PR 877dev: Added trimming of online backups -* 2019-12-03 BUGFIX Mosquitto: Fixed issue where mosquitto failed to start as a result of 11-28 change -* 2019-12-03 Added terminal for postgres, temporarily removed setfacl from menu -* 2019-11-28 PR @stfnhmplr added diyHue -* 2019-11-28 Fixed update notification on menu -* 2019-11-28 Fixed mosquitto logs and database not mapping correctly to volumes. Pull new template to fix -* 2019-11-28 added the option to disable swapfile by setting swappiness to 0 -* 2019-11-28 PR @stfnhmplr fixed incorrect shegang on MariaDB terminal.sh -* 2019-11-28 Added native install for RPIEasy -* 2019-11-27 Additions: NextCloud, MariaDB, MotionEye, Mozilla Webthings, blynk-server (fixed issue with selection.txt) -* 2019-11-22 BUGFIX selection.txt failed on fresh install, added pushd IOTstack to menu to ensure correct path -* 2019-11-22 Added notification into menu if project update is available -* 2019-11-20 BUGFIX influxdb backup: Placing docker_backup in crontab caused influxdb backup not to execute correctly -* 2019-11-20 BUGFIX disable swap: swapfile recreation on reboot fixed. Re-run from menu to fix. -* Node-RED: serial port. New template adds privileged which allows acces to serial devices -* EspurinoHub: is available for testing see wiki entry +Andreas Spiess: -*** +* 2023-10-01 Upgrade your Raspberry Pi to a Homelab (instead of a Raspberry Pi 5) -## Highlighted topics + [![20231001](http://img.youtube.com/vi/xVq_5f0aFlw/0.jpg)](https://www.youtube.com/watch?v=xVq_5f0aFlw) -* [Bluetooth and Node-RED](https://github.com/gcgarner/IOTstack/wiki/Node-RED#using-bluetooth) -* [Saving files to disk inside containers](https://github.com/gcgarner/IOTstack/wiki/Node-RED#sharing-files-between-node-red-and-the-host) -* [Updating the Project](https://github.com/gcgarner/IOTstack/wiki/Updating-the-Project) +* 2022-11-20 #443 I found an Excellent Raspberry Pi Replacement for Home Assistant / IOTstack (incl. Proxmox) - *** + [![#443](http://img.youtube.com/vi/rXc_zGRYhLo/0.jpg)](https://www.youtube.com/watch?v=rXc_zGRYhLo) -## Coming soon +* 2020-11-22 #362 Wireguard and NextCloud on a Raspberry Pi = Marvellous (Docker, IOTstack) -* reverse proxy is now next on the list, I cant keep up with the ports -* Detection of arhcitecture for seperate stack options for amd64, armhf, i386 -* autocleanup of backups on cloud -* Gitea (in testing branch) -* OwnCloud + [![#362](http://img.youtube.com/vi/7Pe-Cv0tnLs/0.jpg)](https://www.youtube.com/watch?v=7Pe-Cv0tnLs) -*** +* 2020-09-13 #352 Raspberry Pi4 Home Automation Server (incl. Docker, OpenHAB, HASSIO, NextCloud) -## About + [![#352](http://img.youtube.com/vi/KJRMjUzlHI8/0.jpg)](https://www.youtube.com/watch?v=KJRMjUzlHI8) -Docker stack for getting started on IoT on the Raspberry Pi. 
+* 2019-10-27 #295 Raspberry Pi Server based on Docker, with VPN, Dropbox backup, Influx, Grafana, etc: IOTstack -This Docker stack consists of: + [![#295](http://img.youtube.com/vi/a6mjt8tWUws/0.jpg)](https://www.youtube.com/watch?v=a6mjt8tWUws) + +OneMarcFifty: -* Node-RED -* Grafana -* InfluxDB -* Postgres -* Mosquitto mqtt -* Portainer -* Adminer -* openHAB -* Home Assistant (HASSIO) -* zigbee2mqtt -* Pi-Hole -* TasmoAdmin (parial wiki) -* Plex media server -* Telegraf (wiki coming soon) -* RTL_433 -* EspruinoHub (testing) -* MotionEye -* MariaDB -* Plex -* Homebridge +* 2021-02-02 Raspberry Pi 4 Home Automation Server with IOTstack -In addition, there is a write-up and some scripts to get a dynamic DNS via duckdns and VPN up and running. + [![20210202](http://img.youtube.com/vi/5a2qhKMetGU/0.jpg)](https://www.youtube.com/watch?v=5a2qhKMetGU) -Firstly what is docker? The correct question is "what are containers?". Docker is just one of the utilities to run a container. +### getting started -A Container can be thought of as ultra-minimal virtual machines, they are a collection of binaries that run in a sandbox environment. You download a preconfigured base image and create a new container. Only the differences between the base and your "VM" are stored. -Containers don't have [GUI](https://en.wikipedia.org/wiki/Graphical_user_interface)s so generally the way you interact with them is via web services or you can launch into a terminal. -One of the major advantages is that the image comes mostly preconfigured. +See [Getting Started](https://sensorsiot.github.io/IOTstack/Getting-Started) in the [IOTstack Wiki](https://sensorsiot.github.io/IOTstack/). It includes: -There are pro's and cons for using native installs vs containers. For me, one of the best parts of containers is that it doesn't "clutter" your device. If you don't need Postgres anymore then just stop and delete the container. It will be like the container was never there. +* How to download the project (including constraints you need to observe). +* How to migrate from the older gcgarner/IOTstack repository. +* Running the menu to install Docker and set up your containers. +* Useful Docker commands (start \& stop the stack, manage containers). +* Stack maintenance. -The container will fail if you try to run the docker and native vesions as the same time. It is best to install this on a fresh system. +### reporting issues -For those looking for a script that installs native applications check out [Peter Scargill's script](https://tech.scargill.net/the-script/) - -## Tested platform +Please use the [issues](https://github.com/SensorsIot/IOTstack/issues) tab to report issues or make suggestions for new features or containers. -Raspberry Pi 3B and 4B Raspbian (Buster) +### submitting pull requests -### Older Pi's +Pull Requests are always welcome. Your contribution helps improve the project for everyone. Please see [creating Pull Requests](https://gist.github.com/Paraphraser/818bf54faf5d3b3ed08d16281f32297d) for a guide on the process. -Docker will not run on a PiZero or A model 1 because of the CPU. It has not been tested on a Model 2. You can still use Peter Scargill's [script](https://tech.scargill.net/the-script/) +### community -## Running under a virtual machine +We have a Discord server setup for discussions: [IOTstack Discord channel](https://discord.gg/ZpKHnks) if you want to comment on features, suggest new container types, or ask the IOTstack community for help. 
-For those wanting to test out the script in a Virtual Machine before installing on their Pi there are some limitations. The script is designed to work with Debian based distributions. Not all the container have x86_64 images. For example Portainer does not and will give an error when you try and start the stack. Please see the pinned issue [#29](https://github.com/gcgarner/IOTstack/issues/29), there is more info there. - -## Feature Requests - -Please direct all feature requests to [Discord](https://discord.gg/W45tD83) - -## Youtube reference - -This repo was originally inspired by Andreas Spiess's video on using some of these tools. Some containers have been added to extend its functionality. - -[YouTube video](https://www.youtube.com/watch?v=JdV4x925au0): This is an alternative approach to the setup. Be sure to watch the video for the instructions. Just note that the network addresses are different, see the wiki under Docker Networks. - -### YouTube guide - -@peyanski (Kiril) made a YouTube video on getting started using the project, check it out [here](https://youtu.be/5JMNHuHv134) - -## Download the project - -1.On the lite image you will need to install git first - -```bash -sudo apt-get install git -``` - -2.Download the repository with: - -```bash -git clone https://github.com/gcgarner/IOTstack.git ~/IOTstack -``` - -Due to some script restraints, this project needs to be stored in ~/IOTstack - -3.To enter the directory run: - -```bash -cd ~/IOTstack -``` - -## The Menu - -I've added a menu to make things easier. It is good to familiarise yourself with the installation process. -The menu can be used to install docker and build the docker-compose.yml file necessary for starting the stack. It also runs a few common commands. I do recommend you start to learn the docker and docker-compose commands if you plan on using docker in the long run. I've added several helper scripts, have a look inside. - -Navigate to the project folder and run `./menu.sh` - -### Installing from the menu - -Select the first option and follow the prompts - -### Build the docker-compose file - -docker-compose uses the `docker-compose.yml` file to configure all the services. Run through the menu to select the options you want to install. - -### Docker commands - -This menu executes shell scripts in the root of the project. It is not necessary to run them from the menu. Open up the shell script files to see what is inside and what they do. - -### Miscellaneous commands - -Some helpful commands have been added like disabling swap. - -## Running Docker commands - -From this point on make sure you are executing the commands from inside the project folder. Docker-compose commands need to be run from the folder where the docker-compose.yml is located. If you want to move the folder make sure you move the whole project folder. - -## Starting and Stopping containers - -to start the stack navigate to the project folder containing the docker-compose.yml file - -To start the stack run: -`docker-compose up -d` or `./scripts/start.sh` - -To stop: -`docker-compose stop` - -The first time you run 'start' the stack docker will download all the images for the web. Depending on how many containers you selected and your internet speed this can take a long while. - -The `docker-compose down` command stops the containers then deletes them. - -## Persistent data - -Docker allows you to map folders inside your containers to folders on the disk. This is done with the "volume" key. There are two types of volumes. 
Modification to the container are reflected in the volume. - -## See Wiki for further info - -[Wiki](https://github.com/gcgarner/IOTstack/wiki) - -## Add to the project - -Feel free to add your comments on features or images that you think should be added. - -## Contributions - -If you use some of the tools in the project please consider donating or contributing on their projects. It doesn't have to be monetary, reporting bugs and PRs help improve the projects for everyone. - -### Thanks - -@mrmx, @oscrx, @brianimmel, @Slyke, @AugustasV, @Paulf007, @affankingkhan, @877dev, @Paraphraser, @stfnhmplr, @peyanski, @cmskedgell +If you use some of the tools in the project please consider donating or contributing on their projects. It doesn't have to be monetary. diff --git a/docs/Backups.md.old b/docs/Backups.md.old new file mode 100644 index 000000000..c220ed2ef --- /dev/null +++ b/docs/Backups.md.old @@ -0,0 +1,113 @@ +# Backups +Because containers can easily be rebuilt from docker hub we only have to back up the data in the "volumes" directory. + +## Cloud Backups +### Dropbox-Uploader +This a great utility to easily upload data from your Pi to the cloud. https://magpi.raspberrypi.org/articles/dropbox-raspberry-pi. It can be installed from the Menu under Backups. +### rclone (Google Drive) +This is a service to upload to Google Drive. The config is described [here]( https://medium.com/@artur.klauser/mounting-google-drive-on-raspberry-pi-f5002c7095c2). Install it from the menu then follow the link for these sections: +* Getting a Google Drive Client ID +* Setting up the Rclone Configuration + +When naming the service in `rclone config` ensure to call it "gdrive" + +**The Auto-mounting instructions for the drive in the link don't work on Rasbian**. Auto-mounting of the drive isn't necessary for the backup script. + +If you want your Google Drive to mount on every boot then follow the instructions at the bottom of the wiki page + + +## Influxdb +`~/IOTstack/scripts/backup_influxdb.sh` does a database snapshot and stores it in ~/IOTstack/backups/influxdb/db . This can be restored with the help a script (that I still need to write) + +## Docker backups +The script `~/IOTstack/scripts/docker_backup.sh` performs the master backup for the stack. + +This script can be placed in a cron job to backup on a schedule. +Edit the crontab with`crontab -e` +Then add `0 23 * * * ~/IOTstack/scripts/docker_backup.sh >/dev/null 2>&1` to have a backup every night at 23:00. + +This script cheats by copying the volume folder live. The correct way would be to stop the stack first then copy the volumes and restart. The cheating method shouldn't be a problem unless you have fast changing data like in influxdb. This is why the script makes a database export of influxdb and ignores its volume. + +### Cloud integration +The docker_backup.sh script now no longer requires modification to enable cloud backups. It now tests for the presence of and enable file in the backups folder +#### Drobox-Uploader +The backup tests for a file called `~/IOTstack/backups/dropbox`, if it is present it will upload to dropbox. To disable dropbox upload delete the file. To enable run `sudo touch ~/IOTstack/backups/dropbox` +#### rclone +The backup tests for a file called `~/IOTstack/backups/rclone`, if it is present it will upload to google drive. To disable rclone upload delete the file. To enable run `sudo touch ~/IOTstack/backups/rclone` + +#### Pruning online backups +@877dev has added functionality to prune both local and cloud backups. 
For Dropbox, make sure you don't have any files that contain spaces in your backup directory, as the script cannot handle them at this time.
+
+### Restoring a backup
+The "volumes" directory contains all the persistent data necessary to recreate the container. The docker-compose.yml and the environment files are optional as they can be regenerated with the menu. Simply copy the volumes directory into the IOTstack directory, rebuild the stack, and start it.
+
+## Added your Dropbox token incorrectly or aborted the install at the token screen
+
+Make sure you are running the latest version of the project [link](https://sensorsiot.github.io/IOTstack/Updating-the-Project/).
+
+Run `~/Dropbox-Uploader/dropbox_uploader.sh unlink`; if you have already added a key it will prompt you to confirm its removal. If no key was found it will ask you for a new key.
+
+Confirm by running `~/Dropbox-Uploader/dropbox_uploader.sh`. It should ask you for your key if you removed it, or show you the following prompt if it still has the key:
+
+``` console
+ $ ~/Dropbox-Uploader/dropbox_uploader.sh
+Dropbox Uploader v1.0
+Andrea Fabrizi - andrea.fabrizi@gmail.com
+
+Usage: /home/pi/Dropbox-Uploader/dropbox_uploader.sh [PARAMETERS] COMMAND...
+
+Commands:
+ upload
+ download [LOCAL_FILE/DIR]
+ delete
+ move
+ copy
+ mkdir
+....
+
+```
+
+Ensure you **are not** running as sudo, as this will store your API token in the /root directory as `/root/.dropbox_uploader`.
+
+If you ran the command with sudo, remove the old token file (if it exists) with either `sudo rm /root/.dropbox_uploader` or `sudo ~/Dropbox-Uploader/dropbox_uploader.sh unlink`.
+
+## Auto-mount Gdrive with rclone
+
+To enable rclone to mount on boot you will need to make a user service. Run the following commands:
+
+``` console
+$ mkdir -p ~/.config/systemd/user
+$ nano ~/.config/systemd/user/gdrive.service
+```
+Copy the following code into the editor, then save and exit:
+
+```
+[Unit]
+Description=rclone: Remote FUSE filesystem for cloud storage
+Documentation=man:rclone(1)
+
+[Service]
+Type=notify
+ExecStartPre=/bin/mkdir -p %h/mnt/gdrive
+ExecStart= \
+    /usr/bin/rclone mount \
+    --fast-list \
+    --vfs-cache-mode writes \
+    gdrive: %h/mnt/gdrive
+
+[Install]
+WantedBy=default.target
+```
+Enable it to start on boot (no sudo) with:
+``` console
+$ systemctl --user enable gdrive.service
+```
+Start it with:
+``` console
+$ systemctl --user start gdrive.service
+```
+If you no longer want it to start on boot, type:
+``` console
+$ systemctl --user disable gdrive.service
+```
+
diff --git a/docs/Basic_setup/Accessing-your-Device-from-the-internet.md b/docs/Basic_setup/Accessing-your-Device-from-the-internet.md
new file mode 100644
index 000000000..99bfa1cb0
--- /dev/null
+++ b/docs/Basic_setup/Accessing-your-Device-from-the-internet.md
@@ -0,0 +1,192 @@
+# Accessing your device from the internet
+
+The challenge most of us face with remotely accessing our home networks is that our routers usually have a dynamically-allocated IP address on the public (WAN) interface.
+
+From time to time the IP address that your ISP assigns changes and it's difficult to keep up. Fortunately, there is a solution: Dynamic DNS. The section below shows you how to set up an easy-to-remember domain name that follows your public IP address no matter when it changes.
+
+Secondly, how do you get into your home network? Your router has a firewall that is designed to keep the rest of the internet out of your network to protect you. The solution to that is a Virtual Private Network (VPN) or "tunnel".
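+
+Before diving into the Dynamic DNS setup below, it can help to see the moving target for yourself by asking an external service what your router's current public IP address is. This is only an illustrative check (the lookup service named here is one of several and is not part of IOTstack); the address it reports should match what you see at [whatismyipaddress.com](https://whatismyipaddress.com), and it is the address that can change without warning:
+
+``` console
+$ curl -s https://ifconfig.me
+```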
+ +## Dynamic DNS + +There are two parts to a Dynamic DNS service: + +1. You have to register with a Dynamic DNS service provider and obtain a domain name that is not already taken by someone else. +2. Something on your side of the network needs to propagate updates so that your chosen domain name remains in sync with your router's dynamically-allocated public IP address. + +### Register with a Dynamic DNS service provider + +The first part is fairly simple and there are quite a few Dynamic DNS service providers including: + +* [DuckDNS.org](https://www.duckdns.org) +* [NoIP.com](https://www.noip.com) + +> You can find more service providers by Googling ["Dynamic DNS service"](https://www.google.com/search?q=%22Dynamic%20DNS%20service%22). + +Some router vendors also provide their own built-in Dynamic DNS capabilities for registered customers so it's a good idea to check your router's capabilities before you plough ahead. + +### Dynamic DNS propagation + +The "something" on your side of the network propagating WAN IP address changes can be either: + +* your router; or +* a "behind the router" technique, typically a periodic job running on the same Raspberry Pi that is hosting IOTstack and WireGuard. + +If you have the choice, your router is to be preferred. That's because your router is usually the only device in your network that actually knows when its WAN IP address changes. A Dynamic DNS client running on your router will propagate changes immediately and will only transmit updates when necessary. More importantly, it will persist through network interruptions or Dynamic DNS service provider outages until it receives an acknowledgement that the update has been accepted. + +Nevertheless, your router may not support the Dynamic DNS service provider you wish to use, or may come with constraints that you find unsatisfactory so any behind-the-router technique is always a viable option, providing you understand its limitations. + +A behind-the-router technique usually relies on sending updates according to a schedule. An example is a `cron` job that runs every five minutes. That means any router WAN IP address changes won't be propagated until the next scheduled update. In the event of network interruptions or service provider outages, it may take close to ten minutes before everything is back in sync. Moreover, given that WAN IP address changes are infrequent events, most scheduled updates will be sending information unnecessarily. + +### DuckDNS container + +The recommended and easiest solution is to install the Duckdns docker-container +from the menu. It includes the cron service and logs are handled by Docker. + +For configuration see [Containers/Duck DNS]( ../Containers/Duckdns.md). + +!!! note + This is a recently added container, please don't hesitate to report any + possible faults to Discord or as Github issues. + +### DuckDNS client script { #duckdns-client } + +!!! info + This method will soon be deprecated in favor of the DuckDNS container. + +IOTstack provides a solution for DuckDNS. The best approach to running it is: + +``` console +$ mkdir -p ~/.local/bin +$ cp ~/IOTstack/duck/duck.sh ~/.local/bin +``` + +> The reason for recommending that you make a copy of `duck.sh` is because the "original" is under Git control. If you change the "original", Git will keep telling you that the file has changed and it may block incoming updates from GitHub. 
+ +Then edit `~/.local/bin/duck.sh` to add your DuckDNS domain name(s) and token: + +```bash +DOMAINS="YOURS.duckdns.org" +DUCKDNS_TOKEN="YOUR_DUCKDNS_TOKEN" +``` + +For example: + +```bash +DOMAINS="downunda.duckdns.org" +DUCKDNS_TOKEN="8a38f294-b5b6-4249-b244-936e997c6c02" +``` + +Note: + +* The `DOMAINS=` variable can be simplified to just "YOURS", with the `.duckdns.org` portion implied, as in: + + ```bash + DOMAINS="downunda" + ``` + +Once your credentials are in place, test the result by running: + +``` console +$ ~/.local/bin/duck.sh +ddd, dd mmm yyyy hh:mm:ss ±zzzz - updating DuckDNS +OK +``` + +The timestamp is produced by the `duck.sh` script. The [expected responses from the DuckDNS service](https://www.duckdns.org/spec.jsp) are: + +* "OK" - indicating success; or +* "KO" - indicating failure. + +Check your work if you get "KO" or any other errors. + +Next, assuming `dig` is installed on your Raspberry Pi (`sudo apt install dnsutils`), you can test propagation by sending a directed query to a DuckDNS name server. For example, assuming the domain name you registered was `downunda.duckdns.org`, you would query like this: + +``` console +$ dig @ns1.duckdns.org downunda.duckdns.org +short +``` + +The expected result is the IP address of your router's WAN interface. It is a good idea to confirm that it is the same as you get from [whatismyipaddress.com](https://whatismyipaddress.com). + +A null result indicates failure so check your work. + +Remember, the Domain Name System is a *distributed* database. It takes *time* for changes to propagate. The response you get from directing a query to ns1.duckdns.org may not be the same as the response you get from any other DNS server. You often have to wait until cached records expire and a recursive query reaches the authoritative DuckDNS name-servers. + +#### Running the DuckDNS client automatically + +The recommended arrangement for keeping your Dynamic DNS service up-to-date is to invoke `duck.sh` from `cron` at five minute intervals. + +If you are new to `cron`, see these guides for more information about setting up and editing your `crontab`: + +* [raspberrytips.com](https://raspberrytips.com/schedule-task-raspberry-pi/) +* [pimylifeup.com](https://pimylifeup.com/cron-jobs-and-crontab/) + +A typical `crontab` will look like this: + +```bash +SHELL=/bin/bash +HOME=/home/pi +PATH=/home/pi/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + +*/5 * * * * duck.sh >/dev/null 2>&1 +``` + +The first three lines construct the runtime environment correctly and should be at the start of any `crontab`. + +The last line means "run duck.sh every five minutes". See [crontab.guru](https://crontab.guru/#*/5_*_*_*_*) if you want to understand the syntax of the last line. + +When launched in the background by `cron`, the script supplied with IOTstack adds a random delay of up to one minute to try to reduce the "hammering effect" of a large number of users updating DuckDNS simultaneously. + +Standard output and standard error are redirected to `/dev/null` which is appropriate in this instance. When DuckDNS is working correctly (which is most of the time), the only output from the `curl` command is "OK". Logging that every five minutes would add wear and tear to SD cards for no real benefit. + +If you suspect DuckDNS is misbehaving, you can run the `duck.sh` command from a terminal session, in which case you will see all the `curl` output in the terminal window. 
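If you want to rule out the script itself, you can also exercise the DuckDNS update API directly with `curl`. This is only a sketch based on the [DuckDNS spec](https://www.duckdns.org/spec.jsp) linked above; substitute your own sub-domain and token (the values below are the example placeholders used earlier on this page):

``` console
$ curl -s "https://www.duckdns.org/update?domains=downunda&token=8a38f294-b5b6-4249-b244-936e997c6c02"
OK
```

A reply of `OK` means DuckDNS accepted the update; `KO` means the domain or token is wrong. If the direct call succeeds but `duck.sh` does not, the problem lies in the script or its credentials rather than in the service.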
+ +If you wish to keep a log of `duck.sh` activity, the following will get the job done: + +1. Make a directory to hold log files: + + ``` console + $ mkdir -p ~/Logs + ``` + +2. Edit the last line of the `crontab` like this: + + ```bash + */5 * * * * duck.sh >>./Logs/duck.log 2>&1 + ``` + +Remember to prune the log from time to time. The generally-accepted approach is: + +``` console +$ cat /dev/null >~/Logs/duck.log +``` + +## Virtual Private Network + +### WireGuard + +WireGuard is supplied as part of IOTstack. See [WireGuard documentation](../Containers/WireGuard.md). + +### PiVPN + +pimylifeup.com has an excellent tutorial on how to install [PiVPN](https://pimylifeup.com/raspberry-pi-vpn-server/) + +In point 17 and 18 they mention using noip for their dynamic DNS. Here you can use the DuckDNS address if you created one. + +Don't forget you need to open the port 1194 on your firewall. Most people won't be able to VPN from inside their network so download OpenVPN client for your mobile phone and try to connect over mobile data. ([More info.](https://en.wikipedia.org/wiki/Hairpinning)) + +Once you activate your VPN (from your phone/laptop/work computer) you will effectively be on your home network and you can access your devices as if you were on the wifi at home. + +I personally use the VPN any time I'm on public wifi, all your traffic is secure. + +### Zerotier + +https://www.zerotier.com/ + +Zerotier is an alternative to PiVPN that doesn't require port forwarding on your router. It does however require registering for their free tier service [here](https://my.zerotier.com/login). + +Kevin Zhang has written a how to guide [here](https://iamkelv.in/blog/2017/06/zerotier.html). Just note that the install link is outdated and should be: + +``` console +$ curl -s 'https://raw.githubusercontent.com/zerotier/ZeroTierOne/master/doc/contact%40zerotier.com.gpg' | gpg --import && \ +if z=$(curl -s 'https://install.zerotier.com/' | gpg); then echo "$z" | sudo bash; fi +``` diff --git a/docs/Basic_setup/Backup-and-Restore.md b/docs/Basic_setup/Backup-and-Restore.md new file mode 100644 index 000000000..48de688a1 --- /dev/null +++ b/docs/Basic_setup/Backup-and-Restore.md @@ -0,0 +1,110 @@ +# Backing up and restoring IOTstack +This page explains how to use the backup and restore functionality of IOTstack. + +## Backup +The backup command can be executed from IOTstack's menu, or from a cronjob. + +### Running backup +To ensure that all your data is saved correctly, the stack should be brought down. This is mainly due to databases potentially being in a state that could cause data loss. + +There are 2 ways to run backups: + +* From the menu: `Backup and Restore` > `Run backup` +* Running the following command: `bash ./scripts/backup.sh` + +The command that's run from the command line can also be executed from a cronjob: + +```0 2 * * * cd /home/pi/IOTstack && /bin/bash ./scripts/backup.sh``` + +The current directory of bash must be in IOTstack's directory, to ensure that it can find the relative paths of the files it's meant to back up. In the example above, it's assume that it's inside the `pi` user's home directory. + +### Arguments +``` +./scripts/backup.sh {TYPE=3} {USER=$(whoami)} +``` + +* Types: + * 1 = Backup with Date + * A tarball file will be created that contains the date and time the backup was started, in the filename. + * 2 = Rolling Date + * A tarball file will be created that contains the day of the week (0-6) the backup was started, in the filename. 
* If a tarball already exists with the same name, it will be overwritten. + * 3 = Both +* User: + This parameter only becomes active if run as root. The script will default to the currently logged-in user. + If this parameter is not supplied when run as root, the script will ask for the username as input. + +Backups: + + * You can find the backups in the `./backups/` folder, with rolling backups in `./backups/rolling/` and date backups in `./backups/backup/`. + * Log files can also be found in the `./backups/logs/` directory. + +### Examples: + + * `./scripts/backup.sh` + * `./scripts/backup.sh 3` + +Either of these will run both backups. + + * `./scripts/backup.sh 2` + +This will only produce a backup in the rolling folder. It will be called 'backup_XX.tar.gz' where XX is the current day of the week (as an integer). + + * `sudo bash ./scripts/backup.sh 2 pi` + +This will only produce a backup in the rolling folder and change all the permissions to the 'pi' user. + +## Restore +There are 2 ways to run a restore: + +* From the menu: `Backup and Restore` > `Restore from backup` +* Running the following command: `bash ./scripts/restore.sh` + +**Important**: The restore script assumes that the IOTstack directory is fresh, as if it was just cloned. If it is not fresh, errors may occur, or your data may not correctly be restored even if no errors are apparent. + +*Note*: It is suggested that you test that your backups can be restored after initially setting up, and anytime you add or remove a service. Major updates to services can also break backups. + +### Arguments +``` +./scripts/restore.sh {FILENAME=backup.tar.gz} {noask} +``` +The restore script takes 2 arguments: + +* Filename: The name of the backup file. The file must be present in the `./backups/` directory, or a subfolder in it. That means it should be moved from `./backups/backup` to `./backups/`, or that you need to specify the `backup` portion of the directory (see examples). +* NoAsk: If a second parameter is present, its presence sets the no-ask flag to true. + +## Pre and post script hooks +The script checks if there are any pre- and post-backup hooks to execute commands. Both of these files will be included in the backup, and have also been added to the `.gitignore` file, so that they will not be touched when IOTstack updates. + +### Prebackup script hook +The prebackup hook script is executed before any compression happens and before anything is written to the temporary backup manifest file (`./.tmp/backup-list_{{NAME}}.txt`). It can be used to prepare any services (such as databases that IOTstack isn't aware of) for backing up. + +To use it, simply create a `./pre_backup.sh` file in IOTstack's main directory. It will be executed next time a backup runs. + +### Postbackup script hook
The postbackup hook script is executed after the tarball file has been written to disk, and before the final backup log information is written to disk. + +To use it, simply create a `./post_backup.sh` file in IOTstack's main directory. It will be executed after the next time a backup runs. + +### Post restore script hook +The post restore hook script is executed after all files have been extracted and written to disk. It can be used to apply permissions that your custom services may require. + +To use it, simply create a `./post_restore.sh` file in IOTstack's main directory. It will be executed after a restore happens. + +## Third party integration +This section explains how to back up your files with 3rd party software. + +### Dropbox +Coming soon.
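Until this section is fleshed out, here is a minimal sketch of a manual upload using Dropbox-Uploader (this assumes Dropbox-Uploader has already been installed and linked as described elsewhere in this documentation; the backup filename and the `IOTstackBackups` remote folder are only illustrative):

``` console
$ cd ~/IOTstack
$ ls ./backups/backup/        # pick the tarball you want to upload
$ ~/Dropbox-Uploader/dropbox_uploader.sh upload ./backups/backup/<your-backup>.tar.gz IOTstackBackups/
```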
+ +### Google Drive +Coming soon. + +### rsync +Coming soon. + +### Duplicati +Coming soon. + +### SFTP +Coming soon. diff --git a/docs/Basic_setup/Custom.md b/docs/Basic_setup/Custom.md new file mode 100644 index 000000000..79a3836d4 --- /dev/null +++ b/docs/Basic_setup/Custom.md @@ -0,0 +1,258 @@ +# Custom overrides + +Each time you build the stack from the menu, the Docker Compose file +`docker-compose.yml` is recreated, losing any custom changes you've made. There +are different ways of dealing with this: + +1. Not using the menu after you've made changes. Do remember to backup your + customized `docker-compose.yml`, in case you overwrite it by mistake or + habit from the menu. +2. Use the Docker Compose [inbuilt override mechanism]( + https://docs.docker.com/compose/extends/) by creating a file named + `docker-compose.override.yml`. This limits you to changing values and + appending to lists already present in your docker-compose.yml, but it's + handy as changes are immediately picked up by docker-compose commands. To + see the resulting final config run `docker-compose config`. +3. IOTstack menu, in the default master-branch, implements a mechanism to + merge the yaml file `compose-override.yml` with the menu-generated stack + into `docker-compose.yml`. This can be used to add even complete new + services. See below for details. +4. This is not an actual extension mechanism, but well worth mentioning: If + you need a new services that doesn't communicate with the services in + IOTstack, create it completely separately and independently into its own + folder, e.g. `~/customStack/docker-compose.yml`. This composition can then + be independently managed from that folder: `cd ~/customStack` and use + `docker-compose` commands as normal. The best override is the one you don't + have to make. + +## Custom services and overriding default settings for IOTstack +You can specify modifcations to the `docker-compose.yml` file, including your own networks and custom containers/services. + +Create a file called `compose-override.yml` in the main directory, and place your modifications into it. These changes will be merged into the `docker-compose.yml` file next time you run the build script. + +The `compose-override.yml` file has been added to the `.gitignore` file, so it shouldn't be touched when upgrading IOTstack. It has been added to the backup script, and so will be included when you back up and restore IOTstack. Always test your backups though! New versions of IOTstack may break previous builds. + +## How it works +1. After the build process has been completed, a temporary docker compose file is created in the `tmp` directory. +2. The script then checks if `compose-override.yml` exists: + * If it exists, then continue to step `3` + * If it does not exist, copy the temporary docker compose file to the main directory and rename it to `docker-compose.yml`. +3. Using the `yaml_merge.py` script, merge both the `compose-override.yml` and the temporary docker compose file together; Using the temporary file as the default values and interating through each level of the yaml structure, check to see if the `compose-override.yml` has a value set. +4. Output the final file to the main directory, calling it `docker-compose.yml`. + +## A word of caution +If you specify an override for a service, and then rebuild the `docker-compose.yml` file, but deselect the service from the list, then the YAML merging will still produce that override. 
+ +For example, lets say NodeRed was selected to have have the following override specified in `compose-override.yml`: +``` yaml +services: + nodered: + restart: always +``` + +When rebuilding the menu, ensure to have NodeRed service always included because if it's no longer included, the only values showing in the final `docker-compose.yml` file for NodeRed will be the `restart` key and its value. Docker Compose will error with the following message: + +`Service nodered has neither an image nor a build context specified. At least one must be provided.` + +When attempting to bring the services up with `docker-compose up -d`. + +Either remove the override for NodeRed in `compose-override.yml` and rebuild the stack, or ensure that NodeRed is built with the stack to fix this. + +## Examples + +### Overriding default settings +Lets assume you put the following into the `compose-override.yml` file: +``` yaml +services: + mosquitto: + ports: + - 1996:1996 + - 9001:9001 +``` + +Normally the mosquitto service would be built like this inside the `docker-compose.yml` file: +``` yaml +version: '3.6' +services: + mosquitto: + container_name: mosquitto + image: eclipse-mosquitto + restart: unless-stopped + user: "1883" + ports: + - 1883:1883 + - 9001:9001 + volumes: + - ./volumes/mosquitto/data:/mosquitto/data + - ./volumes/mosquitto/log:/mosquitto/log + - ./volumes/mosquitto/pwfile:/mosquitto/pwfile + - ./services/mosquitto/mosquitto.conf:/mosquitto/config/mosquitto.conf + - ./services/mosquitto/filter.acl:/mosquitto/config/filter.acl +``` + +Take special note of the ports list. + +If you run the build script with the `compose-override.yml` file in place, and open up the final `docker-compose.yml` file, you will notice that the port list have been replaced with the ones you specified in the `compose-override.yml` file. +``` yaml +version: '3.6' +services: + mosquitto: + container_name: mosquitto + image: eclipse-mosquitto + restart: unless-stopped + user: "1883" + ports: + - 1996:1996 + - 9001:9001 + volumes: + - ./volumes/mosquitto/data:/mosquitto/data + - ./volumes/mosquitto/log:/mosquitto/log + - ./volumes/mosquitto/pwfile:/mosquitto/pwfile + - ./services/mosquitto/mosquitto.conf:/mosquitto/config/mosquitto.conf + - ./services/mosquitto/filter.acl:/mosquitto/config/filter.acl +``` + +Do note that it will replace the entire list, if you were to specify +``` yaml +services: + mosquitto: + ports: + - 1996:1996 +``` + +Then the final output will be: +``` yaml +version: '3.6' +services: + mosquitto: + container_name: mosquitto + image: eclipse-mosquitto + restart: unless-stopped + user: "1883" + ports: + - 1996:1996 + volumes: + - ./volumes/mosquitto/data:/mosquitto/data + - ./volumes/mosquitto/log:/mosquitto/log + - ./volumes/mosquitto/pwfile:/mosquitto/pwfile + - ./services/mosquitto/mosquitto.conf:/mosquitto/config/mosquitto.conf + - ./services/mosquitto/filter.acl:/mosquitto/config/filter.acl +``` + +### Using env files instead of docker-compose variables + +If you need or prefer to use *.env files for docker-compose environment variables in a separate file instead of using overrides, you can do so like this: + +``` yaml +services: + grafana: + env_file: + - ./services/grafana/grafana.env + environment: +``` + +This will remove the default environment variables set in the template, and tell docker-compose to use the variables specified in your file. It is not mandatory that the *.env file be placed in the service's service directory, but is strongly suggested. 
Keep in mind the [PostBuild Script](../Developers/PostBuild-Script.md) functionality to automatically copy your *.env files into their directories on successful build if you need to. + +### Adding custom services + +Custom services can be added in a similar way to overriding default settings for standard services. Lets add a Minecraft and rcon server to IOTstack. +Firstly, put the following into `compose-override.yml`: +``` yaml +services: + mosquitto: + ports: + - 1996:1996 + - 9001:9001 + minecraft: + image: itzg/minecraft-server + ports: + - "25565:25565" + volumes: + - "./volumes/minecraft:/data" + environment: + EULA: "TRUE" + TYPE: "PAPER" + ENABLE_RCON: "true" + RCON_PASSWORD: "PASSWORD" + RCON_PORT: 28016 + VERSION: "1.15.2" + REPLACE_ENV_VARIABLES: "TRUE" + ENV_VARIABLE_PREFIX: "CFG_" + CFG_DB_HOST: "http://localhost:3306" + CFG_DB_NAME: "IOTstack Minecraft" + CFG_DB_PASSWORD_FILE: "/run/secrets/db_password" + restart: unless-stopped + rcon: + image: itzg/rcon + ports: + - "4326:4326" + - "4327:4327" + volumes: + - "./volumes/rcon_data:/opt/rcon-web-admin/db" +secrets: + db_password: + file: ./db_password +``` + +Then create the service directory that the new instance will use to store persistant data: + +`mkdir -p ./volumes/minecraft` + +and + +`mkdir -p ./volumes/rcon_data` + +Obviously you will need to give correct folder names depending on the `volumes` you specify for your custom services. If your new service doesn't require persistant storage, then you can skip this step. + +Then simply run the `./menu.sh` command, and rebuild the stack with what ever services you had before. + +Using the Mosquitto example above, the final `docker-compose.yml` file will look like: + +``` yaml +version: '3.6' +services: + mosquitto: + ports: + - 1996:1996 + - 9001:9001 + container_name: mosquitto + image: eclipse-mosquitto + restart: unless-stopped + user: '1883' + volumes: + - ./volumes/mosquitto/data:/mosquitto/data + - ./volumes/mosquitto/log:/mosquitto/log + - ./services/mosquitto/mosquitto.conf:/mosquitto/config/mosquitto.conf + - ./services/mosquitto/filter.acl:/mosquitto/config/filter.acl + minecraft: + image: itzg/minecraft-server + ports: + - 25565:25565 + volumes: + - ./volumes/minecraft:/data + environment: + EULA: 'TRUE' + TYPE: PAPER + ENABLE_RCON: 'true' + RCON_PASSWORD: PASSWORD + RCON_PORT: 28016 + VERSION: 1.15.2 + REPLACE_ENV_VARIABLES: 'TRUE' + ENV_VARIABLE_PREFIX: CFG_ + CFG_DB_HOST: http://localhost:3306 + CFG_DB_NAME: IOTstack Minecraft + CFG_DB_PASSWORD_FILE: /run/secrets/db_password + restart: unless-stopped + rcon: + image: itzg/rcon + ports: + - 4326:4326 + - 4327:4327 + volumes: + - ./volumes/rcon_data:/opt/rcon-web-admin/db +secrets: + db_password: + file: ./db_password +``` + +Do note that the order of the YAML keys is not guaranteed. diff --git a/docs/Basic_setup/Default-Configs.md b/docs/Basic_setup/Default-Configs.md new file mode 100644 index 000000000..5280eb4e7 --- /dev/null +++ b/docs/Basic_setup/Default-Configs.md @@ -0,0 +1,60 @@ +# Default ports + +Here you can find a list of the default mode and ports used by each service found in the .templates directory. + +This list can be generated by running the default_ports_md_generator.sh script. + +| Service Name | Mode | Port(s)
*External:Internal* |
+| ------------ | ----- | --------------- |
+| adguardhome | non-host | 53:53<br>8089:8089<br>3001:3000 |
+| adminer | non-host | 9080:8080 |
+| blynk_server | non-host | 8180:8080<br>8440:8440<br>9443:9443 |
+| chronograf | non-host | 8888:8888 |
+| dashmachine | non-host | 5000:5000 |
+| deconz | non-host | 8090:80<br>443:443<br>5901:5900 |
+| diyhue | non-host | 8070:80<br>1900:1900<br>1982:1982<br>2100:2100 |
+| domoticz | non-host | 8083:8080<br>6144:6144<br>1443:1443 |
+| dozzle | non-host | 8889:8080 |
+| duckdns | host | |
+| espruinohub | host | |
+| gitea | non-host | 7920:3000<br>2222:22 |
+| grafana | non-host | 3000:3000 |
+| heimdall | non-host | 8880:80<br>8883:443 |
+| home_assistant | host | |
+| homebridge | host | |
+| homer | non-host | 8881:8080 |
+| influxdb | non-host | 8086:8086 |
+| influxdb2 | non-host | 8087:8086 |
+| kapacitor | non-host | 9092:9092 |
+| mariadb | non-host | 3306:3306 |
+| mosquitto | non-host | 1883:1883 |
+| motioneye | non-host | 8765:8765<br>8081:8081 |
+| n8n | non-host | 5678:5678 |
+| nextcloud | non-host | 9321:80 |
+| nodered | non-host | 1880:1880 |
+| octoprint | non-host | 9980:80 |
+| openhab | host | |
+| pihole | non-host | 8089:80<br>53:53<br>67:67 |
+| plex | host | |
+| portainer-ce | non-host | 8000:8000<br>9000:9000 |
+| portainer-agent | non-host | 9001:9001 |
+| postgres | non-host | 5432:5432 |
+| prometheus-cadvisor | non-host | 8082:8080 |
+| prometheus-nodeexporter | non-host | |
+| prometheus | non-host | 9090:9090 |
+| python | non-host | |
+| qbittorrent | non-host | 6881:6881<br>15080:15080<br>1080:1080 |
+| ring-mqtt | non-host | 8554:8554<br>55123:55123 |
+| rtl_433 | non-host | |
+| scrypted | host | 10443:10443 |
+| syncthing | host | |
+| tasmoadmin | non-host | 8088:80 |
+| telegraf | non-host | 8092:8092<br>8094:8094<br>8125:8125 |
+| timescaledb | non-host | |
+| transmission | non-host | 9091:9091<br>51413:51413 |
+| webthingsio_gateway | host | |
+| wireguard | non-host | 51820:51820 |
+| zerotier | host | |
+| zigbee2mqtt | non-host | 8080:8080 |
+| zigbee2mqtt_assistant | non-host | 8880:80
| diff --git a/docs/Basic_setup/Docker.md b/docs/Basic_setup/Docker.md new file mode 100644 index 000000000..7e5851fe6 --- /dev/null +++ b/docs/Basic_setup/Docker.md @@ -0,0 +1,60 @@ + +## Logging + +When Docker starts a container, it executes its *entrypoint* command. Any +output produced by this command is logged by Docker. By default Docker stores +logs internally together with other data associated to the container image. + +This has the effect that when recreating or updating a container, logs shown by +`docker-compose logs` won't show anything associated with the previous +instance. Use `docker system prune` to remove old instances and free up disk +space. Keeping logs only for the latest instance is helpful when testing, but +may not be desirable for production. + +By default there is no limit on the log size. Surprisingly, when using a +SD-card this is exactly what you want. If a runaway container floods the log +with output, writing will stop when the disk becomes full. Without a mechanism +to prevent such excessive writing, the SD-card would keep being written to +until the flash hardware [program-erase cycle]( +https://www.techtarget.com/searchstorage/definition/P-E-cycle) limit is +reached, after which it is permanently broken. + +When using a quality **SSD-drive**, potential flash-wear isn't usually a +concern. Then you can enable log-rotation by either: + +1. Configuring Docker to do it for you automatically. Edit your + `docker-compose.yml` and add a top-level *x-logging* and a *logging:* to + each service definition. The Docker compose reference documentation has + a good [example](https://docs.docker.com/compose/compose-file/compose-file-v3/#extension-fields). + +2. Configuring Docker to [log to the host system's journald]( + https://github.com/SensorsIot/IOTstack/issues/508#issuecomment-1094372250). + + ps. if `/etc/docker/daemon.json` doesn't exist, just create it. + +## Aliases + +Bash aliases for stopping and starting the stack and other common operations +are in the file `.bash_aliases`. To use them immediately and in future logins, +run in a console: + +``` console +$ source ~/IOTstack/.bash_aliases +$ echo "source ~/IOTstack/.bash_aliases" >> ~/.profile +``` + +These commands no longer need to be executed from the IOTstack directory and can be executed in any directory + +``` bash title=".bash_aliases" +--8<-- ".bash_aliases" +``` + +You can now type `iotstack_up`. The aliases also accept additional parameters, +e.g. `iotstack_stop portainer`. + +The `iotstack_update_docker_images` alias will [update docker images]( +http://localhost:8000/Updates/#recommended-update-only-docker-images) to newest +released images, build and recreate containers. Do note that using this will +result in a broken containers from time to time, as upstream may release faulty +docker images. Have proper backups, or be prepared to manually pin a previous +release build by editing `docker-compose.yml`. diff --git a/docs/Basic_setup/Menu.md b/docs/Basic_setup/Menu.md new file mode 100644 index 000000000..25f9db6c1 --- /dev/null +++ b/docs/Basic_setup/Menu.md @@ -0,0 +1,60 @@ +The `menu.sh`-script is used to create or modify the `docker-compose.yml`-file. +This file defines how all containers added to the stack are configured. + +## Miscellaneous + +### log2ram + +One of the drawbacks of an sd card is that it has a limited lifespan. One way +to reduce the load on the sd card is to move your log files to RAM. 
[log2ram]( +https://github.com/azlux/log2ram) is a convenient tool to simply set this up. +It can be installed from the miscellaneous menu. + +This only affects logs written to /var/log, and won't have any effect on Docker +logs or logs stored inside containers. + +### Dropbox-Uploader +This a great utility to easily upload data from your PI to the cloud. The +[MagPi](https://magpi.raspberrypi.org/articles/dropbox-raspberry-pi) has an +excellent explanation of the process of setting up the Dropbox API. +Dropbox-Uploader is used in the backup script. + +## Backup and Restore + +See [Backing up and restoring IOTstack](Backup-and-Restore.md) + +## Native Installs + +### RTL_433 +RTL_433 can be installed from the "Native install sections" + +[This video](https://www.youtube.com/watch?v=L0fSEbGEY-Q&t=386s) demonstrates +how to use RTL_433 + +### RPIEasy + +The installer will install any dependencies. If `~/rpieasy` exists it will +update the project to its latest, if not it will clone the project + +RPIEasy can be run by `sudo ~/rpieasy/RPIEasy.py` + +To have RPIEasy start on boot in the webui under hardware look for "RPIEasy +autostart at boot" + +RPIEasy will select its ports from the first available one in the list +(80,8080,8008). If you run Hass.io then there will be a conflict so check the +next available port + +## Old-menu branch details +The build script creates the ./services directory and populates it from the +template file in .templates . The script then appends the text withing each +service.yml file to the docker-compose.yml . When the stack is rebuilt the menu +does not overwrite the service folder if it already exists. Make sure to sync +any alterations you have made to the docker-compose.yml file with the +respective service.yml so that on your next build your changes pull through. + +The .gitignore file is setup such that if you do a `git pull origin master` it +does not overwrite the files you have already created. Because the build script +does not overwrite your service directory any changes in the .templates +directory will have no affect on the services you have already made. You will +need to move your service folder out to get the latest version of the template. diff --git a/docs/Basic_setup/Networking.md b/docs/Basic_setup/Networking.md new file mode 100644 index 000000000..caed06d0f --- /dev/null +++ b/docs/Basic_setup/Networking.md @@ -0,0 +1,30 @@ +# Networking +The docker-compose instruction creates an internal network for the containers to communicate in, the ports get exposed to the PI's IP address when you want to connect from outside. It also creates a "DNS" the name being the container name. So it is important to note that when one container talks to another they talk by name. All the containers names are lowercase like nodered, influxdb... + +An easy way to find out your IP is by typing `ip address` in the terminal and look next to eth0 or wlan0 for your IP. It is highly recommended that you set a static IP for your PI or at least reserve an IP on your router so that you know it + +Check the docker-compose.yml to see which ports have been used + +
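If the stack is already running, you can also ask Docker itself which ports are published, instead of reading `docker-compose.yml`. A quick sketch (the output shown is illustrative; your list will reflect the services you selected):

``` console
$ docker ps --format "table {{.Names}}\t{{.Ports}}"
NAMES       PORTS
nodered     0.0.0.0:1880->1880/tcp
influxdb    0.0.0.0:8086->8086/tcp
grafana     0.0.0.0:3000->3000/tcp
```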
+![net](https://user-images.githubusercontent.com/46672225/66702353-0bcc4080-ed07-11e9-994b-62219f50b096.png) +
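Container-to-container name resolution is easy to verify from the host. A quick check, assuming the `nodered` and `influxdb` containers are both running (most of the images used here include a basic `ping`):

``` console
$ docker exec nodered ping -c 2 influxdb
```

If the name resolves and replies come back, the internal Docker network and its built-in DNS are working.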
+ +## Examples +- You want to connect your nodered to your mqtt server. In nodered drop an mqtt node, when you need to specify the address type `mosquitto` +- You want to connect to your influxdb from grafana. You are in the Docker network and you need to use the name of the Container. The address you specify in the grafana is `http://influxdb:8086` +- You want to connect to the web interface of grafana from your laptop. Now you are outside the container environment you type PI's IP eg 192.168.n.m:3000 + +## Ports +Many containers try to use popular ports such as 80,443,8080. For example openHAB and Adminer both want to use port 8080 for their web interface. Adminer's port has been moved 9080 to accommodate this. Please check the description of the container in the README to see if there are any changes as they may not be the same as the port you are used to. + +Port mapping is done in the docker-compose.yml file. Each service should have a section that reads like this: +``` yaml + ports: + - HOST_PORT:CONTAINER_PORT +``` +For adminer: +``` yaml + ports: + - 9080:8080 +``` +Port 9080 on Host Pi is mapped to port 8080 of the container. Therefore 127.0.0.1:8080 will take you to openHAB, where 127.0.0.1:9080 will take you to adminer diff --git a/docs/Basic_setup/Troubleshooting.md b/docs/Basic_setup/Troubleshooting.md new file mode 100644 index 000000000..9f4886611 --- /dev/null +++ b/docs/Basic_setup/Troubleshooting.md @@ -0,0 +1,278 @@ + +## Resources + +* Search github [issues](https://github.com/SensorsIot/IOTstack/issues?q=). + + - Closed issues or pull-requests may also have valuable hints. + +* Ask questions on [IOTStack Discord](https://discord.gg/ZpKHnks). Or report + how you were able to fix a problem. + +* There are over 40 gists about IOTstack. These address a diverse range of + topics from small convenience scripts to complete guides. These are + individual contributions that aren't reviewed. + + You can add your own keywords into the search: + [https://gist.github.com/search?q=iotstack](https://gist.github.com/search?q=iotstack) + +## FAQ + +!!! danger "Breaking update" + A change done 2022-01-18 will require [manual steps]( + ../Updates/migration-network-change.md) + or you may get an error like: + `ERROR: Service "influxdb" uses an undefined network "iotstack_nw"` + +## Device Errors + +If you are trying to run IOTstack on non-Raspberry Pi hardware, you will probably get the following error from `docker-compose` when you try to bring up your stack for the first time: + +``` +Error response from daemon: error gathering device information while adding custom device "/dev/ttyAMA0": no such file or directory +``` + +> You will get a similar message about any device which is not known to your hardware. + +The `/dev/ttyAMA0` device is the Raspberry Pi's built-in serial port so it is guaranteed to exist on any "real" Raspberry Pi. As well as being referenced by containers that can actually use the serial port, `ttyAMA0` is often employed as a placeholder. + +Examples: + +* Node-RED flows can use the `node-red-node-serialport` node to access the serial port. This is an example of "actual use"; +* The Zigbee2MQTT container employs `ttyAMA0` as a placeholder. This allows the container to start. Once you have worked out how your Zigbee adapter appears on your system, you will substitute your adapter's actual device path. 
For example: + + ``` yaml + - "/dev/serial/by-id/usb-Texas_Instruments_TI_CC2531_USB_CDC___0X00125B0028EEEEE0-if00:/dev/ttyACM0" + ``` + +The simplest approach to solving "error gathering device information" problems is just to comment-out every device mapping that produces an error and, thereafter, treat the comments as documentation about what the container is expecting at run-time. For example, this is the devices list for Node-RED: + +``` yaml + devices: + - "/dev/ttyAMA0:/dev/ttyAMA0" + - "/dev/vcio:/dev/vcio" + - "/dev/gpiomem:/dev/gpiomem" +``` + +Those are, in turn, the Raspberry Pi's: + +* serial port +* videoCore multimedia processor +* mechanism for accessing GPIO pin headers + +If none of those is available on your chosen platform (the usual situation on non-Pi hardware), commenting-out the entire block is appropriate: + +``` yaml +# devices: +# - "/dev/ttyAMA0:/dev/ttyAMA0" +# - "/dev/vcio:/dev/vcio" +# - "/dev/gpiomem:/dev/gpiomem" +``` + +You interpret each line in a device map like this: + +``` yaml + - "«external»:«internal»" +``` + +The *«external»* device is what the platform (operating system plus hardware) sees. The *«internal»* device is what the container sees. Although it is reasonably common for the two sides to be the same, this is **not** a requirement. It is usual to replace the *«external»* device with the actual device while leaving the *«internal»* device unchanged. + +Here is an example. On macOS, a CP2102 USB-to-Serial adapter shows up as: + +``` +/dev/cu.SLAB_USBtoUART +``` + +Assume you are running the Node-RED container in macOS Docker Desktop, and that you want a flow to communicate with the CP2102. You would change the service definition like this: + +``` yaml + devices: + - "/dev/cu.SLAB_USBtoUART:/dev/ttyAMA0" +# - "/dev/vcio:/dev/vcio" +# - "/dev/gpiomem:/dev/gpiomem" +``` + +In other words, the *«external»* (real world) device `cu.SLAB_USBtoUART` is mapped to the *«internal»* (container) device `ttyAMA0`. The flow running in the container is expecting to communicate with `ttyAMA0` and is none-the-wiser. + +## Needing to use `sudo` to run docker commands + +You should never (repeat **never**) use `sudo` to run docker or docker compose commands. Forcing docker to do something with `sudo` almost always creates more problems than it solves. Please see [What is sudo?](https://sensorsiot.github.io/IOTstack/Basic_setup/What-is-sudo/) to understand how `sudo` actually works. + +If `docker` or `docker-compose` commands *seem* to need elevated privileges, the most likely explanation is incorrect group membership. Please read the [next section](#dockerGroup) about errors involving `docker.sock`. The solution (two `usermod` commands) is the same. + +If, however, the current user *is* a member of the `docker` group *but* you still get error responses that *seem* to imply a need for `sudo`, it implies that something fundamental is broken. Rather than resorting to `sudo`, you are better advised to rebuild your system. + +## Errors involving `docker.sock` { #dockerGroup } + +If you encounter permission errors that mention `/var/run/docker.sock`, the most likely explanation is the current user (usually "pi") not being a member of the "docker" group. + +You can check membership with the `groups` command: + +``` console +$ groups +pi adm dialout cdrom sudo audio video plugdev games users input render netdev bluetooth lpadmin docker gpio i2c spi +``` + +In that list, you should expect to see both `bluetooth` and `docker`. 
If you do not, you can fix the problem like this: + +``` console +$ sudo usermod -G docker -a $USER +$ sudo usermod -G bluetooth -a $USER +$ exit +``` + +The `exit` statement is **required**. You must logout and login again for the two `usermod` commands to take effect. An alternative is to reboot. + +## System freezes or SSD problems + +You should read this section if you experience any of the following problems: + +* Apparent system hangs, particularly if Docker containers were running at the time the system was shutdown or rebooted; +* Much slower than expected performance when reading/writing your SSD; or +* Suspected data-corruption on your SSD. + +### Try a USB2 port + +Start by shutting down your Pi and moving your SSD to one of the USB2 ports. The slower speed will often alleviate the problem. + +Tips: + +1. If you don't have sufficient control to issue a shutdown and/or your Pi won't shut down cleanly: + + - remove power + - move the SSD to a USB2 port + - apply power again. + +2. If you run "headless" and find that the Pi responds to pings but you can't connect via SSH: + + - remove power + - connect the SSD to a support platform (Linux, macOS, Windows) + - create a file named "ssh" at the top level of the boot partition + - eject the SSD from your support platform + - connect the SSD to a USB2 port on your Pi + - apply power again. + +### Check the `dhcpcd` patch + +Next, verify that the [dhcpcd patch](https://sensorsiot.github.io/IOTstack/Basic_setup/#patch-1-restrict-dhcp) is installed. There seems to be a timing component to the deadlock which is why it can be *alleviated*, to some extent, by switching the SSD to a USB2 port. + +If the `dhcpcd` patch was not installed but you have just installed it, try returning the SSD to a USB3 port. + +### Try a quirks string + +If problems persist even when the `dhcpcd` patch is in place, you *may* have an SSD which isn't up to the Raspberry Pi's expectations. Try the following: + +1. If your IOTstack is running, take it down. +2. If your SSD is attached to a USB3 port, shut down your Pi, move the SSD to a USB2 port, and apply power. +3. Run the following command: + + ``` console + $ dmesg | grep "\] usb [[:digit:]]-" + ``` + + In the output, identify your SSD. Example: + + ``` + [ 1.814248] usb 2-1: new SuperSpeed Gen 1 USB device number 2 using xhci_hcd + [ 1.847688] usb 2-1: New USB device found, idVendor=f0a1, idProduct=f1b2, bcdDevice= 1.00 + [ 1.847708] usb 2-1: New USB device strings: Mfr=99, Product=88, SerialNumber=77 + [ 1.847723] usb 2-1: Product: Blazing Fast SSD + [ 1.847736] usb 2-1: Manufacturer: Suspect Drives + ``` + + In the above output, the second line contains the Vendor and Product codes that you need: + + * `idVendor=f0a1` + * `idProduct=f1b2` + +4. Substitute the values of *«idVendor»* and *«idProduct»* into the following command template: + + ``` console + sed -i.bak '1s/^/usb-storage.quirks=«idVendor»:«idProduct»:u /' "$CMDLINE" + ``` + + This is known as a "quirks string". Given the `dmesg` output above, the string would be: + + ``` console + sed -i.bak '1s/^/usb-storage.quirks=f0a1:f1b2:u /' "$CMDLINE" + ``` + + Make sure that you keep the space between the `:u` and `/'`. You risk breaking your system if that space is not there. + +5. 
Run these commands - the second line is the one you prepared in step 4 using `sudo`: + + ``` console + $ CMDLINE="/boot/firmware/cmdline.txt" && [ -e "$CMDLINE" ] || CMDLINE="/boot/cmdline.txt" + $ sudo sed -i.bak '1s/^/usb-storage.quirks=f0a1:f1b2:u /' "$CMDLINE" + ``` + + The command: + + - makes a backup copy of `cmdline.txt` as `cmdline.txt.bak` + - inserts the quirks string at the start of `cmdline.txt`. + + You can confirm the result as follows: + + * display the original (baseline reference): + + ``` + $ cat "$CMDLINE.bak" + console=serial0,115200 console=tty1 root=PARTUUID=06c69364-02 rootfstype=ext4 fsck.repair=yes rootwait quiet splash plymouth.ignore-serial-consoles + ``` + + * display the modified version: + + ``` + $ cat "$CMDLINE" + usb-storage.quirks=f0a1:f1b2:u console=serial0,115200 console=tty1 root=PARTUUID=06c69364-02 rootfstype=ext4 fsck.repair=yes rootwait quiet splash plymouth.ignore-serial-consoles + ``` + +6. Shutdown your Pi. +7. Connect your SSD to a USB3 port and apply power. + +There is more information about this problem [on the Raspberry Pi forum](https://forums.raspberrypi.com/viewtopic.php?t=245931&sid=66012d5cf824004bbb414cb84874c8a4). + +## Getting a clean slate + +If you create a mess and can't see how to recover, try proceeding like this: + +``` console +$ cd ~/IOTstack +$ docker-compose down +$ cd +$ mv IOTstack IOTstack.old +$ git clone https://github.com/SensorsIot/IOTstack.git IOTstack +``` + +In words: + +1. Be in the right directory. +2. Take the stack down. +3. The `cd` command without any arguments changes your working directory to + your home directory (variously known as `~` or `$HOME` or `/home/pi`). +4. Move your existing IOTstack directory out of the way. If you get a + permissions problem: + + * Re-try the command with `sudo`; and + * Read [a word about the `sudo` command](What-is-sudo.md). Needing `sudo` + in this situation is an example of over-using `sudo`. + +5. Check out a clean copy of IOTstack. + +Now, you have a clean slate and can start afresh by running the menu: + +``` console +$ cd ~/IOTstack +$ ./menu.sh +``` + +The `IOTstack.old` directory remains available as a reference for as long as +you need it. Once you have no further use for it, you can clean it up via: + +``` console +$ cd +$ sudo rm -rf ./IOTstack.old # (1) +``` + +1. The `sudo` command is needed in this situation because some files and + folders (eg the "volumes" directory and most of its contents) are owned by + root. diff --git a/docs/Basic_setup/Understanding-Containers.md b/docs/Basic_setup/Understanding-Containers.md new file mode 100644 index 000000000..8f247b6fd --- /dev/null +++ b/docs/Basic_setup/Understanding-Containers.md @@ -0,0 +1,113 @@ +# What is Docker? + +In simple terms, Docker is a software platform that simplifies the process of building, running, +managing and distributing applications. It does this by virtualizing the operating system of the +computer on which it is installed and running. + +## The Problem +Let’s say you have three different Python-based applications that you plan to host on a single server +(which could either be a physical or a virtual machine). + +Each of these applications makes use of a different version of Python, as well as the associated +libraries and dependencies, differ from one application to another. + +Since we cannot have different versions of Python installed on the same machine, this prevents us from +hosting all three applications on the same computer. 
+ +## The Solution +Let's look at how we could solve this problem without making use of Docker. In such a scenario, we could solve it either by having three physical machines, or a single physical machine which is powerful enough to host and run three virtual machines. + +Both options would allow us to install different versions of Python on each of these machines, along with their associated dependencies. + +The machine on which Docker is installed and running is usually referred to as a Docker Host, or Host in simple terms. Whenever you plan to deploy an application on the host, Docker creates a logical entity on it to host that application. In Docker terminology, we call this logical entity a Container, or Docker Container to be more precise. + +Unlike a virtual machine, a container does not bring its own operating system: the kernel of the host's operating system is shared across all the containers that are running on it. + +Each container is still isolated from the others present on the same host. Docker therefore supports multiple containers with different application requirements and dependencies running on the same host, as long as they have the same operating system requirements. + +## Docker Terminology + +Docker Images and Docker Containers are the two essential things that you will come across daily while working with Docker. + +In simple terms, a Docker Image is a template that contains the application, and all the dependencies required to run that application on Docker. + +On the other hand, as stated earlier, a Docker Container is a logical entity. In more precise terms, it is a running instance of the Docker Image. + +## What is Docker-Compose? + +Docker Compose provides a way to orchestrate multiple containers that work together. Docker Compose is a simple yet powerful tool that is used to run multiple containers as a single service. For example, suppose you have an application which requires MQTT as a communication service between IoT devices and an OpenHAB instance as a smart home application service. With docker-compose you can describe both containers in a single file (docker-compose.yml) and bring them up as a single service, without starting each one separately. Docker Compose wires up the networks (literally), mounts all volumes and exposes the ports. + +IOTstack, with its templates and menu, is a generator for that docker-compose service description. + +## How does Docker Compose work? + +* YAML files are used to configure the application's services (`docker-compose.yml`) +* all the services can be started with a single command (`docker-compose up`) +* all the services can be stopped with a single command (`docker-compose down`) + +## How are the containers connected? +The containers are connected automatically when we run the stack with `docker-compose up`. By default the containers share the same logical network, and each instance can reach the others by its logical name, which is the container name. This means that if there is an instance called *mosquitto* and an instance called *openhab*, then when the openHAB instance needs to access MQTT, the host name `mosquitto` resolves to the running mosquitto instance. + +## How the containers are connected to the host machine + +### Volumes + +Containers are enclosed processes whose state is lost when the container is recreated. To persist state, volumes (images or directories) can be used to share data with the host. This means that if you need to persist a database, a configuration or any other state, you have to bind volumes into which the service running inside the container writes its files.
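As a concrete sketch of what such a binding looks like in an IOTstack service definition (Node-RED is used here because its mapping is short; the container-side path differs from service to service):

``` yaml
  nodered:
    volumes:
      # host folder (survives container rebuilds) : folder inside the container
      - ./volumes/nodered/data:/data
```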
+In order to understand what a Docker volume is, we first need to be clear about how the filesystem normally works in Docker. Docker images are stored as a series of read-only layers. When we start a container, Docker takes the read-only image and adds a read-write layer on top. If the running container modifies an existing file, the file is copied out of the underlying read-only layer and into the top-most read-write layer where the changes are applied. The version in the read-write layer hides the underlying file, but does not destroy it -- it still exists in the underlying layer. When a Docker container is deleted, relaunching the image will start a fresh container without any of the changes made in the previously running container -- those changes are lost. That is the reason why configurations and databases are not persisted by the container filesystem alone. + +Volumes are the preferred mechanism for persisting data generated by and used by Docker containers. While bind mounts are dependent on the directory structure of the host machine, volumes are completely managed by Docker. The IOTstack project generally uses the `./volumes` directory to bind these container volumes. + +### Ports +When containers are running, we usually want to expose some of their services to the outside world; for example, the openHAB web frontend has to be accessible to users. There are several ways to achieve that. One is mapping the port to the host machine; this is called port binding. In that case the service has a dedicated port on which it can be reached, with the drawback that a host port can be used by only one service. Another way is a reverse proxy. The term reverse proxy (or Load Balancer in some terminology) is normally applied to a service that sits in front of one or more servers (in our case containers), accepting requests from clients for resources located on the server(s). From the client's point of view, the reverse proxy appears to be the web server and so is totally transparent to the remote user. This means several services can share the same port: the proxy routes each request by its URL (virtual domain or context path). For example, with *grafana* and *openHAB* instances, a request for *openhab.domain.tld* is routed to the openHAB instance on port 8181 while *grafana.domain.tld* goes to the grafana instance on port 3000. In that case the proxy has to be mapped to host port 80 and/or 443 on the host machine, and the proxy server accesses the containers via the Docker virtual network. + + +Source materials used: + +https://takacsmark.com/docker-compose-tutorial-beginners-by-example-basics/ +https://www.freecodecamp.org/news/docker-simplified-96639a35ff36/ +https://www.cloudflare.com/learning/cdn/glossary/reverse-proxy/ +https://blog.container-solutions.com/understanding-volumes-docker + diff --git a/docs/Basic_setup/What-is-sudo.md b/docs/Basic_setup/What-is-sudo.md new file mode 100644 index 000000000..43477bb82 --- /dev/null +++ b/docs/Basic_setup/What-is-sudo.md @@ -0,0 +1,59 @@ +# What is sudo? + +Many first-time users of IOTstack get into difficulty by misusing the `sudo` command. The problem is best understood by example. In the following, you would expect `~` (tilde) to expand to `/home/pi`. It does: + +``` console +$ echo ~/IOTstack +/home/pi/IOTstack +``` + +The command below sends the same `echo` command to `bash` for execution. This is what happens when you type the name of a shell script. You get a new instance of `bash` to run the script: + +``` console +$ bash -c 'echo ~/IOTstack' +/home/pi/IOTstack +``` + +Same answer.
Again, this is what you expect. But now try it with `sudo` on the front: + +``` console +$ sudo bash -c 'echo ~/IOTstack' +/root/IOTstack +``` + +Different answer. It is different because `sudo` means "become root, and then run the command". The process of becoming root changes the home directory, and that changes the definition of `~`. + +Any script designed for working with IOTstack assumes `~` (or the equivalent `$HOME` variable) expands to `/home/pi`. That assumption is invalidated if the script is run by `sudo`. + +Of necessity, any script designed for working with IOTstack will have to invoke `sudo` **inside** the script **when it is required**. You do not need to second-guess the script's designer. + +Please try to minimise your use of `sudo` when you are working with IOTstack. Here are some rules of thumb: + +1. Is what you are about to run a script? If yes, check whether the script already contains `sudo` commands. Using `menu.sh` as the example: + + ``` console + $ grep -c 'sudo' ~/IOTstack/menu.sh + 28 + ``` + + There are numerous uses of `sudo` within `menu.sh`. That means the designer thought about when `sudo` was needed. + +2. Did the command you **just executed** work without `sudo`? Note the emphasis on the past tense. If yes, then your work is done. If no, and the error suggests elevated privileges are necessary, then re-execute the last command like this: + + ``` console + $ sudo !! + ``` + +It takes time, patience and practice to learn when `sudo` is **actually** needed. Over-using `sudo` out of habit, or because you were following a bad example you found on the web, is a very good way to find that you have created so many problems for yourself that will need to reinstall your IOTstack. *Please* err on the side of caution! + +## Configuration + +To edit sudo functionality and permissions use: `sudo visudo` + +For instance, to allow sudo usage without prompting for a password: +```bash +# Allow members of group sudo to execute any command without password prompt +%sudo ALL=(ALL:ALL) NOPASSWD:ALL +``` + +For more information: `man sudoers` diff --git a/docs/Basic_setup/index.md b/docs/Basic_setup/index.md new file mode 100644 index 000000000..802fc7bc3 --- /dev/null +++ b/docs/Basic_setup/index.md @@ -0,0 +1,663 @@ +# Getting Started + +## About IOTstack { #conventions } + +IOTstack is not a *system.* It is a set of *conventions* for assembling arbitrary collections of containers into something that has a reasonable chance of working out-of-the-box. The three most important conventions are: + +1. If a container needs information to persist across restarts (and most containers do) then the container's *persistent store* will be found at: + + ``` + ~/IOTstack/volumes/«container» + ``` + + Most *service definitions* examples found on the web have a scattergun approach to this problem. IOTstack imposes order on this chaos. + +2. To the maximum extent possible, network port conflicts have been sorted out in advance. + + Sometimes this is not possible. For example, Pi-hole and AdGuardHome both offer Domain Name System services. The DNS relies on port 53. You can't have two containers claiming port 53 so the only way to avoid this is to pick *either* Pi-hole *or* AdGuardHome. +3. Where multiple containers are needed to implement a single user-facing service, the IOTstack service definition will include everything needed. A good example is NextCloud which relies on MariaDB. IOTstack implements MariaDB as a private instance which is only available to NextCloud. 
This strategy ensures that you are able to run your own separate MariaDB container without any risk of interference with your NextCloud service. + +## Requirements + +IOTstack makes the following assumptions: + +1. Your hardware is capable of running Debian or one of its derivatives. Examples that are known to work include: + + - a Raspberry Pi (typically a 3B+ or 4B) + + > The Raspberry Pi Zero W2 has been tested with IOTstack. It works but the 512MB RAM means you should not try to run too many containers concurrently. + + - Orange Pi Win/Plus [see also issue 375](https://github.com/SensorsIot/IOTstack/issues/375) + - an Intel-based Mac running macOS plus Parallels with a Debian guest. + - an Intel-based platform running Proxmox with a Debian guest. + +2. Your host or guest system is running a reasonably-recent version of Debian or an operating system which is downstream of Debian in the Linux family tree, such as Raspberry Pi OS (aka "Raspbian") or Ubuntu. + + IOTstack is known to work in 32-bit mode but not all containers have images on DockerHub that support 320bit mode. If you are setting up a new system from scratch, you should choose a 64-bit option. + + IOTstack was known to work with Buster but it has not been tested recently. Bullseye is known to work but if you are setting up a new system from scratch, you should choose Bookworm. + + Please don't waste your own time trying Linux distributions from outside the Debian family tree. They are unlikely to work. + +3. You are logged-in as the default user (ie not root). In most cases, this is the user with ID=1000 and is what you get by default on either a Raspberry Pi OS or Debian installation. + + This assumption is not really an IOTstack requirement as such. However, many containers assume UID=1000 exists and you are less likely to encounter issues if this assumption holds. + +Please don't read these assumptions as saying that IOTstack will not run on other hardware, other operating systems, or as a different user. It is just that IOTstack gets most of its testing under these conditions. The further you get from these implicit assumptions, the more your mileage may vary. + +## New installation + +You have two choices: + +1. If you have an **existing** system and you want to add IOTstack to it, then the [add-on](#addonInstall) method is your best choice. +2. If you are setting up a **new** system from scratch, then [PiBuilder](#pibuilderInstall) is probably your best choice. You can, however, also use the [add-on](#addonInstall) method in a green-fields installation. + +### add-on method { #addonInstall } + +This method assumes an **existing** system rather than a green-fields installation. The script uses the principle of least interference. It only installs the bare minimum of prerequisites and, with the exception of adding some boot time options to your Raspberry Pi (but not any other kind of hardware), makes no attempt to tailor your system. + +To use this method: + +1. Install `curl`: + + ``` console + $ sudo apt install -y curl + ``` + +2. Run the following command: + + ``` console + $ curl -fsSL https://raw.githubusercontent.com/SensorsIot/IOTstack/master/install.sh | bash + ``` + +The `install.sh` script is *designed* to be run multiple times. If the script discovers a problem, it will explain how to fix that problem and, assuming you follow the instructions, you can safely re-run the script. You can repeat this process until the script completes normally. 
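+
+When the script does complete normally, a quick sanity check is to confirm that `docker` and `docker-compose` respond from the command line. The version numbers you see will depend on when you ran the installer:
+
+``` console
+$ docker --version
+$ docker-compose version
+```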
+ +### PiBuilder method { #pibuilderInstall } + +Compared with the [add-on method](#addonInstall), PiBuilder is far more comprehensive. PiBuilder: + +1. Does everything the [add-on method](#addonInstall) does. +2. Adds support packages and debugging tools that have proven useful in the IOTstack context. +3. Installs all required system patches (see next section). +4. In addition to cloning IOTstack (this repository), PiBuilder also clones: + + * [IOTstackBackup](https://github.com/Paraphraser/IOTstackBackup) which is an alternative to the backup script supplied with IOTstack but does not require your stack to be taken down to perform backups; and + * [IOTstackAliases](https://github.com/Paraphraser/IOTstackAliases) which provides shortcuts for common IOTstack operations. + +5. Performs extra tailoring intended to deliver a rock-solid platform for IOTstack. + +PiBuilder does, however, assume a **green fields** system rather than an existing installation. Although the PiBuilder scripts will *probably* work on an existing system, that scenario has never been tested so it's entirely at your own risk. + +PiBuilder actually has two specific use-cases: + +1. A first-time build of a system to run IOTstack; and +2. The ability to create your own customised version of PiBuilder so that you can quickly rebuild your Raspberry Pi or Proxmox guest after a disaster. Combined with IOTstackBackup, you can go from bare metal to a running system with data restored in about half an hour. + +## Required system patches + +You can skip this section if you used [PiBuilder](https://github.com/Paraphraser/PiBuilder) to construct your system. That's because PiBuilder installs all necessary patches automatically. + +If you used the [add-on method](#addonInstall), you should consider applying these patches by hand. Unless you know that a patch is **not** required, assume that it is needed. + +### patch 1 – restrict DHCP + +Run the following commands: + +``` console +$ sudo bash -c '[ $(egrep -c "^allowinterfaces eth\*,wlan\*" /etc/dhcpcd.conf) -eq 0 ] && echo "allowinterfaces eth*,wlan*" >> /etc/dhcpcd.conf' +``` + +This patch prevents the `dhcpcd` daemon from trying to allocate IP addresses to Docker's `docker0` and `veth` interfaces. Docker assigns the IP addresses itself and `dhcpcd` trying to get in on the act can lead to a deadlock condition which can freeze your Pi. + +See [Issue 219](https://github.com/SensorsIot/IOTstack/issues/219) and [Issue 253](https://github.com/SensorsIot/IOTstack/issues/253) for more information. + +### patch 2 – update libseccomp2 + +This patch is **ONLY** for Raspbian Buster. Do **NOT** install this patch if you are running Raspbian Bullseye or Bookworm. + +1. check your OS release + + Run the following command: + + ``` console + $ grep "PRETTY_NAME" /etc/os-release + PRETTY_NAME="Raspbian GNU/Linux 10 (buster)" + ``` + + If you see the word "buster", proceed to step 2. Otherwise, skip this patch. + +2. if you are indeed running "buster" + + Without this patch on Buster, Docker images will fail if: + + * the image is based on Alpine and the image's maintainer updates to [Alpine 3.13](https://wiki.alpinelinux.org/wiki/Release_Notes_for_Alpine_3.13.0#time64_requirement); and/or + * an image's maintainer updates to a library that depends on 64-bit values for *Unix epoch time* (the so-called Y2038 problem). 
+ + To install the patch: + + ``` console + $ sudo apt-key adv --keyserver hkps://keyserver.ubuntu.com:443 --recv-keys 04EE7237B7D453EC 648ACFD622F3D138 + $ echo "deb http://httpredir.debian.org/debian buster-backports main contrib non-free" | sudo tee -a "/etc/apt/sources.list.d/debian-backports.list" + $ sudo apt update + $ sudo apt install libseccomp2 -t buster-backports + ``` + +### patch 3 - kernel control groups + +Kernel control groups need to be enabled in order to monitor container specific +usage. This makes commands like `docker stats` fully work. Also needed for full +monitoring of docker resource usage by the telegraf container. + +Enable by running (takes effect after reboot): + +``` console +$ CMDLINE="/boot/firmware/cmdline.txt" && [ -e "$CMDLINE" ] || CMDLINE="/boot/cmdline.txt" +$ echo $(cat "$CMDLINE") cgroup_memory=1 cgroup_enable=memory | sudo tee "$CMDLINE" +$ sudo reboot +``` + +## the IOTstack menu { #iotstackMenu} + +The menu is used to construct your `docker-compose.yml` file. That file is read by `docker-compose` which issues the instructions necessary for starting your stack. + +The menu is a great way to get started quickly but it is only an aid. It is a good idea to learn the various `docker` and `docker-compose` commands so you can use them outside the menu. It is also a good idea to study the `docker-compose.yml` generated by the menu to see how everything is put together. You will gain a lot of flexibility if you learn how to add containers by hand. + +In essence, the menu is a concatenation tool which appends *service definitions* that exist inside the hidden `~/IOTstack/.templates` folder to your `docker-compose.yml`. + +Once you understand what the menu does (and, more importantly, what it doesn't do), you will realise that the real power of IOTstack lies not in its menu system but resides in its [conventions](#conventions). + +### menu item: Build Stack { #buildStack} + +To create your first `docker-compose.yml`: + +``` console +$ cd ~/IOTstack +$ ./menu.sh +Select "Build Stack" +``` + +Follow the on-screen prompts and select the containers you need. + +> The best advice we can give is "start small". Limit yourself to the core containers you actually need (eg Mosquitto, Node-RED, InfluxDB, Grafana, Portainer). You can always add more containers later. Some users have gone overboard with their initial selections and have run into what seem to be Raspberry Pi OS limitations. + +Key point: + +* If you are running "new menu" (master branch) and you select Node-RED, you **must** press the right-arrow and choose at least one add-on node. If you skip this step, Node-RED will not build properly. +* Old menu forces you to choose add-on nodes for Node-RED. + +The process finishes by asking you to bring up the stack: + +``` console +$ cd ~/IOTstack +$ docker-compose up -d +``` + +The first time you run `up` the stack docker will download all the images from DockerHub. How long this takes will depend on how many containers you selected and the speed of your internet connection. + +Some containers also need to be built locally. Node-RED is an example. Depending on the Node-RED nodes you select, building the image can also take a very long time. This is especially true if you select the SQLite node. + +Be patient (and, if you selected the SQLite node, ignore the huge number of warnings). + +### menu item: Docker commands + +The commands in this menu execute shell scripts in the root of the project. 
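+
+Whether you drive these operations from the menu or from the command line, it can be useful to check the configuration that `docker-compose` will actually act on. The following command merges `docker-compose.yml` with any override files, substitutes environment variables, and prints the result without changing anything:
+
+``` console
+$ cd ~/IOTstack
+$ docker-compose config
+```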
+ +### other menu items + +The old and new menus differ in the options they offer. You should come back and explore them once your stack is built and running. + +## useful commands: docker & docker-compose + +Handy rules: + +* `docker` commands can be executed from anywhere, but +* `docker-compose` commands need to be executed from within `~/IOTstack` + +### starting your IOTstack + +To start the stack: + +``` console +$ cd ~/IOTstack +$ docker-compose up -d +``` + +Once the stack has been brought up, it will stay up until you take it down. This includes shutdowns and reboots of your Raspberry Pi. If you do not want the stack to start automatically after a reboot, you need to stop the stack before you issue the reboot command. + +#### logging journald errors + +If you get docker logging error like: + +``` +Cannot create container for service [service name here]: unknown log opt 'max-file' for journald log driver +``` + +1. Run the command: + + ``` console + $ sudo nano /etc/docker/daemon.json + ``` + +2. change: + + ``` json + "log-driver": "journald", + ``` + + to: + + ``` json + "log-driver": "json-file", + ``` + +Logging limits were added to prevent Docker using up lots of RAM if log2ram is enabled, or SD cards being filled with log data and degraded from unnecessary IO. See [Docker Logging configurations](https://docs.docker.com/config/containers/logging/configure/) + +You can also turn logging off or set it to use another option for any service by using the IOTstack `docker-compose-override.yml` file mentioned at [IOTstack/Custom](Custom.md). + +Another approach is to change `daemon.json` to be like this: + +``` json +{ + "log-driver": "local", + "log-opts": { + "max-size": "1m" + } +} +``` + +The `local` driver is specifically designed to prevent disk exhaustion. Limiting log size to one megabyte also helps, particularly if you only have a limited amount of storage. + +If you are familiar with system logging where it is best practice to retain logs spanning days or weeks, you may feel that one megabyte is unreasonably small. However, before you rush to increase the limit, consider that each container is the equivalent of a small computer dedicated to a single task. By their very nature, containers tend to either work as expected or fail outright. That, in turn, means that it is usually only recent container logs showing failures as they happen that are actually useful for diagnosing problems. + +### starting an individual container + +To start a particular container: + +``` console +$ cd ~/IOTstack +$ docker-compose up -d «container» +``` + +### stopping your IOTstack + +Stopping aka "downing" the stack stops and deletes all containers, and removes the internal network: + +``` console +$ cd ~/IOTstack +$ docker-compose down +``` + +To stop the stack without removing containers, run: + +``` console +$ cd ~/IOTstack +$ docker-compose stop +``` + +### stopping an individual container + +`stop` can also be used to stop individual containers, like this: + +``` console +$ cd ~/IOTstack +$ docker-compose stop «container» +``` + +This puts the container in a kind of suspended animation. You can resume the container with + +``` console +$ cd ~/IOTstack +$ docker-compose start «container» +``` + +You can also `down` a container: + +``` console +$ cd ~/IOTstack +$ docker-compose down «container» +``` + +Note: + +* If the `down` command returns an error suggesting that you can't use it to down a container, it actually means that you have an obsolete version of `docker-compose`. 
You should upgrade your system. The workaround is to you the old syntax: + + ``` console + $ cd ~/IOTstack + $ docker-compose rm --force --stop -v «container» + ``` + +To reactivate a container which has been stopped and removed: + +``` console +$ cd ~/IOTstack +$ docker-compose up -d «container» +``` + +### checking container status + +You can check the status of containers with: + +``` console +$ docker ps +``` + +or + +``` console +$ cd ~/IOTstack +$ docker-compose ps +``` + +### viewing container logs + +You can inspect the logs of most containers like this: + +``` console +$ docker logs «container» +``` + +for example: + +``` console +$ docker logs nodered +``` + +You can also follow a container's log as new entries are added by using the `-f` flag: + +``` console +$ docker logs -f nodered +``` + +Terminate with a Control+C. Note that restarting a container will also terminate a followed log. + +### restarting a container + +You can restart a container in several ways: + +``` console +$ cd ~/IOTstack +$ docker-compose restart «container» +``` + +This kind of restart is the least-powerful form of restart. A good way to think of it is "the container is only restarted, it is not rebuilt". + +If you change a `docker-compose.yml` setting for a container and/or an environment variable file referenced by `docker-compose.yml` then a `restart` is usually not enough to bring the change into effect. You need to make `docker-compose` notice the change: + +``` console +$ cd ~/IOTstack +$ docker-compose up -d «container» +``` + +This type of "restart" rebuilds the container. + +Alternatively, to force a container to rebuild (without changing either `docker-compose.yml` or an environment variable file): + +``` console +$ cd ~/IOTstack +$ docker-compose up -d --force-recreate «container» +``` + +See also [updating images built from Dockerfiles](#updating-images-not-built-from-dockerfiles) if you need to force `docker-compose` to notice a change to a Dockerfile. + +## persistent data + +Docker allows a container's designer to map folders inside a container to a folder on your disk (SD, SSD, HD). This is done with the "volumes" key in `docker-compose.yml`. Consider the following snippet for Node-RED: + +```yaml +volumes: + - ./volumes/nodered/data:/data +``` + +You read this as two paths, separated by a colon. The: + +* external path is `./volumes/nodered/data` +* internal path is `/data` + +In this context, the leading "." means "the folder containing`docker-compose.yml`", so the external path is actually: + +* `~/IOTstack/volumes/nodered/data` + +This type of volume is a +[bind-mount](https://docs.docker.com/storage/bind-mounts/), where the +container's internal path is directly linked to the external path. All +file-system operations, reads and writes, are mapped to directly to the files +and folders at the external path. + +### deleting persistent data + +If you need a "clean slate" for a container, you can delete its volumes. Using InfluxDB as an example: + +``` console +$ cd ~/IOTstack +$ docker-compose rm --force --stop -v influxdb +$ sudo rm -rf ./volumes/influxdb +$ docker-compose up -d influxdb +``` + +When `docker-compose` tries to bring up InfluxDB, it will notice this volume mapping in `docker-compose.yml`: + +```yaml + volumes: + - ./volumes/influxdb/data:/var/lib/influxdb +``` + +and check to see whether `./volumes/influxdb/data` is present. 
Finding it not there, it does the equivalent of: + +``` console +$ sudo mkdir -p ./volumes/influxdb/data +``` + +When InfluxDB starts, it sees that the folder on right-hand-side of the volumes mapping (`/var/lib/influxdb`) is empty and initialises new databases. + +This is how **most** containers behave. There are exceptions so it's always a good idea to keep a backup. + +## stack maintenance + +!!! danger "Breaking update" + Recent changes will require [manual steps]( + ../Updates/migration-network-change.md) + or you may get an error like: + `ERROR: Service "influxdb" uses an undefined network "iotstack_nw"` + +### update Raspberry Pi OS + +You should keep your Raspberry Pi up-to-date. Despite the word "container" suggesting that *containers* are fully self-contained, they sometimes depend on operating system components ("WireGuard" is an example). + +``` console +$ sudo apt update +$ sudo apt upgrade -y +``` + +### git pull + +Although the menu will generally do this for you, it does not hurt to keep your local copy of the IOTstack repository in sync with the master version on GitHub. + +``` console +$ cd ~/IOTstack +$ git pull +``` + +### container image updates + +There are two kinds of images used in IOTstack: + +* Those not built using Dockerfiles (the majority) +* Those built using Dockerfiles (special cases) + + > A Dockerfile is a set of instructions designed to customise an image before it is instantiated to become a running container. + +The easiest way to work out which type of image you are looking at is to inspect the container's service definition in your `docker-compose.yml` file. If the service definition contains the: + +* `image:` keyword then the image is **not** built using a Dockerfile. +* `build:` keyword then the image **is** built using a Dockerfile. + +#### updating images not built from Dockerfiles + +If new versions of this type of image become available on DockerHub, your local IOTstack copies can be updated by a `pull` command: + +``` console +$ cd ~/IOTstack +$ docker-compose pull +$ docker-compose up -d +$ docker system prune +``` + +The `pull` downloads any new images. It does this without disrupting the running stack. + +The `up -d` notices any newly-downloaded images, builds new containers, and swaps old-for-new. There is barely any downtime for affected containers. + +#### updating images built from Dockerfiles + +Containers built using Dockerfiles have a two-step process: + +1. A *base* image is downloaded from from DockerHub; and then +2. The Dockerfile "runs" to build a *local* image. + +Node-RED is a good example of a container built from a Dockerfile. The Dockerfile defines some (or possibly all) of your add-on nodes, such as those needed for InfluxDB or Tasmota. + +There are two separate update situations that you need to consider: + +* If your Dockerfile changes; or +* If a newer base image appears on DockerHub + +Node-RED also provides a good example of why your Dockerfile might change: if you decide to add or remove add-on nodes. + +Note: + +* You can also add nodes to Node-RED using Manage Palette. + +##### when Dockerfile changes (*local* image only) + +When your Dockerfile changes, you need to rebuild like this: + +``` console +$ cd ~/IOTstack +$ docker-compose up --build -d «container» +$ docker system prune +``` + +This only rebuilds the *local* image and, even then, only if `docker-compose` senses a *material* change to the Dockerfile. 
+ +If you are trying to force the inclusion of a later version of an add-on node, you need to treat it like a [DockerHub update](#updating-images-built-from-dockerfiles). + +Key point: + +* The *base* image is not affected by this type of update. + +Note: + +* You can also use this type of build if you get an error after modifying Node-RED's environment: + + ``` console + $ cd ~/IOTstack + $ docker-compose up --build -d nodered + ``` + +##### when DockerHub updates (*base* and *local* images) + +When a newer version of the *base* image appears on DockerHub, you need to rebuild like this: + +``` console +$ cd ~/IOTstack +$ docker-compose build --no-cache --pull «container» +$ docker-compose up -d «container» +$ docker system prune +$ docker system prune +``` + +This causes DockerHub to be checked for the later version of the *base* image, downloading it as needed. + +Then, the Dockerfile is run to produce a new *local* image. The Dockerfile run happens even if a new *base* image was not downloaded in the previous step. + +### deleting unused images + +As your system evolves and new images come down from DockerHub, you may find that more disk space is being occupied than you expected. Try running: + +``` console +$ docker system prune +``` + +This recovers anything no longer in use. Sometimes multiple `prune` commands are needed (eg the first removes an old *local* image, the second removes the old *base* image). + +If you add a container via `menu.sh` and later remove it (either manually or via `menu.sh`), the associated images(s) will probably persist. You can check which images are installed via: + +``` console +$ docker images + +REPOSITORY TAG IMAGE ID CREATED SIZE +influxdb latest 1361b14bf545 5 days ago 264MB +grafana/grafana latest b9dfd6bb8484 13 days ago 149MB +iotstack_nodered latest 21d5a6b7b57b 2 weeks ago 540MB +portainer/portainer-ce latest 5526251cc61f 5 weeks ago 163MB +eclipse-mosquitto latest 4af162db6b4c 6 weeks ago 8.65MB +nodered/node-red latest fa3bc6f20464 2 months ago 376MB +portainer/portainer latest dbf28ba50432 2 months ago 62.5MB +``` + +Both "Portainer CE" and "Portainer" are in that list. Assuming "Portainer" is no longer in use, it can be removed by using either its repository name or its Image ID. In other words, the following two commands are synonyms: + +``` console +$ docker rmi portainer/portainer +$ docker rmi dbf28ba50432 +``` + +In general, you can use the repository name to remove an image but the Image ID is sometimes needed. The most common situation where you are likely to need the Image ID is after an image has been updated on DockerHub and pulled down to your Raspberry Pi. You will find two containers with the same name. One will be tagged "latest" (the running version) while the other will be tagged "\" (the prior version). You use the Image ID to resolve the ambiguity. + +### pinning to specific versions + +See [container image updates](#container-image-updates) to understand how to tell the difference between images that are used "as is" from DockerHub versus those that are built from local Dockerfiles. + +Note: + +* You should **always** visit an image's DockerHub page before pinning to a specific version. This is the only way to be certain that you are choosing the appropriate version suffix. + +To pin an image to a specific version: + +* If the image comes straight from DockerHub, you apply the pin in `docker-compose.yml`. 
For example, to pin Grafana to version 7.5.7, you change: + + ```yaml + grafana: + container_name: grafana + image: grafana/grafana:latest + … + ``` + + to: + + ```yaml + grafana: + container_name: grafana + image: grafana/grafana:7.5.7 + … + ``` + + To apply the change, "up" the container: + + ``` console + $ cd ~/IOTstack + $ docker-compose up -d grafana + ``` + +* If the image is built using a local Dockerfile, you apply the pin in the Dockerfile. For example, to pin Mosquitto to version 1.6.15, edit `~/IOTstack/.templates/mosquitto/Dockerfile` to change: + + ```dockerfile + # Download base image + FROM eclipse-mosquitto:latest + … + ``` + + to: + + ```dockerfile + # Download base image + FROM eclipse-mosquitto:1.6.15 + … + ``` + + To apply the change, "up" the container and pass the `--build` flag: + + ``` console + $ cd ~/IOTstack + $ docker-compose up -d --build mosquitto + ``` + diff --git a/docs/Containers/.pages b/docs/Containers/.pages new file mode 100644 index 000000000..f566ad3cf --- /dev/null +++ b/docs/Containers/.pages @@ -0,0 +1 @@ +title: Containers \ No newline at end of file diff --git a/docs/Containers/AdGuardHome.md b/docs/Containers/AdGuardHome.md new file mode 100644 index 000000000..c22bfbcb6 --- /dev/null +++ b/docs/Containers/AdGuardHome.md @@ -0,0 +1,75 @@ +# AdGuard Home + +## References + +* [AdGuard Home GitHub](https://github.com/AdguardTeam/AdGuardHome) +* [AdGuard Home DockerHub](https://hub.docker.com/r/adguard/adguardhome) + +## Either *AdGuard Home* or *PiHole*, but not both + +AdGuard Home and PiHole perform similar functions. They use the same ports so you can **not** run both at the same time. You must choose one or the other. + +## Quick Start { #quickStart } + +When you first install AdGuard Home: + +1. Use a web browser to connect to it using port 3001. For example: + + ``` + http://raspberrypi.local:3001 + ``` + +2. Click "Getting Started". + +3. Change the port number for the Admin Web Interface to be "8089". Leave the other settings on the page at their defaults and click "Next". +4. Enter a username and password and click "Next". +5. Click "Open Dashboard". This redirects to port 8089. +6. After the initial setup, you connect to AdGuard Home via port 8089: + + ``` + http://raspberrypi.local:8089 + ``` + +## About port 8089 + +Port 8089 is the default administrative user interface for AdGuard Home running under IOTstack. + +Port 8089 is not active until you have completed the [Quick Start](#quickStart) procedure. You must start by connecting to port 3001. + +Because of AdGuard Home limitations, you must take special precautions if you decide to change to a different port number: + +1. The internal and external ports **must** be the same; and + +2. You **must** convince AdGuard Home that it is a first-time installation: + + ```console + $ cd ~/IOTstack + $ docker-compose stop adguardhome + $ docker-compose rm -f adguardhome + $ sudo rm -rf ./volumes/adguardhome + $ docker-compose up -d adguardhome + ``` + +3. Repeat the [Quick Start](#quickStart) procedure, this time substituting the new Admin Web Interface port where you see "8089". + +## About port 3001:3000 + +Port 3001 (external, 3000 internal) is only used during [Quick Start](#quickStart) procedure. Once port 8089 becomes active, port 3001 ceases to be active. + +In other words, you need to keep port 3001 reserved even though it is only ever used to set up port 8089. 
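+
+Once the [Quick Start](#quickStart) procedure is complete, you may want to confirm that AdGuard Home is answering DNS queries on port 53. One way is to run a query from another machine on your network; the IP address below is only an example, so substitute the address of your Raspberry Pi (`dig` is provided by the `dnsutils` package on Debian and its derivatives):
+
+```console
+$ dig @192.168.1.10 example.com +short
+```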
+ +## About Host Mode + +If you want to run AdGuard Home as your DHCP server, you need to put the container into "host mode". You need edit the AdGuard Home service definition in `docker-compose.yml` to: + +1. add the line: + + ```yaml + network_mode: host + ``` + +2. remove the `ports:` directive and **all** of the port mappings. + +Note: + +* It is not really a good idea to offer DHCP services from a container. This is because containers generally start far too late in a boot process to be useful. If you want to use AdGuard Home for DHCP, you should probably consider a native installation. diff --git a/docs/Containers/Adminer.md b/docs/Containers/Adminer.md new file mode 100644 index 000000000..5353916b4 --- /dev/null +++ b/docs/Containers/Adminer.md @@ -0,0 +1,12 @@ +# Adminer +## References +- [Docker](https://hub.docker.com/_/adminer) +- [Website](https://www.adminer.org/) + +## About + +This is a nice tool for managing databases. Web interface has moved to port 9080. There was an issue where openHAB and Adminer were using the same ports. If you have an port conflict edit the docker-compose.yml and under the adminer service change the line to read: +``` yaml + ports: + - 9080:8080 +``` diff --git a/docs/Containers/Blynk_server.md b/docs/Containers/Blynk_server.md new file mode 100755 index 000000000..edb403c43 --- /dev/null +++ b/docs/Containers/Blynk_server.md @@ -0,0 +1,291 @@ +# Blynk server + +This document discusses an IOTstack-specific version of Blynk-Server. It is built on top of an [Ubuntu](https://hub.docker.com/_/ubuntu) base image using a *Dockerfile*. + +## References { #references } + +- [Ubuntu base image](https://hub.docker.com/_/ubuntu) at DockerHub +- [Peter Knight Blynk-Server fork](https://github.com/Peterkn2001/blynk-server) at GitHub (includes documentation) +- [Peter Knight Blynk-Server releases](https://github.com/Peterkn2001/blynk-server/releases/) at GitHub +- [Blynk home page](https://blynk.io) at blynk.io +- [Blynk documentation](https://docs.blynk.io/en/) at blynk.io +- [Blynk community forum](https://community.blynk.cc/) at community.blynk.cc +- [Interesting post by Peter Knight on MQTT/Node Red flows]( +https://community.blynk.cc/t/my-home-automation-projects-built-with-mqtt-and-node-red/29045) at community.blynk.cc +- [Blynk flow examples](https://github.com/877dev/Node-Red-flow-examples) at GitHub + +Acknowledgement: + +- Original writeup from @877dev + +## Significant directories and files { #significantFiles } + +``` +~/IOTstack +├── .templates +│   └── blynk_server +│      ├── Dockerfile ❶ +│      ├── docker-entrypoint.sh ❷ +│      ├── iotstack_defaults ❸ +│      │   ├── mail.properties +│      │   └── server.properties +│      └── service.yml ❹ +├── services +│   └── blynk_server +│      └── service.yml ❺ +├── docker-compose.yml ❻ +└── volumes + └── blynk_server ❼ +    ├── config ❽ +    │   ├── mail.properties +    │   └── server.properties +    └── data +``` + +1. The *Dockerfile* used to construct Blynk Server on top of Ubuntu. +2. A start-up script designed to handle container self-repair. +3. A folder holding the default versions of the configuration files. +4. The *template service definition*. +5. The *working service definition* (only relevant to old-menu, copied from ❹). +6. The *Compose* file (includes ❹). +7. The *persistent storage area* for the `blynk_server` container. +8. Working copies of the configuration files (copied from ❸). 
+
+Everything in ❽:
+
+* will be replaced if it is not present when the container starts; but
+* will never be overwritten if altered by you.
+
+## How Blynk Server gets built for IOTstack { #howBlynkServerIOTstackGetsBuilt }
+
+### GitHub Updates { #dockerHubImages }
+
+Periodically, the source code is updated and a new version is released. You can check for the latest version at the [releases page](https://github.com/Peterkn2001/blynk-server/releases/).
+
+### IOTstack menu { #iotstackMenu }
+
+When you select Blynk Server in the IOTstack menu, the *template service definition* is copied into the *Compose* file.
+
+> Under old menu, it is also copied to the *working service definition* and then not really used.
+
+### IOTstack first run { #iotstackFirstRun }
+
+On a first install of IOTstack, you run the menu, choose your containers, and are told to do this:
+
+```console
+$ cd ~/IOTstack
+$ docker-compose up -d
+```
+
+`docker-compose` reads the *Compose* file. When it arrives at the `blynk_server` fragment, it finds:
+
+```yaml
+  blynk_server:
+    build:
+      context: ./.templates/blynk_server/.
+      args:
+      - BLYNK_SERVER_VERSION=0.41.16
+```
+
+The `build` statement tells `docker-compose` to look for:
+
+```
+~/IOTstack/.templates/blynk_server/Dockerfile
+```
+
+The `BLYNK_SERVER_VERSION` argument is passed into the build process. This implicitly pins each build to the version number in the *Compose* file (eg 0.41.16). If you need to update to a later version, see [Upgrading Blynk Server](#upgradingBlynkServer) below.
+
+> The *Dockerfile* is in the `.templates` directory because it is intended to be a common build for **all** IOTstack users. This is different to the arrangement for Node-RED where the *Dockerfile* is in the `services` directory because it is how each individual IOTstack user's version of Node-RED is customised.
+
+The *Dockerfile* begins with:
+
+```Dockerfile
+FROM ubuntu
+```
+
+The `FROM` statement tells the build process to pull down the ***base image*** from [*DockerHub*](https://hub.docker.com).
+
+> It is a ***base*** image in the sense that it never actually runs as a container on your Raspberry Pi.
+
+The remaining instructions in the *Dockerfile* customise the ***base image*** to produce a ***local image***. The customisations are:
+
+1. Add packages to satisfy dependencies.
+2. Add the default versions of the configuration files so that the container can perform self-repair each time it is launched.
+3. Download and install the Java package that implements the Blynk Server.
+
+The ***local image*** is instantiated to become your running container.
+
+When you run the `docker images` command after Blynk Server has been built, you *may* see two rows that are relevant:
+
+```console
+$ docker images
+REPOSITORY              TAG      IMAGE ID       CREATED       SIZE
+iotstack_blynk_server   latest   3cd6445f8a7e   3 hours ago   652MB
+ubuntu                  latest   897590a6c564   7 days ago    49.8MB
+```
+
+* `ubuntu` is the ***base image***; and
+* `iotstack_blynk_server` is the ***local image***.
+
+You *may* see the same pattern in *Portainer*, which reports the ***base image*** as "unused". You should not remove the ***base*** image, even though it appears to be unused.
+
+> Whether you see one or two rows depends on the version of `docker-compose` you are using and how your version of `docker-compose` builds local images.
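+
+If you want to list just the locally-built images, you can usually filter on the `iotstack_` prefix, which is the project-name prefix `docker-compose` applies to images it builds from the IOTstack directory:
+
+```console
+$ docker images --filter=reference='iotstack_*'
+```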
+ +## Logging { #logging } + +You can inspect Blynk Server's log by: + +```console +$ docker logs blynk_server +``` + +## Changing Blynk Server's configuration { #editConfiguration } + +The first time you launch the `blynk_server` container, the following structure will be created in the persistent storage area: + +``` +~/IOTstack/volumes/blynk_server +├── [drwxr-xr-x pi ] config +│   ├── [-rw-r--r-- pi ] mail.properties +│   └── [-rw-r--r-- pi ] server.properties +└── [drwxr-xr-x root ] data +``` + +The two `.properties` files can be used to alter Blynk Server's configuration. When you make change to these files, you activate then by restarting the container: + +```console +$ cd ~/IOTstack +$ docker-compose restart blynk_server +``` + +## Getting a clean slate { #cleanSlate } + +Erasing Blynk Server's persistent storage area triggers self-healing and restores known defaults: + +```console +$ cd ~/IOTstack +$ docker-compose down blynk_server +$ sudo rm -rf ./volumes/blynk_server +$ docker-compose up -d blynk_server +``` +Notes: + +* You can also remove individual configuration files and then trigger self-healing. For example, if you decide to edit `server.properties` and make a mess, you can restore the original default version like this: + + ```console + $ cd ~/IOTstack + $ rm volumes/blynk_server/config/server.properties + $ docker-compose restart blynk_server + ``` + +* See also [if downing a container doesn't work](../Basic_setup/index.md/#downContainer) + +## Upgrading Blynk Server { #upgradingBlynkServer } + +To find out when a new version has been released, you need to visit the [Blynk-Server releases](https://github.com/Peterkn2001/blynk-server/releases/) page at GitHub. + +At the time of writing, version 0.41.16 was the most up-to-date. Suppose that version 0.41.17 has been released and that you decide to upgrade: + +1. Edit your *Compose* file to change the version nuumber: + + ```yaml + blynk_server: + build: + context: ./.templates/blynk_server/. + args: + - BLYNK_SERVER_VERSION=0.41.17 + ``` + + Note: + + - You can use this method to pin Blynk Server to any available version. + +2. You then have two options: + + - If you only want to reconstruct the **local** image: + + ```console + $ cd ~/IOTstack + $ docker-compose up --build -d blynk_server + $ docker system prune -f + ``` + + - If you want to update the Ubuntu **base** image at the same time: + + ```console + $ cd ~/IOTstack + $ docker-compose build --no-cache --pull blynk_server + $ docker-compose up -d blynk_server + $ docker system prune -f + $ docker system prune -f + ``` + + The second `prune` will only be needed if there is an old *base image* and that, in turn, depends on the version of `docker-compose` you are using and how your version of `docker-compose` builds local images. + +## Using Blynk Server { #usingBlynkServer } + +See the [References](#references) for documentation links. + +### Connecting to the administrative UI { #blynkAdmin } + +To connect to the administrative interface, navigate to: + +``` +https://:9444/admin +``` + +You may encounter browser security warnings which you will have to acknowledge in order to be able to connect to the page. The default credentials are: + +- username = `admin@blynk.cc` +- password = `admin` + +### Change username and password { #changePassword } + +1. Click on Users > "email address" and edit email, name and password. +2. Save changes. +3. 
Restart the container using either Portainer or the command line: + + ```console + $ cd ~/IOTstack + $ docker-compose restart blynk_server + ``` + +### Setup gmail { #gmailSetup } + +Optional step, useful for getting the auth token emailed to you. +(To be added once confirmed working....) + +### iOS/Android app setup { #mobileSetup } + +1. When setting up the application on your mobile be sure to select "custom" setup [see](https://github.com/Peterkn2001/blynk-server#app-and-sketch-changes). +2. Press "New Project" +3. Give it a name, choose device "Raspberry Pi 3 B" so you have plenty of [virtual pins](http://help.blynk.cc/en/articles/512061-what-is-virtual-pins) available, and lastly select WiFi. +4. Create project and the [auth token](https://docs.blynk.cc/#getting-started-getting-started-with-the-blynk-app-4-auth-token) will be emailed to you (if emails configured). You can also find the token in app under the phone app settings, or in the admin web interface by clicking Users>"email address" and scroll down to token. + +### Quick usage guide for app { #quickAppGuide } + +1. Press on the empty page, the widgets will appear from the right. +2. Select your widget, let's say a button. +3. It appears on the page, press on it to configure. +4. Give it a name and colour if you want. +5. Press on PIN, and select virtual. Choose any pin i.e. V0 +6. Press ok. +7. To start the project running, press top right Play button. +8. You will get an offline message, because no devices are connected to your project via the token. + +Enter Node-Red..... + +### Node-RED { #enterNodeRed } + +1. Install `node-red-contrib-blynk-ws` from Manage Palette. +2. Drag a "write event" node into your flow, and connect to a debug node +3. Configure the Blynk node for the first time: + + ``` + URL: wss://youripaddress:9444/websockets + ``` + + There is more information [here](https://github.com/gablau/node-red-contrib-blynk-ws/blob/master/README.md#how-to-use). +4. Enter your [auth token](https://docs.blynk.cc/#getting-started-getting-started-with-the-blynk-app-4-auth-token) from before and save/exit. +5. When you deploy the flow, notice the app shows connected message, as does the Blynk node. +6. Press the button on the app, you will notice the payload is sent to the debug node. 
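+
+### Checking that the server is reachable { #serverReachable }
+
+If the app reports that the project is offline, or the Blynk node in Node-RED never connects, it is worth confirming that the server is reachable at all. One simple check is to request the admin page from the command line; the `-k` option tells `curl` to accept the server's self-signed certificate, and the host name below is only an example:
+
+```console
+$ curl -k https://raspberrypi.local:9444/admin
+```
+
+If that returns HTML, the server is up and the problem is more likely to be with the token or the URL configured in the app or Node-RED.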
diff --git a/docs/Containers/Chronograf.md b/docs/Containers/Chronograf.md new file mode 100644 index 000000000..ae7ec9cc0 --- /dev/null +++ b/docs/Containers/Chronograf.md @@ -0,0 +1,71 @@ +# Chronograf + +## References + +- [*influxdata Chronograf* documentation](https://docs.influxdata.com/chronograf/) +- [*GitHub*: influxdata/influxdata-docker/chronograf](https://github.com/influxdata/influxdata-docker/tree/master/chronograf) +- [*DockerHub*: influxdata Chronograf](https://hub.docker.com/_/chronograf) + +## Kapacitor integration + +If you selected Kapacitor in the menu and want Chronograf to be able to interact with it, you need to edit `docker-compose.yml` to un-comment the lines which are commented-out in the following: + +```yaml +chronograf: + … + environment: + … + # - KAPACITOR_URL=http://kapacitor:9092 + depends_on: + … + # - kapacitor +``` + +If the Chronograf container is already running when you make this change, run: + +``` console +$ cd ~IOTstack +$ docker-compose up -d chronograf +``` + +## Upgrading Chronograf + +You can update the container via: + +``` console +$ cd ~/IOTstack +$ docker-compose pull +$ docker-compose up -d +$ docker system prune +``` + +In words: + +* `docker-compose pull` downloads any newer images; +* `docker-compose up -d` causes any newly-downloaded images to be instantiated as containers (replacing the old containers); and +* the `prune` gets rid of the outdated images. + +### Chronograf version pinning + +If you need to pin to a particular version: + +1. Use your favourite text editor to open `docker-compose.yml`. +2. Find the line: + + ``` yaml + image: chronograf:latest + ``` + +3. Replace `latest` with the version you wish to pin to. For example, to pin to version 1.9.0: + + ``` yaml + image: chronograf:1.9.0 + ``` + +4. Save the file and tell `docker-compose` to bring up the container: + + ``` console + $ cd ~/IOTstack + $ docker-compose up -d chronograf + $ docker system prune + ``` diff --git a/docs/Containers/DashMachine.md b/docs/Containers/DashMachine.md new file mode 100644 index 000000000..595e13934 --- /dev/null +++ b/docs/Containers/DashMachine.md @@ -0,0 +1,18 @@ +# DashMachine + +## References +* [Homepage](https://github.com/rmountjoy92/DashMachine) +* [Docker](https://hub.docker.com/r/rmountjoy/dashmachine/) + +## Web Interface +The web UI can be found on `"your_ip":5000`. + +The default credentials are: +* User: `admin` +* Password: `admin` + +## About *DashMachine* + +DashMachine is a web application bookmark dashboard. It allows you to have all your application bookmarks available in one place, grouped and organized how you want to see them. + +Within the context of IOTstack, DashMachine can help you organize your deployed services. diff --git a/docs/Containers/Deconz.md b/docs/Containers/Deconz.md new file mode 100644 index 000000000..e023860f3 --- /dev/null +++ b/docs/Containers/Deconz.md @@ -0,0 +1,63 @@ +# deCONZ + +## References +- [Docker](https://hub.docker.com/r/marthoc/deconz) +- [Website](https://github.com/dresden-elektronik/deconz-rest-plugin/blob/master/README.md) + +## Setup + +### Old menu (old menu branch) + +If you use "old menu", you may get an error message similar to the following on first launch: + +``` +parsing ~/IOTstack/docker-compose.yml: error while interpolating services.deconz.devices.[]: required variable DECONZ_DEVICE_PATH is missing a value: eg echo DECONZ_DEVICE_PATH=/dev/serial0 >>~/IOTstack/.env +``` + +The message is telling you that you need to define the path to your deCONZ device. 
Common examples are:
+
+- Raspbee at `/dev/serial0`
+- Conbee at `/dev/ttyUSB0`
+- Conbee II at `/dev/ttyACM0`
+
+Once you have identified the appropriate device path, you can define it like this:
+
+```console
+$ echo DECONZ_DEVICE_PATH=/dev/serial0 >>~/IOTstack/.env
+```
+
+This example uses `/dev/serial0`. Substitute your actual device path if it is different.
+
+### New menu (master branch)
+
+New menu offers a sub-menu (place the cursor on `deconz` and press the right arrow) where you can select the appropriate device path.
+
+## Dialout group
+
+Before running `docker-compose up -d`, make sure your Linux user is part of the dialout group, which allows the user access to serial devices (i.e. Conbee/Conbee II/RaspBee). If you are not certain, simply add your user to the dialout group by running the following command (username "pi" being used as an example):
+
+```console
+$ sudo usermod -a -G dialout pi
+```
+
+## Troubleshooting
+
+Your Conbee/Conbee II/RaspBee gateway must be plugged in when the deCONZ Docker container is being brought up. If your gateway is not detected, or no lights can be paired, try moving the device to another USB port. A reboot may help too.
+
+Use a 0.5-1m USB extension cable with ConBee (II) to avoid WiFi and Bluetooth noise/interference from your Raspberry Pi (recommended by the manufacturer and often the solution to poor performance).
+
+## Accessing the Phoscon UI
+The Phoscon UI is available using port 8090 (http://your.local.ip.address:8090/)
+
+## Viewing the deCONZ Zigbee mesh
+The Zigbee mesh can be viewed using VNC on port 5901. The default VNC password is "changeme".
+
+## Connecting deCONZ and Node-RED
+Install [node-red-contrib-deconz](https://flows.nodered.org/node/node-red-contrib-deconz) via the "Manage palette" menu in Node-RED (if not already installed) and follow these 2 simple steps (also shown in the animation below):
+
+Step 1: In the Phoscon UI, go to Settings > Gateway > Advanced and click "Authenticate app".
+
+Step 2: In Node-RED, open a deCONZ node, select "Add new deconz-server", insert your IP address and port 8090 and click "Get settings". Click "Add", "Done" and "Deploy". Your device list will not be updated before deploying.
+
+
+![installing deCONZ](https://github.com/DIYtechie/resources/blob/master/images/Setup%20deCONZ%20in%20Node-RED.gif?raw=true)
diff --git a/docs/Containers/DiyHue.md b/docs/Containers/DiyHue.md new file mode 100644 index 000000000..6fe62078e --- /dev/null +++ b/docs/Containers/DiyHue.md @@ -0,0 +1,20 @@
+# DIY hue
+* [website](https://diyhue.org/getting-started/)
+
+## About
+
+diyHue is a utility to control the lights in your home.
+
+## Setup
+
+Before you start diyHue you will need to get your IP and MAC addresses.
Run `ip addr` in the terminal + +![image](https://user-images.githubusercontent.com/46672225/69816794-c2c24400-1201-11ea-9d97-e8e03b98d9f4.png) + +Enter these values into the `./services/diyhue/diyhue.env` file + +The default username and password it `Hue` and `Hue` respectively + +## Usage + +The web interface is available on port 8070 \ No newline at end of file diff --git a/docs/Containers/Domoticz.md b/docs/Containers/Domoticz.md new file mode 100644 index 000000000..291786d2a --- /dev/null +++ b/docs/Containers/Domoticz.md @@ -0,0 +1,87 @@ +# Domoticz + +## References + +- [Domoticz home](https://www.domoticz.com) + + - [User Guide](https://www.domoticz.com/DomoticzManual.pdf) (pdf) + +- [GitHub: domoticz/domoticz](https://github.com/domoticz/domoticz) +- [DockerHub: domoticz/domoticz](https://hub.docker.com/r/domoticz/domoticz) + +## Invitation + +There is no IOTstack documentation for Domoticz. + +This is a standing invitation to anyone who is familiar with this container to submit a Pull Request to provide some documentation. + +## Environment Variables + +* `TZ=${TZ:-Etc/UTC}` + + If `TZ` is defined in `~/IOTstack/.env` then the value there is applied, otherwise the default of `Etc/UTC` is used. You can initialise `.env` like this: + + ``` console + $ cd ~/IOTstack + $ [ $(grep -c "^TZ=" .env) -eq 0 ] && echo "TZ=$(cat /etc/timezone)" >>.env + ``` + +* `LOG_PATH=/opt/domoticz/userdata/domoticz.log` + + This is disabled by default. If you enable it, Domoticz will write a log to that *internal* path. The path corresponds with the *external* path: + + ``` + ~/IOTstack/volumes/domoticz/domoticz.log + ``` + + Note that this log is persistent. In other words, it will survive container restarts. This means you are responsible for pruning it from time to time. The Unix tradition for pruning logs is: + + ``` console + $ cd ~/IOTstack/volumes/domoticz/ + $ cat /dev/null | sudo tee domoticz.log + ``` + + If, instead, you decide to delete the log file, you should stop the container first: + + ``` console + $ cd ~/IOTstack + $ docker-compose down domoticz + $ sudo rm ./volumes/domoticz/domoticz.log + $ docker-compose up -d domoticz + ``` + +* `EXTRA_CMD_ARG=` + + This is disabled by default. It can be enabled and used to override the default parameters and pass [command-line parameters](https://www.domoticz.com/wiki/Command_line_parameters) of your choosing to Domoticz. + +## Devices + +The service definition includes an `x-devices:` clause. The `x-` prefix has the same effect as commenting-out the entire clause. If you wish to map an external device into the container: + +1. Adjust the left-hand-side of the example path to point to the device as it appears on your Raspberry Pi; +2. Remove the `x-` prefix. +3. Recreate the container: + + ``` + $ cd ~/IOTstack + $ docker-compose up -d domoticz + ``` + +## Migration Notes + +1. Older IOTstack service definitions for Domoticz used the `lscr.io/linuxserver/domoticz:latest` image. The current service definition uses the `domoticz/domoticz:stable` image. +2. 
The location of the persistent store has changed, as has its relationship to the internal path: + + service definition | persistent store | internal path + -------------------|----------------------------------|-------------- + older | ~/IOTstack/volumes/domoticz/data | config + current | ~/IOTstack/volumes/domoticz | /opt/domoticz/userdata + + If you have have been using the older service definition and wish to upgrade to the current service definition, you can try migrating like this: + + ``` console + $ cd ~/IOTstack/volumes + $ sudo mv domoticz domoticz.old + $ sudo cp -a domoticz.old/data domoticz + ``` + diff --git a/docs/Containers/Dozzle.md b/docs/Containers/Dozzle.md new file mode 100644 index 000000000..04d7031bb --- /dev/null +++ b/docs/Containers/Dozzle.md @@ -0,0 +1,12 @@ +# Dozzle + +## Reference +* [Dozzle GitHub](https://github.com/amir20/dozzle) + +## Webinterface +Webninterface is available at `"your_ip":8889` + + +## About *Dozzle* +Dozzle is a small lightweight application with a web based interface to monitor Docker logs. +It doesn’t store any log files. It is for live monitoring of your container logs only. \ No newline at end of file diff --git a/docs/Containers/Duckdns.md b/docs/Containers/Duckdns.md new file mode 100644 index 000000000..a3859a550 --- /dev/null +++ b/docs/Containers/Duckdns.md @@ -0,0 +1,84 @@ +# Duck DNS + +Duckdns is a free public DNS service that provides you with a domain name you +can update to match your dynamic IP-address. + +This container automates the process to keep the duckdns.org domain updated +when your IP-address changes. + +## Configuration + +First, register an account, add your subdomain and get your token from +[http://www.duckdns.org/](http://www.duckdns.org/) + +Either edit `~/IOTstack/docker-compose.yml` or create a file +`~/IOTstack/docker-compose.override.yml`. Place your Duckdns token and +subdomain name (without .duckdns.org) there: + +``` yaml title="docker-compose.override.yml" +version: '3.6' +services: + duckdns: + environment: + TOKEN: your-duckdns-token + SUBDOMAINS: subdomain +``` + +Observe that at least the initial update is successful: + +``` console +$ cd ~/IOTstack +$ docker-compose up -d duckdns +$ docker-compose logs -f duckdns +...SNIP... +duckdns | Sat May 21 11:01:00 UTC 2022: Your IP was updated +...SNIP... +(ctrl-c to stop following the log) +``` + +If there is a problem, check that the resulting effective configuration of +'duckdns:' looks OK: +``` console +$ cd ~/IOTstack && docker-compose config +``` + +### Domain name for the private IP + +!!! note inline end "Example public/private IP:s and domains" + + ``` mermaid + flowchart + I([Internet]) + G("Router\npublic IP: 52.85.51.71\nsubdomain.duckdns.org") + R(Raspberry pi\nprivate IP: 192.168.0.100\nprivate_subdomain.duckdns.org) + I --- |ISP| G --- |LAN| R + ``` + +As a public DNS server, Duckdns is not meant to be used for private IPs. It's +recommended that for resolving internal LAN IPs you use the [Pi +Hole](Pi-hole.md) container or run a dedicated DNS server. + +That said, it's possible to update a Duckdns subdomain to your private LAN IP. +This may be convenient if you have devices that don't support mDNS (.local) or +don't want to run Pi-hole. This is especially useful if you can't assign a +static IP to your RPi. No changes to your DNS resolver settings are needed. + +First, as for the public subdomain, add the domain name to your Duckdns account +by logging in from their homepage. 
Then add a `PRIVATE_SUBDOMAINS` variable +indicating this subdomain: + +``` yaml +version: '3.6' +services: + duckdns: + environment: + TOKEN: ... + SUBDOMAINS: ... + PRIVATE_SUBDOMAINS: private_subdomain +``` + +## References + +* uses ukkopahis' [fork](https://github.com/ukkopahis/docker-duckdns) based on + the linuxserver + [docker-duckdns](https://github.com/linuxserver/docker-duckdns) container diff --git a/docs/Containers/ESPHome.md b/docs/Containers/ESPHome.md new file mode 100644 index 000000000..6b6e592b5 --- /dev/null +++ b/docs/Containers/ESPHome.md @@ -0,0 +1,295 @@ +# ESPHome + +*ESPHome is a system to control your microcontrollers by simple yet powerful configuration files and control them remotely through Home Automation systems.* + +## Resources + +- [ESPHome documentation](https://esphome.io) +- [DockerHub](https://hub.docker.com/r/esphome/esphome) +- [GitHub](https://github.com/esphome/feature-requests) + +## IOTstack service definition {#serviceDefinition} + +``` yaml linenums="1" +esphome: + container_name: esphome + image: esphome/esphome + restart: unless-stopped + environment: + - TZ=${TZ:-Etc/UTC} + - USERNAME=${ESPHOME_USERNAME:-esphome} + - PASSWORD=${ESPHOME_PASSWORD:?eg echo ESPHOME_PASSWORD=ChangeMe >>~/IOTstack/.env} + network_mode: host + x-ports: + - "6052:6052" + volumes: + - ./volumes/esphome/config:/config + device_cgroup_rules: + - 'c 188:* rw' +``` + +Notes: + +1. The container runs in "host" mode, meaning it binds to the host port 6052. +2. The `x-` prefix on the `x-ports` clause has the same effect as commenting-out lines 10 and 11. It serves the twin purposes of documenting the fact that the ESPHome container uses port 6052 and minimising the risk of port number collisions. + +## Container installation + +### via the IOTstack menu + +If you select ESPHome in the IOTstack menu, as well as adding the [service definition](#serviceDefinition) to your compose file, the menu: + +1. Copies a rules file into `/etc/udev/rules.d`. +2. Checks `~/IOTstack/.env` for the presence of the `ESPHOME_USERNAME` and initialises it to the value `esphome` if it is not found. +3. Checks `~/IOTstack/.env` for the presence of the `ESPHOME_PASSWORD` and initialises it to a random value if it is not found. + +### manual installation {#manualInstall} + +If you prefer to avoid the menu, you can install ESPHome like this: + +1. Be in the correct directory: + + ``` console + $ cd ~/IOTstack + ``` + +2. If you are on the "master" branch, add the service definition like this: + + ``` console + $ sed -e "s/^/ /" ./.templates/esphome/service.yml >>docker-compose.yml + ``` + + Alternatively, if you are on the "old-menu" branch, do this: + + ``` console + $ cat ./.templates/esphome/service.yml >>docker-compose.yml + ``` + +3. Replace `«username»` and `«password»` in the following commands with values of your choice and then run the commands: + + ``` console + $ echo "ESPHOME_USERNAME=«username»” >>.env + $ echo "ESPHOME_PASSWORD=«password»" >>.env + ``` + + This initialises the required environment variables. Although the username defaults to `esphome`, there is no default for the password. If you forget to set a password, `docker-compose` will remind you when you try to start the container: + + ``` + error while interpolating services.esphome.environment.[]: \ + required variable ESPHOME_PASSWORD is missing a value: \ + eg echo ESPHOME_PASSWORD=ChangeMe >>~/IOTstack/.env + ``` + + The values of the username and password variables are applied each time you start the container. 
In other words, if you decide to change these credentials, all you need to do is edit the `.env` file and “up” the container. + +4. Copy the UDEV rules file into place and ensure it has the correct permissions: + + ``` console + $ sudo cp ./.templates/esphome/88-tty-iotstack-esphome.rules /etc/udev/rules.d/ + $ sudo chmod 644 /etc/udev/rules.d/88-tty-iotstack-esphome.rules + ``` + +## A quick tour + +ESPHome provides a number of methods for provisioning an ESP device. These instructions focus on the situation where the device is connected to your Raspberry Pi via a USB cable. + +### start the container + +To start the container: + +``` console +$ cd ~/IOTstack +$ docker-compose up -d esphome +``` + +Tip: + +* You can always retrieve your ESPHome login credentials from the `.env` file. For example: + + ``` console + $ grep “^ESPHOME_” .env + ESPHOME_USERNAME=esphome + ESPHOME_PASSWORD=8AxXG5ZVsO4UGTMt + ``` + +### connect your ESP device + +Connect your ESP device to one of your Raspberry Pi’s USB ports. You need to connect the device *while* the ESPHome container is running so that the [UDEV rules file](#udevRules) can propagate the device (typically `/dev/ttyUSBn`) into the container. + +So long as the container is running, you can freely connect and disconnect ESP devices to your Raspberry Pi’s USB ports, and the container will keep “in sync”. + +### in your browser + +Launch your browser. For maximum flexibility, ESPHome recommends browsers that support *WebSerial*, like Google Chrome or Microsoft Edge. + +1. Connect to your Raspberry Pi on port 6052 (reference point 🄰 in the following screen shot): + + ![main login screen](./images/esphome-010-login.png) + + You can use your Raspberry Pi’s: + + * multicast domain name (eg `raspberrypi.local`); + * IP address (eg 192.168.1.100); or + * domain name (if you run your own Domain Name System server). + +2. Enter your ESPHome credentials at 🄱 and click Login. + +3. Click either of the + New Device buttons 🄲: + + ![add new device](./images/esphome-020-new-device.png) + + Read the dialog and then click Continue 🄳: + + ![new device dialog](./images/esphome-030-new-device-continue.png) + +4. Give the configuration a name at 🄴: + + ![create configuration dialog](./images/esphome-040-create-config.png) + + In the fields at 🄵, enter the Network Name (SSID) and password (PSK) of the WiFi network that you want your ESP devices to connect to when they power up. + + > The WiFi fields are only displayed the very first time you set up a device. Thereafter, ESPHome assumes all your devices will use the same WiFi network. + + Click “Next” 🄶. + +5. Select the appropriate SoC (System on a Chip) type for your device. Here, I am using a generic ESP32 at 🄷: + + ![select device type dialog](./images/esphome-050-device-type.png) + + Clicking on the appropriate line proceeds to the next step. + +6. You can either make a note of the encryption key or, as is explained in the dialog, defer that until you actually need it for Home Assistant. Click “Install” 🄸. + + ![device encryption key](./images/esphome-060-encryption-key.png) + +7. The primary reason for running ESPHome as a container in IOTstack is so you can program ESP devices attached to your Raspberry Pi. You need to tell ESPHome what you are doing by selecting “Plug into the computer running ESPHome Dashboard” 🄹: + + ![choose device connection method](./images/esphome-070-install-method.png) + +8. If all has gone well, your device will appear in the list. 
Select it 🄺: + + ![pick server USB port](./images/esphome-080-server-port.png) + + If, instead, you see the window below, it likely means you did not connect your ESP device *while* the ESPHome container was running: + + ![no device detected alert](./images/esphome-085-no-server-port.png) + + Try disconnecting and reconnecting your ESP device, and waiting for the panel 🄺 to refresh. If that does not cure the problem then it likely means the [UDEV rules](#udevRules) are not matching on your particular device for some reason. You may need to consider [privileged mode](#privileged). + +9. The container will begin the process of compiling the firmware and uploading it to your device. The first time you do this takes significantly longer than second-or-subsequent builds, mainly because the container downloads almost 2GB of data. + + ![build process log window](./images/esphome-090-build-sequence.png) + + The time to compile depends on the speed of your Raspberry Pi hardware (ie a Raspberry Pi 5 will be significantly faster than a model 4, which in turn will be faster than a model 3). Be patient! + + When the progress log 🄻 implies the process has been completed, you can click Stop 🄼 to dismiss the window. + +10. Assuming normal completion, your ESP device should show as "Online" 🄽. You can edit or explore the configuration using the "Edit" and "⋮" buttons. + + ![job done, device online](./images/esphome-100-device-online.png) + +## Getting a clean slate + +If ESPHome misbehaves or your early experiments leave a lot of clutter behind, and you decide it would be best to start over with a clean installation, run the commands below: + +``` console +$ cd ~/IOTstack +$ docker-compose down esphome +$ sudo rm -rf ./volumes/esphome +$ docker-compose up -d esphome +``` + +Notes: + +1. Always be careful with `sudo rm`. Double-check the command **before** you press enter. +2. The `sudo rm` may seem to take longer than you expect. Do not be concerned. ESPHome downloads a lot of data which it stores at the hidden path: + + ``` + ~/IOTstack/volumes/esphome/config/.esphome + ``` + + A base install has more than 13,000 files and over 3,000 directories. Even on a solid state disk, deleting that many directory entries takes time! + +## Device mapping + +### UDEV rules file {#udevRules} + +The [service definition](#serviceDefinition) contains the following lines: + +``` yaml linenums="14" + device_cgroup_rules: + - 'c 188:* rw' +``` + +Those lines assume the presence of a rules file at: + +``` +/etc/udev/rules.d/88-tty-iotstack-esphome.rules +``` + +That file is copied into place automatically if you use the IOTstack menu to select ESPHome. It should also have been copied if you [installed ESPHome manually](#manualInstall). + +What the rules file does is to wait for you to connect any USB device which maps to a major device number of 188. That includes most (hopefully all) USB-to-serial adapters that are found on ESP dev boards, or equivalent standalone adapters such as those made by Future Technology Devices International (FTDI) and Silicon Laboratories Incorporated, where you typically connect jumper wires to the GPIO pins which implement the ESP's primary serial interface. + +Whenever you **connect** such a device to your Raspberry Pi, the rules file instructs the ESPHome container to add a matching node. Similarly, when you **remove** such a device, the rules file instructs the ESPHome container to delete the matching node. The **container** gains the ability to access the USB device (the ESP) via the `device_cgroup_rules` clause.
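
If you want to confirm that a particular adapter really does fall within the scope of that rule, you can check its major device number on the host before worrying about the container. This is only a sketch: it assumes your adapter enumerates as `/dev/ttyUSB0` (yours may appear under a different name), and the owner, date and minor number will differ:

``` console
$ ls -l /dev/ttyUSB0
crw-rw---- 1 root dialout 188, 0 Jan  1 12:34 /dev/ttyUSB0
```

For character devices, `ls -l` shows the major and minor numbers ("188, 0" here) in place of a file size. If your board reports a major number other than 188, the supplied rule will not match it, which is one reason you might end up needing [privileged mode](#privileged).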
You can check whether a USB device is known to the container by running: + +``` console +$ docker exec esphome ls /dev +``` + +The mechanism is not 100% robust. In particular, it will lose synchronisation if the system is rebooted, or the container is started when a USB device is already mounted. The worst case should be the need to unplug then re-plug the device, after which the container should catch up. + +#### Removing the rules file {#udevRulesRemoval} + +The UDEV rules "fire" irrespective of whether or not the ESPHome container is actually running. All that happens if the container is not running is an error message in the system log. However, if you decide to remove the ESPHome container, you should remove the rules file by hand: + +``` console +$ sudo rm /etc/udev/rules.d/88-tty-iotstack-esphome.rules +``` + +### Privileged mode {#privileged} + +The [UDEV rules approach](#udevRules) uses the principle of least privilege but it relies upon an assumption about how ESP devices represent themselves when connected to a Raspberry Pi. + +If you encounter difficulties, you can consider trying this instead: + +1. Follow the instructions to [remove the UDEV rules file](#udevRulesRemoval). +2. Edit the [service definition](#serviceDefinition) so that it looks like this: + + ``` yaml linenums="14" + x-device_cgroup_rules: + - 'c 188:* rw' + privileged: true + ``` + + The `x-` prefix has the effect of commenting-out lines 14 and 15, making it easy to restore them later. + +3. Start the container: + + ``` console + $ cd ~/IOTstack + $ docker-compose up -d esphome + ``` + +The `privileged` flag gives the container unrestricted access to **all** of `/dev`. The container runs as root so this is the same as granting any process running inside the ESPHome container full and unrestricted access to **all** corners of your hardware platform, including your mass storage devices (SD, HD, SSD). You should use privileged mode *sparingly* and in full knowledge that it is entirely at your own risk! + +## Routine maintenance + +You can keep ESPHome up-to-date with routine "pull" commands: + +``` console +$ cd ~/IOTstack +$ docker-compose pull +$ docker-compose up -d +$ docker system prune -f +``` + +If a `pull` downloads a more-recent image for ESPHome, the subsequent `up` will (logically) disconnect any connected ESP device from the container. + +The same will happen if you "down" and "up" the ESPHome container, or reboot the Raspberry Pi, while an ESP device is physically connected to the Raspberry Pi. + +> In every case, the device will still be known to the Raspberry Pi, just not the ESPHome container. In a logical sense, the container is "out of sync" with the host system. + +If this happens, disconnect and reconnect the device. The [UDEV rule](#udevRules) will "fire" and propagate the device back into the running container. diff --git a/docs/Containers/EspruinoHub.md b/docs/Containers/EspruinoHub.md new file mode 100644 index 000000000..4a8b0ffed --- /dev/null +++ b/docs/Containers/EspruinoHub.md @@ -0,0 +1,13 @@ +# EspruinoHub + +This is a testing container. + +I tried it, however the container keeps restarting. In `docker logs espruinohub` I get "BLE Broken?", but that could just be because I don't have any BLE devices nearby. + +The web interface is on "{your_Pis_IP}:1888". + +See [EspruinoHub#status--websocket-mqtt--espruino-web-ide](https://github.com/espruino/EspruinoHub#status--websocket-mqtt--espruino-web-ide) for other details. + +There were no recommendations for persistent data volumes,
so `docker-compose down` may destroy all of your configuration; use `docker-compose stop` instead. + +Please [check existing issues](https://github.com/SensorsIot/IOTstack/issues) if you encounter a problem, and then open a new issue if your problem has not been reported. diff --git a/docs/Containers/Grafana.md b/docs/Containers/Grafana.md new file mode 100755 index 000000000..22793f7bb --- /dev/null +++ b/docs/Containers/Grafana.md @@ -0,0 +1,156 @@ +# Grafana + +## References + +- [Docker](https://hub.docker.com/r/grafana/grafana) +- [Website](https://grafana.com/) + +## Adding InfluxDB datasource + +When you have logged into Grafana (default user/pass: admin/admin), you have +to add a data source to be used for the graphs. + +Select `Data Sources` -> `Add data source` -> `InfluxDB`. + +Set options: + +* HTTP / URL: `http://influxdb:8086` +* InfluxDB Details / Database: `telegraf` +* InfluxDB Details / User: `nodered` +* InfluxDB Details / Password: `nodered` + +## Overriding configuration variables + +Grafana documentation contains [a list of +settings](https://grafana.com/docs/grafana/latest/administration/configuration/). +Settings are described in terms of how they appear in ".ini" files. + +Grafana configuration is usually done in *grafana.ini*, but when Grafana runs in a +Docker container, as it does under IOTstack, it should be configured using [environment +variables](https://grafana.com/docs/grafana/latest/administration/configuration/#override-configuration-with-environment-variables). + +Edit `docker-compose.yml`, find `grafana:` and, under it, +`environment:`. This is where you can place the ini-options, formatted as: +```yaml + - GF_<SectionName>_<KeyName>=<value> +``` +If you are using old-menu, edit `~/IOTstack/services/grafana/grafana.env` +instead and add the lines directly there, but without the leading dash: +`GF_<SectionName>_<KeyName>=<value>` + +For any changes to take effect you need to recreate the Grafana container: + +``` console +$ docker-compose up -d grafana +``` + +### Setting your time-zone + +Change the right hand side to [your own +timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones): + +```yaml + - TZ=Etc/UTC +``` + +### Anonymous login + +To allow anonymous logins add: + +```yaml + - GF_AUTH_ANONYMOUS_ENABLED=true +``` + +### Custom admin user and password (not recommended) + +If you do not change anything then, when you bring up the stack and use a browser to connect to your Raspberry Pi on port 3000, Grafana will: + +* Expect you to login as user "admin" with password "admin"; and then +* Force you to change the default password to something else. + +Thereafter, you will login as "admin" with whatever password you chose. You can change the administrator's password as often as you like via the web UI (*profile* button, *change password* tab). + +This default operation can be changed by configuration options. They only have +any effect if Grafana has just been added to the stack, but has **never** +been launched. Thus, if the folder *~/IOTstack/volumes/grafana* exists, Grafana +has already been started, and adding and changing these options **will not** +have any effect. + +To customize, edit the file as described above and add the following lines under +the `environment:` clause. For example, to set the administrative username to be "maestro" with password "123456": + +```yaml + - GF_SECURITY_ADMIN_USER=maestro + - GF_SECURITY_ADMIN_PASSWORD=123456 +``` + +If you change the default password, Grafana will not force you to change the +password on first login but you will still be able to change it via the web UI.
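
If you are unsure whether you are in the "never been launched" situation described above, a quick hedged check from the command line (it simply tests for the persistent-store folder mentioned earlier) is:

``` console
$ test -d ~/IOTstack/volumes/grafana && echo "already launched - these variables will be ignored" || echo "never launched - these variables will take effect"
```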
+ +As a summary, the environment variables only take effect if you set them up **before** Grafana is launched for the first time: + +* `GF_SECURITY_ADMIN_USER` has a default value of "admin". You *can* explicitly set it to "admin" or some other value. Whatever option you choose then that's the account name of Grafana's administrative user. But choosing any value other than "admin" is probably a bad idea. +* `GF_SECURITY_ADMIN_PASSWORD` has a default value of "admin". You can explicitly set it to "admin" or some other value. If its value is "admin" then you will be forced to change it the first time you login to Grafana. If its value is something other than "admin" then that will be the password until you change it via the web UI. + +### Options with spaces + +To set an options with a space, you must enclose the whole value in quotes: + +```yaml + - "GF_AUTH_ANONYMOUS_ORG_NAME=Main Org." +``` + +## HELP – I forgot my Grafana admin password! + +Assuming Grafana is started, run: + +``` +$ docker exec grafana grafana cli admin reset-admin-password «NEWPASSWORD» +``` + +where `«NEWPASSWORD»` is the value of your choice. + +Note: + +* If you have customized `GF_SECURITY_ADMIN_USER` to be something other than "admin", the password change will be applied to that username. In other words, in the `docker exec` command above, the two references to "admin" are referring to the administrator's account, not the username of the administrator's account. Run the command "as is". Do **not** replace "admin" with the username of the administrator's account. + +## HELP - Resetting to a clean slate + +"I made a bit of a mess with Grafana. First time user. Steep learning curve. False starts, many. Mistakes, unavoidable. Been there, done that. But now I **really** need to start from a clean slate. And, yes, I understand there is no *undo* for this." + +Begin by stopping Grafana: + +``` console +$ cd ~/IOTstack +$ docker-compose down grafana +``` + +> see also [if downing a container doesn't work](../Basic_setup/index.md/#downContainer) + +You have two options: + +1. Destroy your settings and dashboards but retain any plugins you may have installed: + + ``` console + $ sudo rm ~/IOTstack/volumes/grafana/data/grafana.db + ``` + +2. Nuke everything (triple-check this command **before** you hit return): + + ``` console + $ sudo rm -rf ~/IOTstack/volumes/grafana/data + ``` + +This is where you should edit *docker-compose.yml* or +*~/IOTstack/services/grafana/grafana.env* to correct any problems (such as +choosing an administrative username other than "admin"). + +When you are ready, bring Grafana back up again: + +``` console +$ cd ~/IOTstack +$ docker-compose up -d grafana +``` + +Grafana will automatically recreate everything it needs. You will be able to login as "admin/admin" (or the credentials you set using `GF_SECURITY_ADMIN_USER` and `GF_SECURITY_ADMIN_PASSWORD`). + diff --git a/docs/Containers/Heimdall.md b/docs/Containers/Heimdall.md new file mode 100644 index 000000000..1d1cd1f03 --- /dev/null +++ b/docs/Containers/Heimdall.md @@ -0,0 +1,21 @@ +# Heimdall + +## References + +* [Homepage](https://heimdall.site/) +* [Docker](https://hub.docker.com/r/linuxserver/heimdall/) + +## Web Interface + +The web UI can be found on: + +* HTTP: `"your_ip":8882` +* HTTPS: `"your_ip":8883` + +## About *Heimdall* + +From the [Heimdall website](https://heimdall.site/): + +> Heimdall Application Dashboard is a dashboard for all your web applications. 
It doesn't need to be limited to applications though, you can add links to anything you like. There are no iframes here, no apps within apps, no abstraction of APIs. if you think something should work a certain way, it probably does. + +Within the context of IOTstack, the Heimdall Application Dashboard can help you organize your deployed services. diff --git a/docs/Containers/Home-Assistant.md b/docs/Containers/Home-Assistant.md new file mode 100644 index 000000000..dc2531cc0 --- /dev/null +++ b/docs/Containers/Home-Assistant.md @@ -0,0 +1,263 @@ +# Home Assistant + +Home Assistant is a home automation platform. It is able to track and control all devices at your home and offer a platform for automating control. + +## References { #references } + +- [Home Assistant home page](https://www.home-assistant.io/) + + - [Raspberry Pi installation](https://www.home-assistant.io/installation/raspberrypi/) + - [General installation](https://www.home-assistant.io/installation) (may be useful if you are trying to run on other hardware). + +- [GitHub repository](https://github.com/home-assistant/core) +- [DockerHub](https://hub.docker.com/r/homeassistant/home-assistant/) + + +## Home Assistant: two versions { #twoVersions } + +There are two versions of Home Assistant: + +* Home Assistant Container; and +* Supervised Home Assistant (also known as both "Hass.io" and "Home Assistant Core"). + +Each version: + +* provides a web-based management interface on port 8123; and +* runs in "host mode" in order to discover devices on your LAN, including devices communicating via multicast traffic. + +Home Assistant Container runs as a **single** Docker container, and doesn't support all the features that Supervised Home Assistant does (such as add-ons). Supervised Home Assistant runs as a **collection** of Docker containers under its own orchestration. + +The **only** method supported by IOTstack is Home Assistant Container. + +> To understand why, see [about Supervised Home Assistant](#hassioBackground). + +If Home Assistant Container will not do what you want then, basically, you will need two Raspberry Pis: + +* One running Raspberry Pi OS ("Raspbian") hosting IOTstack; and +* Another dedicated to running [Home Assistant Operating System](https://www.home-assistant.io/installation/raspberrypi). + +## Installing Home Assistant Container { #installHAContainer } + +Home Assistant (Container) can be found in the `Build Stack` menu. Selecting it in this menu results in a service definition being added to: + +``` +~/IOTstack/docker-compose.yml +``` + +The normal IOTstack commands apply to Home Assistant Container such as: + +```console +$ cd ~/IOTstack +$ docker-compose up -d +``` + +## Using bluetooth from the container { #usingBluetooth } + +In order to be able to use BT & BLE devices from HA integrations, make sure that Bluetooth is enabled: + +```console +$ hciconfig +hci0: Type: Primary Bus: UART + BD Address: DC:89:FB:A6:32:4B ACL MTU: 1021:8 SCO MTU: 64:1 + UP RUNNING + RX bytes:2003 acl:0 sco:0 events:159 errors:0 + TX bytes:11583 acl:0 sco:0 commands:159 errors:0 +``` + +The "UP" in the third line of output indicates that Bluetooth is enabled. If Bluetooth is not enabled, check: + +```console +$ grep "^AutoEnable" /etc/bluetooth/main.conf +AutoEnable=true +``` + +If `AutoEnable` is either missing or not set to `true`, then: + +1. Use `sudo` to and your favouring text editor to open: + + ``` + /etc/bluetooth/main.conf + ``` + +2. Find `AutoEnable` and make it `true`. 
+ + > If `AutoEnable` is missing, it needs to be added to the `[Policy]` section. + +3. Reboot your Raspberry Pi. +4. Check that the Bluetooth interface is enabled. + +See also: [Scribles: Auto Power On Bluetooth Adapter on Boot-up](https://scribles.net/auto-power-on-bluetooth-adapter-on-boot-up/). + +### Possible service definition changes { #serviceDefinition } + +Although the [Home Assistant documentation](https://www.home-assistant.io/installation/raspberrypi#docker-compose) does not mention this, it is *possible* that you may also need to make the following changes to the Home Assistant service definition in your `docker-compose.yml`: + +* Add the following mapping to the `volumes:` clause: + + ```yaml + - /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket + ``` + +* Add the following `devices:` clause: + + ```yaml + devices: + - "/dev/serial1:/dev/ttyAMA0" + - "/dev/vcio:/dev/vcio" + - "/dev/gpiomem:/dev/gpiomem" + ``` + +Notes: + +* These changes are *specific* to the Raspberry Pi. If you need Bluetooth support on non-Pi hardware, you will need to figure out the details for your chosen platform. +* Historically, `/dev/ttyAMA0` meant "the serial interface" on Raspberry Pis. Subsequently, it came to mean "the Bluetooth interface" where Bluetooth support was present. Now, `/dev/serial1` is used to mean "the Raspberry Pi's Bluetooth interface". The example above maps that to the internal device `/dev/ttyAMA0` because that is **probably** what the container expects. There are no guarantees and you may need to experiment with internal device names. + +## HTTPS with a valid SSL certificate { #httpsWithSSLcert } + +Some HA integrations (e.g google assistant) require your HA API to be +accessible via https with a valid certificate. You can configure HA to do this: +[docs](https://www.home-assistant.io/docs/configuration/remote/) / +[guide](https://www.home-assistant.io/docs/ecosystem/certificates/lets_encrypt/) +or use a reverse proxy container, as described below. + +The linuxserver Secure Web Access Gateway container +([swag](https://docs.linuxserver.io/general/swag)) ([Docker hub +docs](https://hub.docker.com/r/linuxserver/swag)) will automatically generate a +SSL-certificate, update the SSL certificate before it expires and act as a +reverse proxy. + +1. First test your HA is working correctly: `http://raspberrypi.local:8123/` (assuming +your RPi hostname is raspberrypi) +2. Make sure you have duckdns working. +3. On your internet router, forward public port 443 to the RPi port 443 +4. Add swag to ~/IOTstack/docker-compose.yml beneath the `services:`-line: + + ```yaml + swag: + image: ghcr.io/linuxserver/swag + cap_add: + - NET_ADMIN + environment: + - PUID=1000 + - PGID=1000 + - TZ=${TZ:-Etc/UTC} + - URL=.duckdns.org + - SUBDOMAINS=wildcard + - VALIDATION=duckdns + - DUCKDNSTOKEN= + - CERTPROVIDER=zerossl + - EMAIL= # required when using zerossl + volumes: + - ./volumes/swag/config:/config + ports: + - 443:443 + restart: unless-stopped + ``` + + Replace the bracketed values. Do NOT use any "-characters to enclose the values. + +5. Start the swag container, this creates the file to be edited in the next step: + + ```console + $ cd ~/IOTstack + $ docker-compose up -d + ``` + + Check it starts up OK: `docker-compose logs -f swag`. It will take a minute or two before it finally logs "Server ready". + +6. Enable reverse proxy for `raspberrypi.local`. `homassistant.*` is already by default. 
and fix homeassistant container name ("upstream_app"): + + ```console + $ cd ~/IOTstack + $ sed -e 's/server_name/server_name *.local/' \ + volumes/swag/config/nginx/proxy-confs/homeassistant.subdomain.conf.sample \ + > volumes/swag/config/nginx/proxy-confs/homeassistant.subdomain.conf + ``` + +7. Forward to correct IP when target is a container running in "network_mode: + host" (like Home Assistant does): + + + ``` bash title="Note: in order for copy-paste to work properly, the usual $-prompts are omitted" + cd ~/IOTstack + cat << 'EOF' | sudo tee volumes/swag/config/custom-cont-init.d/add-host.docker.internal.sh + #!/bin/sh + DOCKER_GW=$(ip route | awk 'NR==1 {print $3}') + + sed -i -e "s/upstream_app .*/upstream_app ${DOCKER_GW};/" \ + /config/nginx/proxy-confs/homeassistant.subdomain.conf + EOF + sudo chmod u+x volumes/swag/config/custom-cont-init.d/add-host.docker.internal.sh + ``` + + (This needs to be copy-pasted/entered as-is, ignore any "> "-prefixes printed + by bash) + +8. (optional) Add reverse proxy password protection if you don't want to rely + on the HA login for security, doesn't affect API-access: + + ```console + $ cd ~/IOTstack + $ sed -i -e 's/#auth_basic/auth_basic/' \ + volumes/swag/config/nginx/proxy-confs/homeassistant.subdomain.conf + $ docker-compose exec swag htpasswd -c /config/nginx/.htpasswd anyusername + ``` + +9. Add `use_x_forwarded_for` and `trusted_proxies` to your homeassistant [http + config](https://www.home-assistant.io/integrations/http). The configuration + file is at `volumes/home_assistant/configuration.yaml` For a default install + the resulting http-section should be: + + ```yaml + http: + use_x_forwarded_for: true + trusted_proxies: + - 192.168.0.0/16 + - 172.16.0.0/12 + - 10.77.0.0/16 + ``` + +10. Refresh the stack: `cd ~/IOTstack && docker-compose stop && docker-compose + up -d` (again may take 1-3 minutes for swag to start if it recreates + certificates) +11. Test homeassistant is still working correctly: + `http://raspberrypi.local:8123/` (assuming your RPi hostname is + raspberrypi) +12. Test the reverse proxy https is working correctly: + `https://raspberrypi.local/` (browser will issue a warning about wrong + certificate domain, as the certificate is issued for you duckdns-domain, we + are just testing) + + Or from the command line in the RPi: + + ```console + $ curl --resolve homeassistant..duckdns.org:443:127.0.0.1 \ + https://homeassistant..duckdns.org/ + ``` + + (output should end in `if (!window.latestJS) { }`) + +13. And finally test your router forwards correctly by accessing it from + outside your LAN(e.g. using a mobile phone): + `https://homeassistant..duckdns.org/` Now the certificate + should work without any warnings. + +## about Supervised Home Assistant { #hassioBackground } + +IOTstack used to offer a menu entry leading to a convenience script that could install Supervised Home Assistant. That script stopped working when Home Assistant changed their approach. The script's author [made it clear](https://github.com/Kanga-Who/home-assistant/blob/master/Supervised%20on%20Raspberry%20Pi%20with%20Debian.md) that script's future was bleak so the affordance was [removed from IOTstack](https://github.com/SensorsIot/IOTstack/pull/493). + +For a time, you could manually install Supervised Home Assistant using their [installation instructions for advanced users](https://github.com/home-assistant/supervised-installer). Once you got HA working, you could install IOTstack, and the two would (mostly) happily coexist. 
+ +The direction being taken by the Home Assistant folks is to supply a [ready-to-run image for your Raspberry Pi](https://www.home-assistant.io/installation/raspberrypi). They still support the installation instructions for advanced users but the [requirements](https://github.com/home-assistant/architecture/blob/master/adr/0014-home-assistant-supervised.md#supported-operating-system-system-dependencies-and-versions) are very specific. In particular: + +> Debian Linux Debian 11 aka Bullseye (no derivatives) + +Raspberry Pi OS is a Debian *derivative* and it is becoming increasingly clear that the "no derivatives" part of that requirement must be taken literally and seriously. Recent examples of significant incompatibilities include: + +* [introducing a dependency on `grub` (GRand Unified Bootloader)](https://github.com/home-assistant/supervised-installer/pull/201). The Raspberry Pi does not use `grub` but the change is actually about forcing Control Groups version 1 when the Raspberry Pi uses version 2. +* [unilaterally starting `systemd-resolved`](https://github.com/home-assistant/supervised-installer/pull/202). This is a DNS resolver which claims port 53. That means you can't run your own DNS service like PiHole, AdGuardHome or BIND9 as an IOTstack container. + +Because of the self-updating nature of Supervised Home Assistant, your Raspberry Pi might be happily running Supervised Home Assistant plus IOTstack one day, and suddenly start misbehaving the next day, simply because Supervised Home Assistant assumed it was in total control of your Raspberry Pi. + +If you want Supervised Home Assistant to work, reliably, it really needs to be its own dedicated appliance. If you want IOTstack to work, reliably, it really needs to be kept well away from Supervised Home Assistant. If you want both Supervised Home Assistant and IOTstack, you really need two Raspberry Pis. diff --git a/docs/Containers/Homebridge.md b/docs/Containers/Homebridge.md new file mode 100644 index 000000000..672200b3c --- /dev/null +++ b/docs/Containers/Homebridge.md @@ -0,0 +1,45 @@ +# Homebridge + +## References + +* [GitHub home](https://github.com/oznu/docker-homebridge) +* [Configuration Guide](https://github.com/oznu/docker-homebridge/wiki/Homebridge-on-Raspberry-Pi) +* [DockerHub](https://hub.docker.com/r/oznu/homebridge) + +## Configuration + +Homebridge documentation has a comprehensive [configuration guide](https://github.com/oznu/docker-homebridge/wiki/Homebridge-on-Raspberry-Pi) which you are encouraged to read. + +Homebridge is configured using environment variables. In IOTstack: + +* If you are running new menu (master branch, the default), environment variables are kept inline in `docker-compose.yml`. +* If you are running old menu (old-menu branch), environment variables are at the path: + + ``` + ~/IOTstack/services/homebridge/homebridge.env + ``` + +In either case, you apply changes by editing the relevant file (`docker-compose.yml` or `homebridge.env`) and then: + +```console +$ cd ~/IOTstack +$ docker-compose up -d homebridge +``` + +### About "avahi" + +"avahi", "multicast DNS", "Rendezvous", "Bonjour" and "ZeroConf" are synonyms. + +Current Homebridge images disable avahi services by default. The Homebridge container runs in "host mode" which means it can participate in multicast traffic flows. If you have a plugin that requires avahi, it can enabled by setting the environment variable: + +```yaml +ENABLE_AVAHI=1 +``` + +## Web Interface + +The web UI for Homebridge can be found on `"your_ip":8581`. 
You can change the port by adjusting the environment variable: + +``` +HOMEBRIDGE_CONFIG_UI_PORT=8581 +``` diff --git a/docs/Containers/Homer.md b/docs/Containers/Homer.md new file mode 100644 index 000000000..afd8220c1 --- /dev/null +++ b/docs/Containers/Homer.md @@ -0,0 +1,18 @@ +# Homer + +## References +* [Homepage](https://github.com/bastienwirtz/homer) +* [Docker](https://hub.docker.com/r/b4bz/homer) + +## Web Interface +The web UI can be found on `"your_ip":8881` + +## About *Homer* + +From the [Homer README](https://github.com/bastienwirtz/homer/blob/main/README.md): + +> A dead simple static HOMepage for your servER to keep your services on hand, from a simple `yaml` configuration file. + +You can find an example of the `config.yml` file [here](https://github.com/bastienwirtz/homer/blob/main/docs/configuration.md). + +Within the context of IOTstack, Homer can help you organize your deployed services. diff --git a/docs/Containers/InfluxDB.md b/docs/Containers/InfluxDB.md new file mode 100644 index 000000000..a643dd1e9 --- /dev/null +++ b/docs/Containers/InfluxDB.md @@ -0,0 +1,750 @@ +# InfluxDB + +InfluxDB is a time series database. What that means is *time* is the primary key of each table. + +Another feature of InfluxDB is the separation of *attributes* into: + +* *fields:* which are intended to hold variable data (data that is likely to be different in each row, such as a temperature reading from a sensor); and +* *tags:* which are intended to hold metadata (data that is unlikely to be different in each row, such as the name of the sensor). + +InfluxDB has configurable aggregation and retention policies allowing measurement resolution reduction, storing all added data points for recent data and only aggregated values for older data. + +## References { #references } + +- [DockerHub](https://hub.docker.com/_/influxdb/tags) +- [GitHub home page](https://github.com/influxdata/influxdata-docker) (for the container) +- [InfluxDB 1.8 documentation](https://docs.influxdata.com/influxdb/v1.8/) +- [InfluxDB 1.8 configuration reference](https://docs.influxdata.com/influxdb/v1.8/administration/config) + +Note: + +* IOTstack uses the `influxdb:1.8` image. Substituting the `:latest` tag will get you InfluxDB version 2 and *will* create a mess. + +## Configuration { #configuration } + +All InfluxDB [settings](https://docs.influxdata.com/influxdb/v1.8/administration/config) can be applied using environment variables. Environment variables override any settings in the [InfluxDB configuration file](#configFile): + +* Under "new menu" (master branch), environment variables are stored inline in + + ``` + ~IOTstack/docker-compose.yml + ``` + +* Under "old menu", environment variables are stored in: + + ``` + ~/IOTstack/services/influxdb/influxdb.env + ``` + +Whenever you change an environment variable, you activate it like this: + +``` console +$ cd ~/IOTstack +$ docker-compose up -d influxdb +``` + +The default service definition provided with IOTstack exposes the following environment variables: + +- `TZ=Etc/UTC` set this to your local timezone. Do **not** use quote marks! +- `INFLUXDB_HTTP_FLUX_ENABLED=false` set this `true` if you wish to use Flux queries rather than InfluxQL: + + > At the time of writing, Grafana queries use InfluxQL. + +- `INFLUXDB_REPORTING_DISABLED=false` InfluxDB activates *phone-home* reporting by default. This variable disables it for IOTstack. You can activate it if you want your InfluxDB instance to send reports to the InfluxDB developers. 
+- `INFLUXDB_MONITOR_STORE_ENABLED=FALSE` disables automatic creation of the `_internal` database. This database stores metrics about InfluxDB itself. The database is *incredibly* busy. Side-effects of enabling this feature include increased wear and tear on SD cards and, occasionally, driving CPU utilisation through the roof and generally making your IOTstack unstable. + + > To state the problem in a nutshell: *do you want Influx self-metrics, or do you want a usable IOTstack?* You really can't have both. See also [issue 19543](https://github.com/influxdata/influxdb/issues/19543). + +- Authentication variables: + + - `INFLUXDB_HTTP_AUTH_ENABLED=false` + - `INFLUX_USERNAME=dba` + - `INFLUX_PASSWORD=supremo` + + Misunderstanding the purpose and scope of these variables is a common mistake made by new users. Please do not guess! Please read [Authentication](#authentication) **before** you enable or change any of these variables. In particular, `dba` and `supremo` are **not** defaults for database access. + +- UDP data acquisition variables: + + - `INFLUXDB_UDP_ENABLED=false` + - `INFLUXDB_UDP_BIND_ADDRESS=0.0.0.0:8086` + - `INFLUXDB_UDP_DATABASE=udp` + + Read [UDP support](#udpSupport) before making any decisions on these variables. + +### about `influxdb.conf` { #configFile } + +A lot of InfluxDB documentation and help material on the web refers to the `influxdb.conf` configuration file. Such instructions are only appropriate when InfluxDB is installed *natively*. + +When InfluxDB runs in a *container*, changing `influxdb.conf` is neither necessary nor recommended. Anything that you can do with `influxdb.conf` can be done with environment variables. + +However, if you believe that you have a use case that absolutely demands the use of `influxdb.conf` then you can set it up like this: + +1. Make sure the InfluxDB container is running! +2. Execute the following commands: + + ``` console + $ cd ~/IOTstack + $ docker cp influxdb:/etc/influxdb/influxdb.conf . + ``` + +3. Edit `docker-compose.yml`, find the `influxdb` service definition, and add the following line to the `volumes:` directive: + + ``` yaml + - ./volumes/influxdb/config:/etc/influxdb + ``` + +4. Execute the following commands: + + ``` console + $ docker-compose up -d influxdb + $ sudo mv influxdb.conf ./volumes/influxdb/config/ + $ docker-compose restart influxdb + ``` + +At this point, you can start making changes to: + +``` +~/IOTstack/volumes/influxdb/config/influxdb.conf +``` + +You can apply changes by sending a `restart` to the container (as above). However, from time to time you may find that your settings disappear or revert to defaults. Make sure you keep good backups. + +## Connecting to InfluxDB { #connecting } + +By default, InfluxDB runs in non-host mode and respects the following port-mapping directive in its service definition: + +``` yaml +ports: + - "8086:8086" +``` + +If you are connecting from: + +* another container (eg Node-RED or Grafana) that is also running in non-host mode, use: + + ``` + http://influxdb:8086 + ``` + + In this context, `8086` is the *internal* (right hand side) port number. + +* either the Raspberry Pi itself or from another container running in host mode, use: + + ``` + http://localhost:8086 + ``` + + In this context, `8086` is the *external* (left hand side) port number. + +* a different host, you use either the IP address of the Raspberry Pi or its fully-qualified domain name. 
Examples: + + ``` + http://192.168.1.10:8086 + http://raspberrypi.local:8086 + http://iot-hub.mydomain.com:8086 + ``` + + In this context, `8086` is the *external* (left hand side) port number. + +## Interacting with the Influx CLI { #influxCLI } + +You can open the `influx` CLI interactive shell by: + +``` console +$ docker exec -it influxdb influx +Connected to http://localhost:8086 version 1.8.10 +InfluxDB shell version: 1.8.10 +> +``` + +The command prompt in the CLI is `>`. While in the CLI you can type commands such as: + +``` console +> help +> create database MYTESTDATABASE +> show databases +> USE MYTESTDATABASE +> show measurements +> show series +> select * from «someMeasurement» where «someCriterion» +``` + +You may also wish to set retention policies on your databases. This is an example of creating a database named "mydb" where any data older than 52 weeks is deleted: + +``` console +> create database mydb + +> show retention policies on mydb +name duration shardGroupDuration replicaN default +---- -------- ------------------ -------- ------- +autogen 0s 168h0m0s 1 true + +> alter retention policy "autogen" on "mydb" duration 52w shard duration 1w replication 1 default + +> show retention policies on mydb +name duration shardGroupDuration replicaN default +---- -------- ------------------ -------- ------- +autogen 8736h0m0s 168h0m0s 1 true +``` + +To exit the CLI, either press Control+d or type: + +``` console +> exit +$ +``` + +### useful alias { #usefulAlias } + +Consider adding the following alias to your `.bashrc`: + +``` console +alias influx='docker exec -it influxdb influx -precision=rfc3339' +``` + +With that alias installed, typing `influx` and pressing return, gets you straight into the influx CLI. The `-precision` argument tells the influx CLI to display dates in human-readable form. Omitting that argument displays dates as integer nanoseconds since 1970-01-01. + +Note: + +* This alias is installed by [IOTstackAliases](https://github.com/Paraphraser/IOTstackAliases). + +## Authentication { #authentication } + +### warning { #authWarning } + +This tutorial also assumes that you do not have any existing databases so it starts by creating two. One database will be provided with access controls but the other will be left alone so that the behaviour can be compared. + +However, you need to understand that enabling authentication in InfluxDB is *all-or-nothing*. If you have any existing InfluxDB databases, you will need to: + +* define access rights for **all** of your databases; and +* provide credentials to processes like Node-Red and Grafana that access your databases. + +If you do not do this, your existing Node-Red flows, Grafana dashboards and other processes that write to or query your databases will stop working as soon as you [activate authentication](#authStep4) below. + +### create two test databases { #authStep1 } + +Create two databases named "mydatabase1" and "mydatabase2": + +``` console +$ influx +> CREATE DATABASE "mydatabase1" +> CREATE DATABASE "mydatabase2" +``` + +> Typing `influx` didn't work? See [useful alias](#usefulAlias) above. + +### define users { #authStep2 } + +Define an administrative user. In this example, that user is "dba" (database administrator) with the password "supremo": + +``` console +> CREATE USER "dba" WITH PASSWORD 'supremo' WITH ALL PRIVILEGES +``` + +* Key point: the mixture of "double" and 'single' quotes is **intentional** and **required**. 
+ +Define some garden-variety users: + +``` console +> CREATE USER "nodered_user" WITH PASSWORD 'nodered_user_pw' +> CREATE USER "grafana_user" WITH PASSWORD 'grafana_user_pw' +``` + +You can define any usernames you like. The reason for using "nodered\_" and "grafana\_" prefixes in these examples is because those are common candidates in an IOTstack environment. The reason for the "\_user" suffixes is to make it clear that a *username* is separate and distinct from a *container* name. + +### assign access rights { #authStep3 } + +The user "dba" already has access to everything but, for all other users, you need to state which database(s) the user can access, and whether that access is: + +* READ (aka read-only) +* WRITE (aka write-only) +* ALL (implies both READ and WRITE) + +``` console +> GRANT WRITE ON "mydatabase1" TO "nodered_user" +> GRANT READ ON "mydatabase1" TO "grafana_user" +``` + +* Key point: you CREATE a user *once* but you need to GRANT access to every database to which that user needs access. + +Once you have finished defining users and assigning access rights, drop out of the influx CLI: + +``` console +> exit +$ +``` + +### activate authentication { #authStep4 } + +Make sure you read the [warning](#authWarning) above, then edit the InfluxDB environment variables to enable this key: + +``` yaml +- INFLUXDB_HTTP_AUTH_ENABLED=true +``` + +Put the change into effect by "upping" the container: + +``` console +$ cd ~/IOTstack +$ docker-compose up -d influxdb + +Recreating influxdb ... done +``` + +The `up` causes `docker-compose` to notice that the environment has changed, and to rebuild the container with the new settings. + +* Note: You should always wait for 30 seconds after a rebuild for InfluxDB to become available. Any time you see a message like this: + + ``` + Failed to connect to http://localhost:8086: Get http://localhost:8086/ping: dial tcp 127.0.0.1:8086: connect: connection refused + Please check your connection settings and ensure 'influxd' is running. + ``` + + it simply means that you did not wait long enough. Be patient! + +### experiments { #authStep5 } + +Start the influx CLI: + +``` console +$ influx +``` + +Unless you have also set up the `INFLUX_USERNAME` and `INFLUX_PASSWORD` environment variables (described later under [Authentication Hints](#authHints)), your session will not be authenticated as any user so you will not be able to access either database: + +``` console +> USE mydatabase1 +ERR: unable to parse authentication credentials +DB does not exist! +> USE mydatabase2 +ERR: unable to parse authentication credentials +DB does not exist! +``` + +* Key point: This is what will happen to any of your pre-existing databases if you enable authentication without a lot of care. You **must** define users and access rights for **all** of your databases, and you **must** provide those credentials to the relevant processes like Node-Red and Grafana. + +Authenticate as "nodered_user" and try again: + +``` console +> AUTH +username: nodered_user +password: +> USE mydatabase1 +Using database mydatabase1 +> USE mydatabase2 +ERR: Database mydatabase2 doesn't exist. Run SHOW DATABASES for a list of existing databases. +DB does not exist! +``` + +The "nodered_user" can access "mydatabase1" but not "mydatabase2". You will get similar behaviour for the "grafana_user" (try it). 
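
The same credentials govern access over HTTP, which is how processes like Node-RED and Grafana connect. As a hedged illustration using `curl` from the Raspberry Pi (this assumes the default `8086:8086` port mapping described under [Connecting to InfluxDB](#connecting); the measurement name `authtest` is arbitrary):

``` console
$ curl -i -XPOST "http://localhost:8086/write?db=mydatabase1" \
    -u nodered_user:nodered_user_pw \
    --data-binary "authtest somefield=1"
$ curl -G "http://localhost:8086/query" \
    -u grafana_user:grafana_user_pw \
    --data-urlencode "db=mydatabase1" \
    --data-urlencode "q=SELECT * FROM authtest"
```

A successful write returns an HTTP 204 status; repeating either command without `-u`, or with credentials that lack the necessary privilege, should produce an authorization error, mirroring the CLI behaviour above. Returning to the influx CLI session: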
+ +Authenticate as the "dba" and try again: + +``` console +> AUTH +username: dba +password: +> USE mydatabase1 +Using database mydatabase1 +> USE mydatabase2 +Using database mydatabase2 +``` + +The super-user can access both databases. + +To get a list of users: + +``` console +> SHOW USERS +user admin +---- ----- +dba true +nodered_user false +grafana_user false +``` + +* Key point: you must be authenticated as the "dba" to run SHOW USERS. + +To find out what privileges a user has on a database: + +``` console +> SHOW GRANTS FOR "nodered_user" +database privilege +-------- --------- +mydatabase1 WRITE +``` + +* Key point: you must be authenticated as the "dba" to run SHOW GRANTS. + +To test grants, you can try things like this: + +``` console +AUTH +username: nodered_user +password: +> USE "mydatabase1" +Using database mydatabase1 +> INSERT example somefield=123 +``` + +"nodered_user" has WRITE access to "mydatabase1". + +``` console +> SELECT * FROM example +ERR: error authorizing query: nodered_user not authorized to execute statement 'SELECT * FROM example', requires READ on mydatabase1 +``` + +"nodered_user" does not have READ access to "mydatabase1". + +Authenticate as "grafana_user" and try the query again: + +``` console +> AUTH +username: grafana_user +password: +> SELECT * FROM example +name: example +time somefield +---- --------- +2020-09-19T01:41:09.6390883Z 123 +``` + +"grafana_user" has READ access to "mydatabase1". Try an insertion as "grafana_user": + +``` console +> INSERT example somefield=456 +ERR: {"error":"\"grafana_user\" user is not authorized to write to database \"mydatabase1\""} +``` + +"grafana_user" does not have WRITE access to "mydatabase1". + +Change the privileges for "nodered_user" to ALL then try both an insertion and a query. Note that changing privileges requires first authenticating as "dba": + +``` console +> AUTH +username: dba +password: +> GRANT ALL ON "mydatabase1" TO "nodered_user" +> AUTH +username: nodered_user +password: +> INSERT example somefield=456 +> SELECT * FROM example +name: example +time somefield +---- --------- +2020-09-19T01:41:09.6390883Z 123 +2020-09-19T01:42:36.85766382Z 456 +``` + +"nodered_user" has both READ and WRITE access to "mydatabase1". + +### notes { #authNotes } + +1. Some inferences to draw from the above: + + * user definitions are **global** rather than per-database. Grants are what tie users to particular databases. + * setting `INFLUXDB_HTTP_AUTH_ENABLED=true` is how authentication is activated and enforced. If it is false, all enforcement goes away (a handy thing to know if you lose passwords or need to recover from a mess). + * as the "HTTP" in `INFLUXDB_HTTP_AUTH_ENABLED` suggests, it applies to access via HTTP. This includes the influx CLI and processes like Node-Red and Grafana. + +2. Always keep in mind that the InfluxDB log is your friend: + + ``` console + $ docker logs influxdb + ``` + +### hints { #authHints } + +After you enable authentication, there are a couple of ways of speeding-up your daily activities. You can pass the dba username and password on the end of the influx alias: + +``` console +$ influx -database mydatabase1 -username dba -password supremo +``` + +but this is probably sub-optimal because of the temptation to hard-code your dba password into scripts. An alternative is to enable these environment variables: + +``` yaml +- INFLUX_USERNAME=dba +- INFLUX_PASSWORD=supremo +``` + +and then "up" the container as explained above to apply the changes. 
+ +Misunderstandings about the scope and purpose of `INFLUX_USERNAME` and `INFLUX_PASSWORD` are quite common so make sure you realise that the variables: + +* do **not** "set" any username or password within InfluxDB; +* **only** apply to starting the influx CLI – they are just synonyms for the `-username` and `-password` parameters on the `influx` CLI command; and +* are **not** some kind of general-access credentials that apply to everything. They will not work from Node-RED or Grafana! + +In other words, with `INFLUX_USERNAME` and `INFLUX_PASSWORD` added to the environment, the following two commands are identical: + +``` console +$ influx -database mydatabase1 -username dba -password supremo +$ influx -database mydatabase1 +``` + +The `INFLUX_USERNAME` and `INFLUX_PASSWORD` variables also work if you start a shell into the InfluxDB container and then invoke the influx CLI from there: + +``` console +$ docker exec -it influxdb bash +# influx +> +``` + +That is **all** the `INFLUX_USERNAME` and `INFLUX_PASSWORD` variables do. + +### cleaning up { #authCleanup } + +To undo the steps in this tutorial, first set `INFLUXDB_HTTP_AUTH_ENABLED=false` and then "up" influxdb. Then: + +``` console +$ influx +> DROP USER "dba" +> DROP USER "nodered_user" +> DROP USER "grafana_user" +> DROP DATABASE "mydatabase1" +> DROP DATABASE "mydatabase2" +> exit +``` + +## UDP support { #udpSupport } + +Assumptions: + +* you want to enable UDP support; and +* your goal is to log traffic arriving on UDP port 8086 into an InfluxDB database named "udp". + +### aliases { #udpAliases } + +This tutorial uses the following aliases: + +* `influx` - explained earlier - see [useful alias](#usefulAlias). +* `DPS` which is the equivalent of: + + ``` console + $ docker ps --format "table {{.Names}}\t{{.RunningFor}}\t{{.Status}}" + ``` + + The focus is: *what containers are running?* + +* `DNET` which is the equivalent of: + + ``` console + $ docker ps --format "table {{.Names}}\t{{.Ports}}" + ``` + + The focus is: *what ports are containers using?* + + > Any container where no ports are listed is either exposing no ports and/or is running in host mode. + +Although both `DPS` & `DNET` invoke `docker ps`, the formatting means the output usually fits on your screen without line wrapping. + +All three aliases are installed by [IOTstackAliases](https://github.com/Paraphraser/IOTstackAliases). + +### confirm that UDP is not enabled { #udpStep1 } + +``` console +$ DNET +NAMES PORTS +influxdb 0.0.0.0:8086->8086/tcp +``` + +Interpretation: Docker is listening on TCP port 8086, and is routing the traffic to the same port on the influxdb container. There is no mention of UDP. + +### create a database to receive the traffic { #udpStep2 } + +This tutorial uses the database name of "udp". + +``` console +$ influx +> create database udp +> exit +> $ +``` + +### define a UDP port mapping { #udpStep3 } + +Edit `docker-compose.yml` to define a UDP port mapping (the second line in the `ports` grouping below): + +``` yaml +influxdb: + … + ports: + - "8086:8086" + - "8086:8086/udp" + … +``` + +### enable UDP support { #udpStep4 } + +Edit your `docker-compose.yml` and change the InfluxDB environment variables to glue it all together: + +``` yaml +environment: + - INFLUXDB_UDP_DATABASE=udp + - INFLUXDB_UDP_ENABLED=true + - INFLUXDB_UDP_BIND_ADDRESS=0.0.0.0:8086 +``` + +In this context, the IP address "0.0.0.0" means "this host" (analogous to the way "255.255.255.255" means "all hosts"). 
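
Looking ahead: once you have completed the rebuild and confirmation steps below, you may want a quick way of pushing a test point at the UDP listener without writing a real client. The following is only a convenience sketch; it relies on `bash`'s `/dev/udp` redirection and uses an arbitrary measurement name:

``` console
$ echo "udp_test value=1" > /dev/udp/127.0.0.1/8086
$ influx -database udp -execute "show measurements"
```

Because UDP is connectionless, the `echo` succeeds even if nothing is listening, so the real test is whether `udp_test` subsequently appears in the measurement list.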
### rebuild the container { #udpStep5 } + +``` console +$ cd ~/IOTstack +$ docker-compose up -d influxdb + +Recreating influxdb ... done +``` + +The `up` causes `docker-compose` to notice that the environment has changed, and to rebuild the container with the new settings. + +### confirm that UDP is enabled { #udpStep6 } + +``` console +$ DNET +NAMES PORTS +influxdb 0.0.0.0:8086->8086/tcp, 0.0.0.0:8086->8086/udp +``` + +Interpretation: In addition to the TCP port, Docker is now listening on UDP port 8086, and is routing the traffic to the same port on the influxdb container. + +### check your work { #udpStep7 } + +Check the log: + +``` console +$ docker logs influxdb +``` + +If you see a line like this: + +``` +ts=2020-09-18T03:09:26.154478Z lvl=info msg="Started listening on UDP" log_id=0PJnqbK0000 service=udp addr=0.0.0.0:8086 +``` + +then everything is probably working correctly. If you see anything that looks like an error message then you will need to follow your nose. + +### start sending traffic { #udpStep8 } + +Although the how-to is beyond the scope of this tutorial, you will need a process that can send "line format" payloads to InfluxDB using UDP port 8086. + +Once that is set up, you can inspect the results like this: + +``` console +$ influx -database udp +> show measurements +``` + +If data is being received, you will get at least one measurement name. An empty list implies no data is being received. + +If you get at least one measurement name then you can inspect the data using: + +``` console +> select * from «measurement» +``` + +where `«measurement»` is one of the names in the `show measurements` list. + +## Reducing flash wear-out { #flashWear } + +SSD-drives have pretty good controllers spreading out writes, so this isn't really a concern for them. But if you store data on an SD-card, flash wear may cause the card to fail prematurely. Flash memory has a limited number of erase-write cycles per physical block. These blocks may be multiple megabytes. You can use `sudo lsblk -D` to see how big the erase granularity is on your card. The goal is to avoid writing lots of small changes targeting the same physical blocks. Here are some tips to mitigate SD-card wear: + +* Don't use short retention policies. This may mask heavy disk IO without increasing disk space usage. Depending on the flash card and file system used, new data may be re-written to the same blocks that were freed by the expiration, wearing them out. +* Take care not to add measurements too often. If possible, no more often than once a minute. Add all measurements in one operation. Even a small write will physically write a whole new block and erase the previously used block. +* Adding measurements directly to InfluxDB will cause a write on every operation. If your client code can't aggregate multiple measurements into one write, consider routing them via Telegraf. It has the `flush_interval`-option, which will combine the measurements into one write. +* All InfluxDB queries are logged by default and logs are written to the SD-card. To disable this, add the following to docker-compose.yml, next to the other INFLUXDB_\* entries: + + ```yaml + - INFLUXDB_DATA_QUERY_LOG_ENABLED=false + - INFLUXDB_HTTP_LOG_ENABLED=false + ``` + + This is especially important if you plan on having Grafana or Chronograf displaying up-to-date data on a dashboard, making queries all the time.
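
For convenience, the wear-reduction settings mentioned in this section can be collected into the InfluxDB `environment:` clause in one hit. This is only a sketch; the `INFLUXDB_MONITOR_STORE_ENABLED` line repeats the IOTstack default already discussed under [Configuration](#configuration):

```yaml
environment:
  - INFLUXDB_MONITOR_STORE_ENABLED=FALSE   # no high-churn "_internal" database
  - INFLUXDB_DATA_QUERY_LOG_ENABLED=false  # do not log every query
  - INFLUXDB_HTTP_LOG_ENABLED=false        # do not log every HTTP request
```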
## Debugging { #debugging } + +### Container won't start { #debugInspection } + +Sometimes you need to start the container without starting InfluxDB so that you can access its maintenance tools, usually when InfluxDB crashes on startup. + +Add a new line below `influxdb:` to your docker-compose.yml: + +```yaml +influxdb: + … + entrypoint: sleep infinity +``` + +Recreate the container using the new entrypoint: + +``` console +$ docker-compose up -d influxdb +Recreating influxdb ... done +``` + +Now the container should start and you can get a shell to poke around and try the `influx_inspect` command: + +``` console +$ docker exec -it influxdb bash +# influx_inspect +Usage: influx_inspect [[command] [arguments]] +``` + +Once you have finished poking around, undo the change by removing the custom entrypoint and running `docker-compose up -d influxdb` again to return to normal container behaviour, after which you can test whether your fixes worked. + +### Adding packages { #debugPackages } + +The container is pretty bare-bones by default. It is OK to install additional tools. Start by running: + +``` console +# apt update +``` + +and then use `apt install` to add whatever you need. Packages you add will persist until the next time the container is re-created. + +### Sniffing traffic { #debugSniff } + +If you need to see the actual packets being sent to Influx for insertion into your database, you can set it up like this: + +``` console +$ docker exec influxdb bash -c 'apt update && apt install tcpdump -y' +``` + +That adds `tcpdump` to the running container and, as noted above, that will persist until you re-create the container. + +To capture traffic: + +``` console +$ docker exec influxdb tcpdump -i eth0 -s 0 -n -c 100 -w /var/lib/influxdb/capture.pcap dst port 8086 +``` + +Breaking that down: + +* `-i eth0` is the container's internal virtual Ethernet network interface (attached to the internal bridged network) +* `-s 0` means "capture entire packets" +* `-n` means "do not try to resolve IP addresses to domain names" +* `-c 100` is optional and means "capture 100 packets then stop". If you omit this option, `tcpdump` will capture packets until you press control+C. +* `-w /var/lib/influxdb/capture.pcap` is the internal path to the file where captured packets are written. You can, of course, substitute any filename you like for `capture.pcap`. +* `dst port 8086` captures all packets where the destination port field is 8086, which is the InfluxDB internal port number. + +The internal path: + +``` +/var/lib/influxdb/capture.pcap +``` + +maps to the external path: + +``` +~/IOTstack/volumes/influxdb/data/capture.pcap +``` + +You can copy that file to another system where you have a tool like WireShark installed. WireShark will open the file and you can inspect packets and verify that the information being sent to InfluxDB is what you expect. + +Do not forget to clean up any packet capture files: + +``` +$ cd ~/IOTstack/volumes/influxdb/data +$ sudo rm capture.pcap +``` diff --git a/docs/Containers/InfluxDB2.md b/docs/Containers/InfluxDB2.md new file mode 100755 index 000000000..97e1b7bd5 --- /dev/null +++ b/docs/Containers/InfluxDB2.md @@ -0,0 +1,592 @@ +# InfluxDB 2 + +## references + +* [InfluxData home page](https://docs.influxdata.com) +* [DockerHub](https://hub.docker.com/_/influxdb) +* [GitHub](https://github.com/influxdata/influxdata-docker) + +## assumptions + +1. Your Raspberry Pi is running **full** 64-bit Raspberry Pi OS Debian GNU/Linux 11 (bullseye).
+ + - DockerHub does not have a 32-bit image for InfluxDB 2 so you can't run this container until you have upgraded. + - Running full 64-bit is **not** the same as enabling the 64-bit kernel in `/boot/config.txt`. User-mode needs to be 64-bit capable as well. You must start from a [full 64-bit image](https://www.raspberrypi.com/software/operating-systems/#raspberry-pi-os-64-bit). + +2. Node-RED is your principal mechanism for feeding data to InfluxDB 1.8. + + - You may have other services feeding data to InfluxDB 1.8 (eg Telegraf). The steps documented here will migrate all your existing data but do not discuss how to adapt services other than Node-RED to feed new data to InfluxDB 2. + +3. Grafana is your principle mechanism for creating dashboards based on data stored in InfluxDB 1.8. + + - You may have other visualisation tools. You may gain insights from studying how Grafana needs to be changed to run Flux queries against InfluxDB 2 buckets but this documentation does not explore alternatives. + +4. Node-RED, InfluxDB 1.8 and Grafana are all running in *non-host* mode on the same Docker instance, and that it is your intention to deploy InfluxDB 2 in *non-host* mode as well. + + - If you are running any containers in *host* mode or have distributed the services across multiple Docker instances, you will have to adapt appropriately. + +## terminology: *database* vs *bucket* + +InfluxDB 1.8 and InfluxDB 2 are both database management systems (DBMS), sometimes referred to as "engines", optimised for storage and retrieval of time-series data. InfluxDB 1.8 uses the term *database* to mean a collection of *measurements*. InfluxDB 2 uses the term *bucket* to mean the same thing. + +When an InfluxDB 1.8 *database* is migrated, it becomes an InfluxDB 2 *bucket*. You will see this change in terminology in various places, such as the InfluxDB-out node in Node-RED. When that node is set to: + +* Version 1.x, the user interface has a "Database" field which travels with the *connection*. For example: + + - [v1.x] influxdb:8086/power (set up in the connection sheet) + + This implies that you need one *connection* per *database*. + +* Version 2.0, the user interface has a "Bucket" field which is independent of the connection. For example: + + - [v2.0] influxdb2:8086 (set up in the connection sheet) + - Bucketpower/autogen (set up in the node) + + This implies that you need one *connection* per *engine*. It is a subtle but important difference. + +## reference service definition + +The InfluxDB 2 service definition is added to your compose file by the IOTstack menu. 
+ +```{ .yaml linenums="1" } +influxdb2: + container_name: influxdb2 + image: "influxdb:latest" + restart: unless-stopped + environment: + - TZ=Etc/UTC + - DOCKER_INFLUXDB_INIT_USERNAME=me + - DOCKER_INFLUXDB_INIT_PASSWORD=mypassword + - DOCKER_INFLUXDB_INIT_ORG=myorg + - DOCKER_INFLUXDB_INIT_BUCKET=mybucket + - DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=my-super-secret-auth-token + - DOCKER_INFLUXDB_INIT_MODE=setup + # - DOCKER_INFLUXDB_INIT_MODE=upgrade + ports: + - "8087:8086" + volumes: + - ./volumes/influxdb2/data:/var/lib/influxdb2 + - ./volumes/influxdb2/config:/etc/influxdb2 + - ./volumes/influxdb2/backup:/var/lib/backup + # - ./volumes/influxdb.migrate/data:/var/lib/influxdb:ro + healthcheck: + test: ["CMD", "influx", "ping"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 30s +``` + +As an alternative to using the menu, you can copy and paste the service definition into your compose file from the template at: + +``` +~/IOTstack/.templates/influxdb2/service.yml +``` + +### required edits + +Edit the service definition in your compose file to change the following variables: + +- `TZ=`«country»/«city» + +- `DOCKER_INFLUXDB_INIT_USERNAME=`«username» + + This name becomes the administrative user. It is associated with your [«password»](#influxPassword) and [«token»](#influxToken). + +- `DOCKER_INFLUXDB_INIT_PASSWORD=`«password» + + Your «username» and «password» form your login credentials when you administer InfluxDB 2 using its web-based graphical user interface. The strength of your password is up to you. + +- `DOCKER_INFLUXDB_INIT_ORG=`«organisation» + + An organisation name is **required**. Examples: + + - myorg + - my-house + - com.mydomain.myhouse + +- `DOCKER_INFLUXDB_INIT_BUCKET=`«bucket» + + A default bucket name is **required**. The name does not matter because you won't actually be using it so you can accept the default of "mybucket". You can [delete the unused bucket](#delBucket) later if you want to be tidy. + +- `DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=`«token» + + Although you can let InfluxDB 2 generate your access token for you, it will keep things simple if you generate your own. Here are some possible approaches: + + 1. use a universally-unique ID: + + ```bash + $ uuidgen + 4fef85b4-2f56-480f-b143-fa5cb6e8f18a + ``` + + 2. use GnuPG to generate a random string: + + ```bash + $ gpg --gen-random -a 0 25 + bYS3EsnnY0AlRxJ2uk44Hzwm7GMKYu5unw== + ``` + + 3. use a password-generator of your choosing. + +Note: + +* Unless a container's documentation explicitly states that it is supported, you should never use quote marks to encapsulate the values you supply via environment variables. InfluxDB 2 treats quotes as being part of the value (eg a password of "fred" is the 6-character string that *includes* the quotes). If you put quote marks around anything as you were editing, please go back and remove them. + +### Table 1: mode-specific directives + +InfluxDB 2 operates in three distinct modes which are controlled by the `DOCKER_INFLUXDB_INIT_MODE` environment variable. The table below summarises the variables and volumes mappings that need to be active in each mode. + +![Table 1](./images/influxdb2-table1.png) + +## initialising InfluxDB 2 + +If you have only just included the [template service definition](#svcDef) in your compose file and performed the [required edits](#svcDefEdits), then you can follow the initialisation process below. + +However, if you want to **re-initialise** the container, go to [re-initialising InfluxDB 2](#reinitContainer). 
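Before you bring the container up for the first time, it can be worth a quick sanity check that your edits are in place and that no stray quote marks have crept in. This step is optional; one way to do it is:

```bash
$ cd ~/IOTstack
$ grep "DOCKER_INFLUXDB_INIT" docker-compose.yml
```

Each variable should show exactly the value you intend, with no surrounding quotes.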
+ +To initialise InfluxDB 2: + +1. Confirm that the service definition directives are set as per the "setup" column of [Table 1](#svcDefVars). +2. Be in the correct directory (assumed throughout): + + ```bash + $ cd ~/IOTstack + ``` + +3. Start the InfluxDB 2 container: + + ```bash + $ docker-compose up -d influxdb2 + ``` + +4. InfluxDB 2 will notice the following environment variable: + + ```yaml + DOCKER_INFLUXDB_INIT_MODE=setup + ``` + + This instructs the container to initialise the database engine structures based on a combination of defaults and the values you provide via the other environment variables. + +5. Confirm that the InfluxDB 2 container is not in a restart loop and isn't reporting errors by using commands like: + + ```bash + $ docker ps + $ docker logs influxdb2 + ``` + +If you don't need to migrate any data from InfluxDB 1.8 you can go straight to [running InfluxDB 2](#runContainer), otherwise follow the [data-migration procedure](#migrateContainer) instructions below. + +## data-migration procedure + +Successful migration depends on the following assumptions being true: + +* The InfluxDB 2 container is running and has just been initialised as per [initialising InfluxDB 2](#initContainer). +* The InfluxDB 1.8 container is running, and is based on the IOTstack service definition (or reasonable facsimile) at: + + ``` + ~/IOTstack/.templates/influxdb/service.yml + ``` + +To migrate your InfluxDB 1.8 data: + +1. Be in the correct directory (assumed throughout): + + ```bash + $ cd ~/IOTstack + ``` + +2. InfluxDB 1.8 runs as root and its persistent store is owned by root but not all files and folders in the persistent store are *group* or *world* readable. InfluxDB 2 runs as user ID 1000 (user "influxdb" inside the container). Because of this, you need to give InfluxDB 2 permission to read the InfluxDB 1.8 persistent store. + + It is not a good idea to interfere with a persistent store while a container is running so best practice is to stop InfluxDB 1.8 for long enough to make a copy of its persistent store: + + ```bash + $ sudo rm -rf ./volumes/influxdb.migrate + $ docker-compose down influxdb + $ sudo cp -a ./volumes/influxdb ./volumes/influxdb.migrate + $ docker-compose up -d influxdb + $ sudo chown -R 1000:1000 ./volumes/influxdb.migrate/data + ``` + + > see also [if downing a container doesn't work](../Basic_setup/index.md/#downContainer) + + In words: + + 1. Ensure any previous attempts at migration are removed. Always be *extremely* careful with any `sudo rm` command. Check your work **before** you press return. + 2. Stop InfluxDB 1.8. + 3. Make a copy of the InfluxDB 1.8 persistent store. + 4. Start InfluxDB 1.8 again. + 5. Change ownership of the **copy** of the InfluxDB 1.8 persistent store. + +3. Edit your compose file as per the "upgrade" column of [Table 1](#svcDefVars). The changes you need to make are: + + 1. Change the initialisation mode from `setup` to `upgrade`: + + - before editing: + + ```{ .yaml linenums="12" } + - DOCKER_INFLUXDB_INIT_MODE=setup + # - DOCKER_INFLUXDB_INIT_MODE=upgrade + ``` + + - after editing: + + ```{ .yaml linenums="12" } + # - DOCKER_INFLUXDB_INIT_MODE=setup + - DOCKER_INFLUXDB_INIT_MODE=upgrade + ``` + + 2. 
Activate the volume mapping to give InfluxDB 2 read-only access to the **copy** of the InfluxDB 1.8 persistent store that you made in step 2: + + - before editing: + + ```{ .yaml linenums="20" } + # - ./volumes/influxdb.migrate/data:/var/lib/influxdb:ro + ``` + + - after editing: + + ```{ .yaml linenums="20" } + - ./volumes/influxdb.migrate/data:/var/lib/influxdb:ro + ``` + + Save your work but do not execute any `docker-compose` commands. + +4. InfluxDB 2 creates a "bolt" (lock) file to prevent accidental data-migrations. That file needs to be removed: + + ```bash + $ rm ./volumes/influxdb2/data/influxd.bolt + ``` + +5. The InfluxDB 2 container is still running. The following command causes the container to be recreated with the edits you made in step 3: + + ```bash + $ docker-compose up -d influxdb2 + ``` + +6. InfluxDB 2 will notice the following environment variable: + + ```yaml + DOCKER_INFLUXDB_INIT_MODE=upgrade + ``` + + This, combined with the absence of the "bolt" file, starts the migration process. You need to wait until the migration is complete. The simplest way to do that is to watch the size of the persistent store for InfluxDB 2 until it stops increasing. Experience suggests that the InfluxDB 2 persistent store will usually be a bit larger than InfluxDB 1.8. For example: + + * reference size for an InfluxDB 1.8 installation: + + ```bash + $ sudo du -sh ./volumes/influxdb + 633M ./volumes/influxdb + ``` + + * final size after migration to InfluxDB 2: + + ```bash + $ sudo du -sh ./volumes/influxdb2 + 721M ./volumes/influxdb2 + ``` + +7. Data migration is complete once the folder size stops changing. + +Proceed to [running InfluxDB 2](#runContainer) below. + +## running InfluxDB 2 + +The container now needs to be instructed to run in normal mode. + +1. Be in the correct directory (assumed throughout): + + ```bash + $ cd ~/IOTstack + ``` + +2. Edit your compose file as per the "(omitted)" column of [Table 1](#svcDefVars). The changes are: + + 1. Deactivate all `DOCKER_INFLUXDB_INIT_` environment variables. After editing, the relevant lines should look like: + + ```{ .yaml linenums="7" } + # - DOCKER_INFLUXDB_INIT_USERNAME=me + # - DOCKER_INFLUXDB_INIT_PASSWORD=mypassword + # - DOCKER_INFLUXDB_INIT_ORG=myorg + # - DOCKER_INFLUXDB_INIT_BUCKET=mybucket + # - DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=my-super-secret-auth-token + # - DOCKER_INFLUXDB_INIT_MODE=setup + # - DOCKER_INFLUXDB_INIT_MODE=upgrade + ``` + + 2. Deactivate the volume mapping if it is active. After editing, the line should look like: + + ```{ .yaml linenums="20" } + # - ./volumes/influxdb.migrate/data:/var/lib/influxdb:ro + ``` + + Save your work. + +3. The InfluxDB 2 container is still running. The following command causes the container to be recreated with the edits you have just made: + + ```bash + $ docker-compose up -d influxdb2 + ``` + + The absence of an active `DOCKER_INFLUXDB_INIT_MODE` variable places InfluxDB 2 into normal run mode. + +4. If you have just performed a data migration, you can remove the **copy** of the InfluxDB 1.8 persistent store: + + ```bash + $ sudo rm -rf ./volumes/influxdb.migrate + ``` + + > always be *extremely* careful with any `sudo rm` command. Always check your work **before** you press return. + +## re-initialising InfluxDB 2 + +If you need to start over from a clean slate: + +1. Be in the correct directory (assumed throughout): + + ```bash + $ cd ~/IOTstack + ``` + +2. 
Terminate the InfluxDB 2 container: + + ```bash + $ docker-compose down influxdb2 + ``` + + > see also [if downing a container doesn't work](../Basic_setup/index.md/#downContainer) + +3. Remove the persistent store: + + ```bash + $ sudo rm -rf ./volumes/influxdb2 + ``` + + > always be *extremely* careful with any `sudo rm` command. Always check your work **before** you press return. + +4. Edit your compose file as per the "setup" column of [Table 1](#svcDefVars). After editing, the relevant lines should look like this: + + ```{ .yaml linenums="7" } + - DOCKER_INFLUXDB_INIT_USERNAME=me + - DOCKER_INFLUXDB_INIT_PASSWORD=mypassword + - DOCKER_INFLUXDB_INIT_ORG=myorg + - DOCKER_INFLUXDB_INIT_BUCKET=mybucket + - DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=my-super-secret-auth-token + - DOCKER_INFLUXDB_INIT_MODE=setup + # - DOCKER_INFLUXDB_INIT_MODE=upgrade + ``` + +Go to [initialising InfluxDB 2](#initContainer). + +## exploring InfluxDB 2 data + +![browse data](./images/influxdb2-explorer.jpeg) + +1. Launch a browser and connect it to port 8087 on your Raspberry Pi. For example: + + ``` + http://raspberrypi.local:8087 + ``` + + You can also use the IP address or domain name of your Raspberry Pi. In this context, 8087 is the *external* port number from the left hand side of the port mapping in the service definition: + + ```{ .yaml linenums="14" } + ports: + - "8087:8086" + ``` + +2. Sign in to the InfluxDB 2 instance using your [«username»](#influxUsername) and [«password»](#influxPassword). +3. Click on "Explore" in the left-hand tool strip. That is marked [A] in the screen shot. In the area marked [B] you should be able to see a list of the *buckets* that were migrated from InfluxDB 1.8 *databases*. + + In the screen shot, I clicked on other fields to create a query: + + - In area [B], I selected the "power/autogen" *bucket*; + - In area [C], I selected the "hiking2" (electricity meter) *measurement*; + - In area [D], I selected the "voltage" *field*; + - The bucket in this test is a migrated copy of an InfluxDB 1.8 database. It was not ingesting live data so I also needed to change the duration popup menu [E] to a time-span that included the most-recent insertions; + - Then I clicked the "Submit" button [F]; and + - The result was the graph in [G]. + +You can explore your own tables using similar techniques. + +## Flux queries via point-and-click + +Grafana does not (yet) seem to have the ability to let you build Flux queries via point-and-click like you can with InfluxQL queries. Until Grafana gains that ability, it's probably a good idea to learn how to build Flux queries in InfluxDB, so you can copy-and-paste the Flux statements into Grafana. + +Once you have constructed a query in the "Query Builder", click the "Script Editor" button [H] to switch to the editor view. + +For this example, the query text is: + +```flux +from(bucket: "power/autogen") + |> range(start: v.timeRangeStart, stop: v.timeRangeStop) + |> filter(fn: (r) => r["_measurement"] == "hiking2") + |> filter(fn: (r) => r["_field"] == "voltage") + |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) + |> yield(name: "mean") +``` + +Two important things to note here are: + +1. The *bucket* name: `power/autogen`; and +2. The *measurement* name: `hiking2`. + +## example: adapting Node-RED + +![Node-RED flow models](./images/influxdb2-nodered-flow-models.jpeg) + +1. 
Assume you have an existing flow (eg a fairly standard [3-node flow](https://gist.github.com/Paraphraser/c9db25d131dd4c09848ffb353b69038f)) which is logging to an InfluxDB 1.8 database. Your goal is to modify the flow to log the same data to the recently-migrated InfluxDB 2 bucket. + +2. Start Node-RED if it is not running: + + ```bash + $ cd ~/IOTstack + $ docker-compose up -d nodered + ``` + +3. Use a web browser to connect to your Node-RED instance. +4. Drag a **new** InfluxDB-out node onto the canvas: + + - This is exactly the same InfluxDB-out node that you have been using to write to your InfluxDB 1.8 databases. There isn't a different node or package for InfluxDB 2. + - Always drag a **new** InfluxDB-out node from the palette onto the canvas. Do not make the mistake of re-using an existing InfluxDB-out node (eg via copy and paste) because that is a very good way of breaking your flows. + +5. Double-click the InfluxDB-out node to open it: + + ![InfluxDB-out node](./images/influxdb2-nodered-db-out-node.jpeg) + + * At [A], give the node a sensible name. + * Click the pencil icon [B] adjacent to the Server field: + + - Leave the Name field [C] blank. This ensures that the title in the popup menu [D] automatically reflects the version and connection URL. + - Change the Version popup menu [E] to "2.0". + - Set the URL [F] to point to your InfluxDB 2 instance: + + ``` + http://influxdb2:8086 + ``` + + > In this context, "influxdb2" is the container name and 8086 is the container's *internal* port. Node-RED communicates with InfluxDB 2 across the internal bridged network (see [assumptions](#keyAssumptions)). + + - Paste your [«token»](#influxToken) into the Token field [G]. + - Click "Update" [H]. + + * Set the Organisation field [I] to your [«organisation»](#influxOrg). + * Set the Bucket [J] to the correct value. You can get that from either: + + - area [[B]](#influxExplorer-B) in the Influx Explorer screen shot; or + - the [bucket name](#bucketName) from the saved Flux query. + + In this example, the bucket name is "power/autogen". + + * Set the Measurement [K] to the measurement name. You can get that from either: + + - area [[C]](#influxExplorer-C) in the Influx Explorer screen shot; or + - the [measurement name ](#measurementName) from the saved Flux query. + + In this example, the measurement name is "hiking2". + + * Click Done [L]. + +6. Connect the outlet of the Change node to the inlet of the InfluxDB-out node. +7. Click Deploy. +8. Watch the debug panel to make sure no errors are being reported. +9. Go back to the InfluxDB 2 Data Explorer and click the refresh button "I". If everything has gone according to plan, you should see recent observations added to your graph. + + > You may need to wait until your sensor has sent new data. + +## example: adapting Grafana + +### defining an InfluxDB 2 data source + +1. Start Grafana if it is not running: + + ```bash + $ cd ~/IOTstack + $ docker-compose up -d grafana + ``` + +2. Use a web browser to connect to your Grafana instance and login as an administrator. +3. Hover your mouse over the "gear" icon in the tool-strip on the left hand side, and choose "Data sources". +4. Click the "Add data source" button. +5. Select the "InfluxDB" option. +6. Configure as follows: + + ![Grafana config DB source](./images/influxdb2-grafana-db-source.jpeg) + + - Change the Name [A] to a meaningful title that reflects the **bucket** you are going to query. For example, "InfluxDB 2.0 power". + - Change the Query Language popup menu [B] to "Flux". 
+ + > Ignore the advice about Flux support being in beta. + + - Change the URL [C] to point to your InfluxDB 2 instance: + + ``` + http://influxdb2:8086 + ``` + + > In this context, "influxdb2" is the container name and 8086 is the container's *internal* port. Grafana communicates with InfluxDB 2 across the internal bridged network (see [assumptions](#keyAssumptions)). + + - Turn off all the switches in the "Auth" group [D]. + - Set the Organisation [E] to your [«organisation»](#influxOrg). + - Paste your [«token»](#influxToken) into the Token field [F]. + + > ignore the fact that the prompt text says "password" - you need the token! + + - Set the Default Bucket [G] to the bucket (database) you want to query. You can get that from either: + + - area [[B]](#influxExplorer-B) in the Influx Explorer screen shot; or + - the [bucket name](#bucketName) from the saved Flux query. + + In this example, the value is "power/autogen". + + - Click Save & Test [H]. + +### using an InfluxDB 2 data source in a dashboard + +1. Find the + icon in the tool-strip on the left hand side, hover your mouse over it and choose "Create » dashboard". +2. Click "Add a new panel". +3. Change the "Data source" popup to the [bucket connection](#bucketConnection) you created earlier ("InfluxDB 2.2 power"). +4. The editor automatically switches into Flux mode. +5. Paste the [query text](#queryText) you saved earlier from the InfluxDB 2 query inspector. +6. If necessary, change the duration to a period that is likely to contain some data to display. +7. Click the Refresh button. +8. Click Apply. + +In the side-by-side screen shots below, observations before the straight-line (missing data) segment were imported from InfluxDB 1.8 while observations after the straight-line segment were inserted by the new InfluxDB-out node in Node-RED. + +![compare results](./images/influxdb2-chart-vs-grafana.jpeg) + +## odds and ends + +1. Forgot your token: + + ```bash + $ docker exec influxdb2 influx auth ls + ``` + +2. Create a new user, password and token: + + ```bash + $ docker exec influxdb2 influx user create --name «username» --password «password» + $ docker exec influxdb2 influx auth create --user «username» --all-access + ``` + +3. List available buckets: + + ```bash + $ docker exec influxdb2 influx bucket ls + ``` + +4. Delete the [default «bucket»](#influxBucket): + + ```bash + $ docker exec influxdb2 influx bucket delete --org «organisation» --name «bucket» + ``` + +## migration strategy + +From the fact that both InfluxDB 1.8 and InfluxDB 2 can run in parallel, with Node-RED feeding the same data to both, it should be self-evident that you can repeat the data-migration as often as necessary, simply by starting from [re-initialising InfluxDB 2](#reinitContainer). + +This implies that you can concentrate on one database at a time, adjusting Node-RED so that it writes each row of sensor data to both the InfluxDB 1.8 database and corresponding InfluxDB 2 bucket. + +Having the data going to both engines means you can take your time adjusting your Grafana dashboards to be based on Flux queries. You can either retrofit InfluxDB 2 bucket sources and Flux queries to existing dashboards, or build parallel dashboards from the ground up. 
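If you would like some reassurance that a migration pass captured everything, one simple check is to compare record counts between the old database and the new bucket. The commands below are only a sketch: they reuse the "power" database, "power/autogen" bucket, "hiking2" measurement and "voltage" field from the examples above, and they assume the CLI configuration created when the container was initialised is still in place. Substitute your own names, and add `-username` and `-password` to the first command if you have enabled authentication on InfluxDB 1.8:

```bash
$ docker exec influxdb influx -database "power" -execute "SELECT COUNT(voltage) FROM hiking2"
$ docker exec influxdb2 influx query 'from(bucket: "power/autogen") |> range(start: 0) |> filter(fn: (r) => r._measurement == "hiking2" and r._field == "voltage") |> count()'
```

The counts will not necessarily be identical if new data is still arriving, but they should be close.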
diff --git a/docs/Containers/Kapacitor.md b/docs/Containers/Kapacitor.md new file mode 100644 index 000000000..027b08a68 --- /dev/null +++ b/docs/Containers/Kapacitor.md @@ -0,0 +1,53 @@ +# Kapacitor + +## References + +- [*influxdata Kapacitor* documentation](https://docs.influxdata.com/kapacitor/) +- [*GitHub*: influxdata/influxdata-docker/kapacitor](https://github.com/influxdata/influxdata-docker/tree/master/kapacitor) +- [*DockerHub*: influxdata Kapacitor](https://hub.docker.com/_/kapacitor) + +## Upgrading Kapacitor + +You can update the container via: + +``` console +$ cd ~/IOTstack +$ docker-compose pull +$ docker-compose up -d +$ docker system prune +``` + +In words: + +* `docker-compose pull` downloads any newer images; +* `docker-compose up -d` causes any newly-downloaded images to be instantiated as containers (replacing the old containers); and +* the `prune` gets rid of the outdated images. + +### Kapacitor version pinning + +If you need to pin to a particular version: + +1. Use your favourite text editor to open `docker-compose.yml`. +2. Find the line: + + ``` yaml + image: kapacitor:1.5 + ``` + +3. Replace `1.5` with the version you wish to pin to. For example, to pin to version 1.5.9: + + ``` yaml + image: kapacitor:1.5.9 + ``` + + Note: + + * Be cautious about using the `latest` tag. At the time of writing, there was no `linux/arm/v7` architecture support. + +4. Save the file and tell `docker-compose` to bring up the container: + + ``` console + $ cd ~/IOTstack + $ docker-compose up -d kapacitor + $ docker system prune + ``` diff --git a/docs/Containers/MJPEG-Streamer.md b/docs/Containers/MJPEG-Streamer.md new file mode 100644 index 000000000..ecaba0ca7 --- /dev/null +++ b/docs/Containers/MJPEG-Streamer.md @@ -0,0 +1,335 @@ +# Motion JPEG Streamer + +The `mjpg-streamer` container lets you pass a video stream from a local camera to a `motioneye` container. The `mjpg-streamer` and `motioneye` containers can be running on the *same* or *different* hosts. + +Each `mjpg-streamer` container can process a stream from an official Raspberry Pi "ribbon cable" camera, or from a third-party USB-connected camera, such as those from [Logitech](https://www.logitech.com/en-au/products/webcams.html). + +Using `mjpg-streamer` to handle your video streams gives you a consistent approach to supporting multiple cameras and camera types. You do not need to care about distinctions between "ribbon" or USB cameras, nor which hosts are involved. + +## Raspberry Pi Ribbon Camera + +> This section is only relevant if you are trying to use a camera that connects to your Raspberry Pi via a ribbon cable. + +Beginning with Raspberry Pi OS Bullseye, the Raspberry Pi Foundation introduced the [LibCamera](https://www.raspberrypi.com/documentation/computers/camera_software.html) subsystem and withdrew support for the earlier `raspistill` and `raspivid` mechanisms which then became known as the *legacy* camera system. + +The introduction of the *LibCamera* subsystem triggered quite a few articles (and videos) on the topic, of which this is one example: + +* [How to use Raspberry Pi Cameras with Bullseye OS Update](https://core-electronics.com.au/guides/raspberry-pi-bullseye-camera-commands/) + +Although the *LibCamera* subsystem works quite well with "native" applications, it has never been clear whether it supports passing camera streams to Docker containers. At the time of writing (2023-10-23), this author has never been able to find any examples which demonstrate that such support exists. 
+ +It is important to understand that: + +1. This **only** applies to the Raspberry Pi Ribbon Camera; +2. In order to access a Raspberry Pi Ribbon Camera, the `mjpg-streamer` container depends on the *legacy* camera system; and +3. The *LibCamera* subsystem and the *legacy* camera system are mutually exclusive. + +In other words, if you want to use the `mjpg-streamer` container to process a stream from a Raspberry Pi Ribbon Camera, you have to forgo using the *LibCamera* subsystem. + +### preparing your Raspberry Pi + +If you have a Raspberry Pi Ribbon Camera, prepare your system like this: + +1. Check the version of your system by running: + + ``` console + $ grep "VERSION_CODENAME" /etc/os-release + ``` + + The answer should be one of "buster", "bullseye" or "bookworm". + +2. Configure camera support: + + * if your system is running Buster, run this command: + + ``` console + $ sudo raspi-config nonint do_camera 0 + ``` + + Buster pre-dates *LibCamera* so this is the same as enabling the *legacy* camera system. In this context, `0` means "enable" and `1` means "disable". + + * if your system is running Bullseye or Bookworm, run these commands: + + ``` console + $ sudo raspi-config nonint do_camera 1 + $ sudo raspi-config nonint do_legacy 0 + ``` + + The first command is protective and turns off the *LibCamera* subsystem, while the second command enables the *legacy* camera system. + + > When executed from the command line, both the `do_camera` and `do_legacy` commands are supported in the Bookworm version of `raspi-config`. However, neither command is available when `raspi-config` is invoked as a GUI in a Bookworm system. This likely implies that the commands have been deprecated and will be removed, in which case this documentation will break. + +3. Reboot your system: + + ``` console + $ sudo reboot + ``` + +4. Make a note that your ribbon camera will be accessible on `/dev/video0`. + +## Third-party cameras + +The simplest approach is: + +1. Connect your camera to a USB port. +2. Run: + + ``` console + $ ls -l /dev/v4l/by-id + ``` + + This is an example of the response with a LogiTech "C920 PRO FHD Webcam 1080P" camera connected: + + ``` + lrwxrwxrwx 1 root root 12 Oct 23 15:42 usb-046d_HD_Pro_Webcam_C920-video-index0 -> ../../video1 + lrwxrwxrwx 1 root root 12 Oct 23 15:42 usb-046d_HD_Pro_Webcam_C920-video-index1 -> ../../video2 + ``` + + In general, the device at `index0` is where your camera will be accessible, as in: + + ``` + /dev/v4l/by-id/usb-046d_HD_Pro_Webcam_C920-video-index0 + ``` + +If you don't get a sensible response to the `ls` command then try disconnecting and reconnecting your camera, and rebooting your system. 
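If you want to confirm which resolutions and frame rates your camera actually supports (useful when choosing values for the variables described in the next section), one option is to install the Video4Linux utilities on the host and query the device. This is optional and assumes Raspberry Pi OS (Debian) package names:

``` console
$ sudo apt install -y v4l-utils
$ v4l2-ctl -d /dev/v4l/by-id/usb-046d_HD_Pro_Webcam_C920-video-index0 --list-formats-ext
```

The output lists each pixel format along with the frame sizes and intervals the camera offers.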
+ +## Container variables + +### environment variables + +variable | default | remark +---------------------------------|:-------------:|------------------------------ +`MJPG_STREAMER_USERNAME` | container ID | *changes each time the container is recreated* +`MJPG_STREAMER_PASSWORD` | random UUID | *changes each time the container restarts* +`MJPG_STREAMER_SIZE` | `640x480` | should be one of your camera's natural resolutions +`MJPG_STREAMER_FPS` | `5` | frames per second + +### device variable + +variable | default | remark +---------------------------------|:-------------:|------------------------------ +`MJPG_STREAMER_EXTERNAL_DEVICE` | `/dev/video0` | must be set to your video device + +## Setting your variables + +To initialise your environment, begin by using a text editor (eg `vim`, `nano`) to edit `~/IOTstack/.env` (which may or may not already exist): + +1. If your `.env` file does not already define your time-zone, take the opportunity to set it. For example: + + ``` + TZ=Australia/Sydney + ``` + +2. The access credentials default to random values which change each time the container starts. This is reasonably secure but is unlikely to be useful in practice, so you need to invent some credentials of your own. Example: + + ``` + MJPG_STREAMER_USERNAME=streamer + MJPG_STREAMER_PASSWORD=oNfDG-d1kgzC + ``` + +3. Define the **external** device path to your camera. Two examples have been given above: + + * a ribbon camera: + + ``` + MJPG_STREAMER_EXTERNAL_DEVICE=/dev/video0 + ``` + + * a Logitech C920 USB camera: + + ``` + MJPG_STREAMER_EXTERNAL_DEVICE=/dev/v4l/by-id/usb-046d_HD_Pro_Webcam_C920-video-index + ``` + +4. If you know your camera supports higher resolutions, you can also set the size. Examples: + + * the ribbon camera can support: + + ``` + MJPG_STREAMER_SIZE=1152x648 + ``` + + * the Logitech C920 can support: + + ``` + MJPG_STREAMER_SIZE=1920x1080 + ``` + +5. If the `mjpg-streamer` and `motioneye` containers are going to be running on: + + * the **same** host, you can consider increasing the frame rate: + + ``` + MJPG_STREAMER_FPS=30 + ``` + + Even though we are setting up a *web* camera, the traffic will never leave the host and will not traverse your Ethernet or WiFi networks. + + * **different** hosts, you should probably leave the rate at 5 frames per second until you understand the impact on network traffic. + +6. Save your work. + +Tip: + +* Do **not** use quote marks (either single or double quotes) to surround the values of your environment variables. This is because docker-compose treats the quotes as part of the string. If you used quotes, please go back and remove them. + +### alternative approach + +It is still a good idea to define `TZ` in your `.env` file. Most IOTstack containers now use the `TZ=${TZ:-Etc/UTC}` syntax so a single entry in your `.env` sets the timezone for all of your containers. + +However, if you prefer to keep most of your environment variables inline in your `docker-compose.yml` rather than in `.env`, you can do that. 
Example: + +``` yaml +environment: + - TZ=${TZ:-Etc/UTC} + - MJPG_STREAMER_USERNAME=streamer + - MJPG_STREAMER_PASSWORD=oNfDG-d1kgzC + - MJPG_STREAMER_SIZE=1152x648 + - MJPG_STREAMER_FPS=5 +``` + +Similarly for the camera device mapping: + +``` yaml +devices: + - "/dev/v4l/by-id/usb-046d_HD_Pro_Webcam_C920-video-index:/dev/video0" +``` + +### about variable substitution syntax + +If you're wondering about the syntax used for environment variables: + +``` yaml + - MJPG_STREAMER_USERNAME=${MJPG_STREAMER_USERNAME:-} +``` + +it means that `.env` will be checked for the presence of `MJPG_STREAMER_USERNAME=value`. If the key is found, its value will be used. If the key is not found, the value will be set to a null string. Then, inside the container, a null string is used as the trigger to apply the defaults listed in the table above. + +In the case of the camera device mapping, this syntax: + +``` yaml + - "${MJPG_STREAMER_EXTERNAL_DEVICE:-/dev/video0}:/dev/video0" +``` + +means that `.env` will be checked for the presence of `MJPG_STREAMER_EXTERNAL_DEVICE=path`. If the key is found, the path will be used. If the key is not found, the path will be set to `/dev/video0` on the assumption that a camera is present and the device exists. + +Regardless of whether a device path comes from `.env`, or is defined inline, or defaults to `/dev/video0`, if the device does not actually exist then `docker-compose` will refuse to start the container with the following error: + +``` +Error response from daemon: error gathering device information while adding custom device "«path»": no such file or directory +``` + +## Starting the container + +1. Start the container like this: + + ``` console + $ cd ~/IOTstack + $ docker-compose up -d mjpg-streamer + ``` + + The first time you do this triggers a fairly long process. First, a basic operating system image is downloaded from DockerHub, then a Dockerfile is run to add the streamer software and construct a local image, after which the local image is instantiated as your running container. Subsequent launches use the local image so the container starts immediately. See also [container maintenance](#maintenance). + +2. Once the container is running, make sure it is behaving normally and has not gone into a restart loop: + + ``` console + $ docker ps -a --format "table {{.Names}}\t{{.RunningFor}}\t{{.Status}}" + ``` + + > The `docker ps` command produces a lot of output which generally results in line-wrapping and can be hard to read. The `--format` argument reduces this clutter by focusing on the interesting columns. If you have [IOTstackAliases](https://github.com/Paraphraser/IOTstackAliases) installed, you can use `DPS` instead of copy/pasting the above command. + + If the container is restarting, you will see evidence of that in the STATUS column. If that happens, re-check the values set in the `.env` file and "up" the container again. The container's log (see below) may also be helpful. + +3. 
Check the container's log: + + ``` console + $ docker logs mjpg-streamer + i: Using V4L2 device.: /dev/video0 + i: Desired Resolution: 1152 x 648 + i: Frames Per Second.: 5 + i: Format............: JPEG + i: TV-Norm...........: DEFAULT + o: www-folder-path......: /usr/local/share/mjpg-streamer/www/ + o: HTTP TCP port........: 80 + o: HTTP Listen Address..: (null) + o: username:password....: streamer:oNfDG-d1kgzC + o: commands.............: enabled + ``` + + Many of the values you set earlier using environment variables show up here so viewing the log is a good way of making sure everything is being passed to the container. + + Note: + + * The `/dev/video0` in the first line of output is the **internal** device path (inside the container). This is **not** the same as the **external** device path associated with `MJPG_STREAMER_EXTERNAL_DEVICE`. The container doesn't know about the **external** device path so it has no way to display it. + +## Connecting the camera to MotionEye + +1. Use a browser to connect with MotionEye on port 8765. +2. Authenticate as an administrator (the default is "admin" with no password). +3. Click the ☰ icon at the top, left of the screen so that it rotates 90° and exposes the "Camera" popup menu. +3. In the "Camera" popup menu field, click the ▾ and choose "Add Camera…". +4. Change the "Camera Type" field to "Network Camera". +5. If the `motioneye` and `mjpg-streamer` containers are running on: + + * the **same** host, the URL should be: + + ``` + http://mjpg-streamer:80/?action=stream + ``` + + Here: + + - `mjpg-streamer` is the name of the **container**. Technically, it is a **host** name (rather than a domain name); and + - port 80 is the **internal** port that the streamer process running inside the container is listening to. It comes from the *right* hand side of the port mapping in the service definition: + + ``` yaml + ports: + - "8980:80" + ``` + + * **different** hosts, the URL should be in this form: + + ``` + http://«name-or-ip»:8980/?action=stream + ``` + + Here: + + - `«name-or-ip»` is the domain name or IP address of the host on which the `mjpg-streamer` container is running. Examples: + + ``` + http://raspberrypi.local:8980/?action=stream + http://my-spy.domain.com:8980/?action=stream + http://192.168.200.200:8980/?action=stream + ``` + + - port 8980 is the **external** port that the host where the `mjpg-streamer` container is running is listening on behalf of the container. It comes from the *left* hand side of the port mapping in the service definition: + + ``` yaml + ports: + - "8980:80" + ``` + +6. Enter the Username ("streamer" in this example). +7. Enter the Password ("oNfDG-d1kgzC" in this example). +8. Click in the Username field again. This causes MotionEye to retry the connection, after which the camera should appear in the Camera field. +9. Click OK. The camera feed should start working. + +## Container maintenance { #maintenance } + +Because it is built from a local Dockerfile, the `mjpg-streamer` does not get updated in response to a normal "pull". 
If you want to rebuild the container, proceed like this:

``` console
$ cd ~/IOTstack
$ docker-compose build --no-cache --pull mjpg-streamer
$ docker-compose up -d mjpg-streamer
$ docker system prune -f
```

If you have [IOTstackAliases](https://github.com/Paraphraser/IOTstackAliases) installed, the above is:

``` console
$ REBUILD mjpg-streamer
$ UP mjpg-streamer
$ PRUNE
```
diff --git a/docs/Containers/MariaDB.md b/docs/Containers/MariaDB.md new file mode 100755 index 000000000..cc0ef854c --- /dev/null +++ b/docs/Containers/MariaDB.md @@ -0,0 +1,182 @@ +# MariaDB

## Source

* [Docker Hub](https://hub.docker.com/r/linuxserver/mariadb/)
* [Webpage](https://mariadb.org/)

## About

MariaDB is a fork of MySQL. This is an unofficial image provided by linuxserver.io because there is no official image for arm.

## Connecting to the DB

The port is 3306. It exists inside the docker network, so you can connect via `mariadb:3306` for internal connections. For external connections, use `«name-or-ip»:3306`.

![image](https://user-images.githubusercontent.com/46672225/69734358-7f030800-1137-11ea-9874-7d2c86b3d239.png)

## Setup

Before starting the stack, edit the `docker-compose.yml` file and check your environment variables. In particular:

```yaml
  environment:
    - TZ=Etc/UTC
    - MYSQL_ROOT_PASSWORD=
    - MYSQL_DATABASE=default
    - MYSQL_USER=mariadbuser
    - MYSQL_PASSWORD=
```

If you are running old-menu, you will have to set both passwords. Under new-menu, the menu may have allocated random passwords for you but you can change them if you like.

You only get the opportunity to change the `MYSQL_` prefixed environment variables before you bring up the container for the first time. If you decide to change these values after initialisation, you will either have to:

1. Erase the persistent storage area and start again. There are three steps:

    * Stop the container and remove the persistent storage area:

        ``` console
        $ cd ~/IOTstack
        $ docker-compose down mariadb
        $ sudo rm -rf ./volumes/mariadb
        ```

        > see also [if downing a container doesn't work](../Basic_setup/index.md/#downContainer)

    * Edit `docker-compose.yml` and change the variables.
    * Bring up the container:

        ``` console
        $ docker-compose up -d mariadb
        ```

2. Open a terminal window within the container (see below) and change the values by hand.

    > The how-to is beyond the scope of this documentation. Google is your friend!

## Terminal

You can open a terminal session within the mariadb container via:

``` console
$ docker exec -it mariadb bash
```

To connect to the database: `mysql -uroot -p`

To close the terminal session, either:

* type "exit" and press return; or
* press control+d.

## Container health check { #healthCheck }

### theory of operation { #healthCheckTheory }

A script, or "agent", to assess the health of the MariaDB container has been added to the *local image* via the *Dockerfile*. In other words, the script is specific to IOTstack.

The agent is invoked 30 seconds after the container starts, and every 30 seconds thereafter. The agent:

1. Runs the command:

    ```
    mysqladmin ping -h localhost
    ```

2. If that command succeeds, the agent compares the response returned by the command with the expected response:

    ```
    mysqld is alive
    ```

3.
If the command returned the expected response, the agent tests the responsiveness of the TCP port the `mysqld` daemon should be listening on (see [customising health-check](#healthCheckCustom)). + +4. If all of those steps succeed, the agent concludes that MariaDB is functioning properly and returns "healthy". + +### monitoring health-check { #healthCheckMonitor } + +Portainer's *Containers* display contains a *Status* column which shows health-check results for all containers that support the feature. + +You can also use the `docker ps` command to monitor health-check results. The following command narrows the focus to mariadb: + +``` console +$ docker ps --format "table {{.Names}}\t{{.Status}}" --filter name=mariadb +``` + +Possible reply patterns are: + +1. The container is starting and has not yet run the health-check agent: + + ``` + NAMES STATUS + mariadb Up 5 seconds (health: starting) + ``` + +2. The container has been running for at least 30 seconds and the health-check agent has returned a positive result within the last 30 seconds: + + ``` + NAMES STATUS + mariadb Up 33 seconds (healthy) + ``` + +3. The container has been running for more than 90 seconds but has failed the last three successive health-check tests: + + ``` + NAMES STATUS + mariadb Up About a minute (unhealthy) + ``` + +### customising health-check { #healthCheckCustom } + +You can customise the operation of the health-check agent by editing the `mariadb` service definition in your *Compose* file: + +1. By default, the `mysqld` daemon listens to **internal** port 3306. If you need change that port, you also need to inform the health-check agent via an environment variable. For example, suppose you changed the **internal** port to 12345: + + ```yaml + environment: + - MYSQL_TCP_PORT=12345 + ``` + + Notes: + + * The `MYSQL_TCP_PORT` variable is [defined by MariaDB](https://mariadb.com/kb/en/mariadb-environment-variables/), not IOTstack, so changing this variable affects more than just the health-check agent. + * If you are running "old menu", this change should be made in the file: + + ``` + ~/IOTstack/services/mariadb/mariadb.env + ``` + +2. The `mysqladmin ping` command relies on the root password supplied via the `MYSQL_ROOT_PASSWORD` environment variable in the *Compose* file. The command will not succeed if the root password is not correct, and the agent will return "unhealthy". + +3. If the health-check agent misbehaves in your environment, or if you simply don't want it to be active, you can disable all health-checking for the container by adding the following lines to its service definition: + + ```yaml + healthcheck: + disable: true + ``` + + Note: + + * The mere presence of a `healthcheck:` clause in the `mariadb` service definition overrides the supplied agent. In other words, the following can't be used to re-enable the supplied agent: + + ```yaml + healthcheck: + disable: false + ``` + + You must remove the entire `healthcheck:` clause. + +## Keeping MariaDB up-to-date + +To update the `mariadb` container: + +``` console +$ cd ~/IOTstack +$ docker-compose build --no-cache --pull mariadb +$ docker-compose up -d mariadb +$ docker system prune +$ docker system prune +``` + +The first "prune" removes the old *local* image, the second removes the old *base* image. 
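If you would like some extra insurance before an update, you can take a logical backup of your databases first. This is only a sketch, not IOTstack's official backup mechanism; it assumes the `mysqldump` tool inside the container and the root password you set via `MYSQL_ROOT_PASSWORD`:

``` console
$ cd ~/IOTstack
$ docker exec mariadb mysqldump -uroot -p«password» --all-databases > mariadb_backup.sql
```

Keep the dump file somewhere safe; it can be replayed with the `mysql` client if anything goes wrong during the update.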
diff --git a/docs/Containers/Mosquitto.md b/docs/Containers/Mosquitto.md new file mode 100644 index 000000000..d3f1b9ef1 --- /dev/null +++ b/docs/Containers/Mosquitto.md @@ -0,0 +1,776 @@ +# Mosquitto + +This document discusses an IOTstack-specific version of Mosquitto built on top of [Eclipse/Mosquitto](https://github.com/eclipse/mosquitto) using a *Dockerfile*. + +> If you want the documentation for the original implementation of Mosquitto (just "as it comes" from *DockerHub*) please see [Mosquitto.md](https://github.com/SensorsIot/IOTstack/blob/old-menu/docs/Containers/Mosquitto.md) on the old-menu branch. + +
+ +## References + +- [*Eclipse Mosquitto* home](https://mosquitto.org) +- [*GitHub*: eclipse/mosquitto](https://github.com/eclipse/mosquitto) +- [*DockerHub*: eclipse-mosquitto](https://hub.docker.com/_/eclipse-mosquitto) +- [Setting up passwords](https://www.youtube.com/watch?v=1msiFQT_flo) (video) +- [Tutorial: from MQTT to InfluxDB via Node-Red](https://gist.github.com/Paraphraser/c9db25d131dd4c09848ffb353b69038f) + +## Significant directories and files + +``` +~/IOTstack +├── .templates +│ └── mosquitto +│ ├── service.yml ❶ +│ ├── Dockerfile ❷ +│ ├── docker-entrypoint.sh ❸ +│ └── iotstack_defaults ❹ +│ ├── config +│ │ ├── filter.acl +│ │ └── mosquitto.conf +│ └── pwfile +│ └── pwfile +├── services +│ └── mosquitto +│ └── service.yml ❺ +├── docker-compose.yml ❻ +└── volumes + └── mosquitto ❼ + ├── config + │ ├── filter.acl + │ └── mosquitto.conf + ├── data + │ └── mosquitto.db + ├── log + └── pwfile + └── pwfile +``` + +1. The *template service definition*. +2. The *Dockerfile* used to customise Mosquitto for IOTstack. +3. A replacement for the Eclipse-Mosquitto script of the same name, extended to handle container self-repair. +4. A standard set of defaults for IOTstack (used to initialise defaults on first run, and for container self-repair). +5. The *working service definition* (only relevant to old-menu, copied from ❶). +6. The *Compose* file (includes ❶). +7. The *persistent storage area*: + + * Directories and files in ❼ are owned by userID 1883. This is enforced each time Mosquitto starts. + * You will normally need `sudo` to make changes in this area. + * Each time Mosquitto starts, it automatically replaces anything originating in ❹ that has gone missing from ❼. This "self-repair" function is intended to provide reasonable assurance that Mosquitto will at least **start** instead of going into a restart loop. + +## How Mosquitto gets built for IOTstack + +### Mosquitto source code ([*GitHub*](https://github.com)) + +The source code for Mosquitto lives at [*GitHub* eclipse/mosquitto](https://github.com/eclipse/mosquitto). + +### Mosquitto images ([*DockerHub*](https://hub.docker.com)) + +Periodically, the source code is recompiled and the resulting image is pushed to [eclipse-mosquitto](https://hub.docker.com/_/eclipse-mosquitto?tab=tags&page=1&ordering=last_updated) on *DockerHub*. + +### IOTstack menu + +When you select Mosquitto in the IOTstack menu, the *template service definition* is copied into the *Compose* file. + +> Under old menu, it is also copied to the *working service definition* and then not really used. + +### IOTstack first run + +On a first install of IOTstack, you run the menu, choose Mosquitto as one of your containers, and are told to do this: + +```console +$ cd ~/IOTstack +$ docker-compose up -d +``` + +> See also the [Migration considerations](#migration) (below). + +`docker-compose` reads the *Compose* file. When it arrives at the `mosquitto` fragment, it finds: + +```yaml + mosquitto: + container_name: mosquitto + build: + context: ./.templates/mosquitto/. + args: + - MOSQUITTO_BASE=eclipse-mosquitto:latest + … +``` + +Note: + +* Earlier versions of the Mosquitto service definition looked like this: + + ```yaml + mosquitto: + container_name: mosquitto + build: ./.templates/mosquitto/. + … + ``` + + The single-line `build` produces *exactly* the same result as the four-line `build`, save that the single-line form does not support [pinning Mosquitto to a specific version](#versionPinning). 
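As a sketch of what pinning looks like with the four-line form, you would replace the `latest` tag in the build argument with a specific tag from DockerHub (the version shown here is only an example):

```yaml
  mosquitto:
    container_name: mosquitto
    build:
      context: ./.templates/mosquitto/.
      args:
        - MOSQUITTO_BASE=eclipse-mosquitto:2.0.18
```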
+ +The `./.templates/mosquitto/.` path associated with the `build` tells `docker-compose` to look for: + +``` +~/IOTstack/.templates/mosquitto/Dockerfile +``` + +> The *Dockerfile* is in the `.templates` directory because it is intended to be a common build for **all** IOTstack users. This is different to the arrangement for Node-RED where the *Dockerfile* is in the `services` directory because it is how each individual IOTstack user's version of Node-RED is customised. + +The *Dockerfile* begins with: + +```dockerfile +ARG MOSQUITTO_BASE=eclipse-mosquitto:latest +FROM $MOSQUITTO_BASE +``` + +The `FROM` statement tells the build process to pull down the ***base image*** from [*DockerHub*](https://hub.docker.com). + +> It is a ***base*** image in the sense that it never actually runs as a container on your Raspberry Pi. + +The remaining instructions in the *Dockerfile* customise the *base image* to produce a ***local image***. The customisations are: + +1. Add the `rsync` and `tzdata` packages. + + * `rsync` helps the container perform self-repair; while + * `tzdata` enables Mosquitto to respect the "TZ" environment variable. + +2. Add a standard set of configuration defaults appropriate for IOTstack. +3. Replace `docker-entrypoint.sh` with a version which: + + * Calls `rsync` to perform self-repair if configuration files go missing; and + * Enforces 1883:1883 ownership in `~/IOTstack/volumes/mosquitto`. + +The *local image* is instantiated to become your running container. + +When you run the `docker images` command after Mosquitto has been built, you *may* see two rows for Mosquitto: + +```console +$ docker images +REPOSITORY TAG IMAGE ID CREATED SIZE +iotstack_mosquitto latest cf0bfe1a34d6 4 weeks ago 11.6MB +eclipse-mosquitto latest 46ad1893f049 4 weeks ago 8.31MB +``` + +* `eclipse-mosquitto` is the *base image*; and +* `iotstack_mosquitto` is the *local image*. + +You *may* see the same pattern in Portainer, which reports the *base image* as "unused". You should not remove the *base* image, even though it appears to be unused. + +> Whether you see one or two rows depends on the version of `docker-compose` you are using and how your version of `docker-compose` builds local images. + +### Migration considerations { #migration } + +Under the original IOTstack implementation of Mosquitto (just "as it comes" from *DockerHub*), the service definition expected the configuration files to be at: + +``` +~/IOTstack/services/mosquitto/mosquitto.conf +~/IOTstack/services/mosquitto/filter.acl +``` + +Under this implementation of Mosquitto, the configuration files have moved to: + +``` +~/IOTstack/volumes/mosquitto/config/mosquitto.conf +~/IOTstack/volumes/mosquitto/config/filter.acl +``` + +> The change of location is one of the things that allows self-repair to work properly. + +The default versions of each configuration file are the **same**. Only the **locations** have changed. If you did not alter either file when you were running the original IOTstack implementation of Mosquitto, there will be no change in Mosquitto's behaviour when it is built from a *Dockerfile*. + +However, if you did alter either or both configuration files, then you should compare the old and new versions and decide whether you wish to retain your old settings. For example: + +```console +$ cd ~/IOTstack +$ diff ./services/mosquitto/mosquitto.conf ./volumes/mosquitto/config/mosquitto.conf +``` + +> You can also use the `-y` option on the `diff` command to see a side-by-side comparison of the two files. 
+ +Using `mosquitto.conf` as the example, assume you wish to use your existing file instead of the default: + +1. To move your existing file into the new location: + + ```console + $ cd ~/IOTstack + $ sudo mv ./services/mosquitto/mosquitto.conf ./volumes/mosquitto/config/mosquitto.conf + ``` + + > The move overwrites the default. At this point, the moved file will probably be owned by user "pi" but that does not matter. + +2. Mosquitto will always enforce correct ownership (1883:1883) on any restart but it will not overwrite permissions. If in doubt, use mode 644 as your default for permissions: + + ```console + $ sudo chmod 644 ./services/mosquitto/mosquitto.conf + ``` + +3. Restart Mosquitto: + + ```console + $ docker-compose restart mosquitto + ``` + +4. Check your work: + + ```console + $ ls -l ./volumes/mosquitto/config/mosquitto.conf + -rw-r--r-- 1 1883 1883 ssss mmm dd hh:mm ./volumes/mosquitto/config/mosquitto.conf + ``` + +5. If necessary, repeat these steps with `filter.acl`. + +## Logging + +Mosquitto logging is controlled by `mosquitto.conf`. This is the default configuration: + +```apacheconf +#log_dest file /mosquitto/log/mosquitto.log +log_dest stdout +log_timestamp_format %Y-%m-%dT%H:%M:%S +# Reduce size and SD-card flash wear, safe to remove if using a SSD +connection_messages false +``` + +When `log_dest` is set to `stdout`, you inspect Mosquitto's logs like this: + +```console +$ docker logs mosquitto +``` + +Logs written to `stdout` are stored and persisted to disk as managed by Docker. +They are kept over reboots, but are lost when your Mosquitto container is +removed or updated. + +The alternative, which *may* be more appropriate if you are running on an SSD or HD, is to change `mosquitto.conf` to be like this: + +``` +log_dest file /mosquitto/log/mosquitto.log +#log_dest stdout +log_timestamp_format %Y-%m-%dT%H:%M:%S +``` + +and then restart Mosquitto: + +```console +$ cd ~/IOTstack +$ docker-compose restart mosquitto +``` + +The path `/mosquitto/log/mosquitto.log` is an **internal** path. When this style of logging is active, you inspect Mosquitto's logs using the **external** path like this: + +```console +$ sudo tail ~/IOTstack/volumes/mosquitto/log/mosquitto.log +``` + +> You need to use `sudo` because the log is owned by userID 1883 and Mosquitto creates it without "world" read permission. + +Logs written to `mosquitto.log` persist until you take action to prune the file. + +## Security + +### Configuring security + +Mosquitto security is controlled by `mosquitto.conf`. These are the relevant directives: + +``` +#password_file /mosquitto/pwfile/pwfile +allow_anonymous true +``` + +Mosquitto security can be in four different states, which are summarised in the following table: + +`password_file` | `allow_anonymous` | security enforcement | remark | +:--------------:|:-----------------:|:--------------------:|-------------------| +disabled | true | open access | default | +disabled | false | all access denied | not really useful | +enabled | true | credentials optional | | +enabled | false | credentials required | | + + +### Password file management + +The password file for Mosquitto is part of a mapped volume: + +* The **internal** path is `/mosquitto/pwfile/pwfile` +* The **external** path is `~/IOTstack/volumes/mosquitto/pwfile/pwfile` + +A common problem with the previous version of Mosquitto for IOTstack occurred when the `password_file` directive was enabled but the `pwfile` was not present. Mosquitto went into a restart loop. 
+ +The Mosquitto container performs self-repair each time the container is brought up or restarts. If `pwfile` is missing, an empty file is created as a placeholder. This prevents the restart loop. What happens next depends on `allow_anonymous`: + +* If `true` then: + + - Any MQTT request *without* credentials will be permitted; + - Any MQTT request *with* credentials will be rejected (because `pwfile` is empty so there is nothing to match on). + +* If `false` then **all** MQTT requests will be rejected. + +#### create username and password + +To create a username and password, use the following as a template. + +```console +$ docker exec mosquitto mosquitto_passwd -b /mosquitto/pwfile/pwfile «username» «password» +``` + +Replace «username» and «password» with appropriate values, then execute the command. For example, to create the username "hello" with password "world": + +```console +$ docker exec mosquitto mosquitto_passwd -b /mosquitto/pwfile/pwfile hello world +``` + +Note: + +* See also [customising health-check](#healthCheckCustom). If you are creating usernames and passwords, you may also want to create credentials for the health-check agent. + +#### check password file + +There are two ways to verify that the password file exists and has the expected content: + +1. View the file using its **external** path: + + ```console + $ sudo cat ~/IOTstack/volumes/mosquitto/pwfile/pwfile + ``` + + > `sudo` is needed because the file is neither owned nor readable by `pi`. + +2. View the file using its **internal** path: + + ```console + $ docker exec mosquitto cat /mosquitto/pwfile/pwfile + ``` + +Each credential starts with the username and occupies one line in the file: + +``` +hello:$7$101$ZFOHHVJLp2bcgX+h$MdHsc4rfOAhmGG+65NpIEJkxY0beNeFUyfjNAGx1ILDmI498o4cVOaD9vDmXqlGUH9g6AgHki8RPDEgjWZMkDA== +``` + +#### remove entry from password file + +To remove an entry from the password file: + +```console +$ docker exec mosquitto mosquitto_passwd -D /mosquitto/pwfile/pwfile «username» +``` + +#### reset the password file + +There are several ways to reset the password file. Your options are: + +1. Remove the password file and restart Mosquitto: + + ```console + $ cd ~/IOTstack + $ sudo rm ./volumes/mosquitto/pwfile/pwfile + $ docker-compose restart mosquitto + ``` + + The result is an empty password file. + +2. Clear all existing passwords while adding a new password: + + ```console + $ docker exec mosquitto mosquitto_passwd -c -b /mosquitto/pwfile/pwfile «username» «password» + ``` + + The result is a password file with a single entry. + +3. Clear all existing passwords in favour of a single dummy password which is then removed: + + ```console + $ docker exec mosquitto mosquitto_passwd -c -b /mosquitto/pwfile/pwfile dummy dummy + $ docker exec mosquitto mosquitto_passwd -D /mosquitto/pwfile/pwfile dummy + ``` + + The result is an empty password file. + +### Activate Mosquitto security + +1. Use `sudo` and your favourite text editor to open the following file: + + ``` + ~/IOTstack/volumes/mosquitto/config/mosquitto.conf + ``` + +2. Remove the comment indicator from the following line: + + ``` + #password_file /mosquitto/pwfile/pwfile + ``` + + so that it becomes: + + ``` + password_file /mosquitto/pwfile/pwfile + ``` + +3. Set `allow_anonymous` as required: + + ``` + allow_anonymous true + ``` + + If `true` then: + + * Any MQTT request without credentials will be permitted; + * The validity of credentials supplied with any MQTT request will be enforced. 
+ + If `false` then: + + * Any MQTT request without credentials will be rejected; + * The validity of credentials supplied with any MQTT request will be enforced. + +4. Save the modified configuration file and restart Mosquitto: + + ```console + $ cd ~/IOTstack + $ docker-compose restart mosquitto + ``` + +### Testing Mosquitto security + +#### assumptions + +1. You have created at least one username ("hello") and password ("world"). +2. `password_file` is enabled. +3. `allow_anonymous` is `false`. + +#### install testing tools + +If you do not have the Mosquitto clients installed on your Raspberry Pi (ie `$ which mosquitto_pub` does not return a path), install them using: + +```console +$ sudo apt install -y mosquitto-clients +``` + +#### test: *anonymous access is prohibited* + +Test **without** providing credentials: + +```console +$ mosquitto_pub -h 127.0.0.1 -p 1883 -t "/password/test" -m "up up and away" +Connection Refused: not authorised. +Error: The connection was refused. +``` + +Note: + +* The error is the expected result and shows that Mosquitto will not allow anonymous access. + +#### test: *access with credentials is permitted* + +Test with credentials + +```console +$ mosquitto_pub -h 127.0.0.1 -p 1883 -t "/password/test" -m "up up and away" -u hello -P world +$ +``` + +Note: + +* The absence of any error message means the message was sent. Silence = success! + +#### test: *round-trip with credentials is permitted* + +Prove round-trip connectivity will succeed when credentials are provided. First, set up a subscriber as a background process. This mimics the role of a process like Node-Red: + +```console +$ mosquitto_sub -v -h 127.0.0.1 -p 1883 -t "/password/test" -F "%I %t %p" -u hello -P world & +[1] 25996 +``` + +Repeat the earlier test: + +```console +$ mosquitto_pub -h 127.0.0.1 -p 1883 -t "/password/test" -m "up up and away" -u hello -P world +2021-02-16T14:40:51+1100 /password/test up up and away +``` + +Note: + +* the second line above is coming from the `mosquitto_sub` running in the background. + +When you have finished testing you can kill the background process (press return twice after you enter the `kill` command): + +```console +$ kill %1 +$ +[1]+ Terminated mosquitto_sub -v -h 127.0.0.1 -p 1883 -t "/password/test" -F "%I %t %p" -u hello -P world +``` + +## Container health check + +### theory of operation + +A script , or "agent", to assess the health of the Mosquitto container has been added to the *local image* via the *Dockerfile*. In other words, the script is specific to IOTstack. + +The agent is invoked 30 seconds after the container starts, and every 30 seconds thereafter. The agent: + +* Publishes a retained MQTT message to the broker running in the same container. The message payload is the current date and time, and the default topic string is: + + ``` + iotstack/mosquitto/healthcheck + ``` + +* Subscribes to the same broker for the same topic for a single message event. +* Compares the payload sent with the payload received. If the payloads (ie time-stamps) match, the agent concludes that the Mosquitto broker (the process running inside the same container) is functioning properly for round-trip messaging. + +### monitoring health-check { #healthCheckMonitor } + +Portainer's *Containers* display contains a *Status* column which shows health-check results for all containers that support the feature. + +You can also use the `docker ps` command to monitor health-check results. 
The following command narrows the focus to mosquitto: + +```console +$ docker ps --format "table {{.Names}}\t{{.Status}}" --filter name=mosquitto +``` + +Possible reply patterns are: + +1. The container is starting and has not yet run the health-check agent: + + ``` + NAMES STATUS + mosquitto Up 3 seconds (health: starting) + ``` + +2. The container has been running for at least 30 seconds and the health-check agent has returned a positive result within the last 30 seconds: + + ``` + NAMES STATUS + mosquitto Up 34 seconds (healthy) + ``` + +3. The container has been running for more than 90 seconds but has failed the last three successive health-check tests: + + ``` + NAMES STATUS + mosquitto Up About a minute (unhealthy) + ``` + +You can also subscribe to the same topic that the health-check agent is using to view the retained messages as they are published: + +```console +$ mosquitto_sub -v -h localhost -p 1883 -t "iotstack/mosquitto/healthcheck" -F "%I %t %p" +``` + +Notes: + +* This assumes you are running the command *outside* container-space on the *same* host as your Mosquitto container. If you run this command from *another* host, replace `localhost` with the IP address or domain name of the host where your Mosquitto container is running. +* The `-p 1883` is the *external* port. You will need to adjust this if you are using a different *external* port for your MQTT service. +* If you enable authentication for your Mosquitto broker, you will need to add `-u «user»` and `-P «password»` parameters to this command. +* You should expect to see a new message appear approximately every 30 seconds. That indicates the health-check agent is functioning normally. Use control+c to terminate the command. + +### customising health-check { #healthCheckCustom } + +You can customise the operation of the health-check agent by editing the `mosquitto` service definition in your *Compose* file: + +1. By default, the mosquitto broker listens to **internal** port 1883. If you need change that port, you also need to inform the health-check agent via an environment variable. For example, suppose you changed the **internal** port to 12345: + + ```yaml + environment: + - HEALTHCHECK_PORT=12345 + ``` + +2. If the default topic string used by the health-check agent causes a name-space collision, you can override it. For example, you could use a Universally-Unique Identifier (UUID): + + ```yaml + environment: + - HEALTHCHECK_TOPIC=4DAA361F-288C-45D5-9540-F1275BDCAF02 + ``` + + Note: + + * You will also need to use the same topic string in the `mosquitto_sub` command shown at [monitoring health-check](#healthCheckMonitor). + +3. If you have enabled authentication for your Mosquitto broker service, you will need to provide appropriate credentials for your health-check agent: + + ```yaml + environment: + - HEALTHCHECK_USER=healthyUser + - HEALTHCHECK_PASSWORD=healthyUserPassword + ``` + +4. If the health-check agent misbehaves in your environment, or if you simply don't want it to be active, you can disable all health-checking for the container by adding the following lines to its service definition: + + ```yaml + healthcheck: + disable: true + ``` + + Notes: + + * The directives to disable health-checking are independent of the environment variables. If you want to disable health-checking temporarily, there is no need to remove any `HEALTHCHECK_` environment variables that may already be in place. 
+ * Conversely, the mere presence of a `healthcheck:` clause in the `mosquitto` service definition overrides the supplied agent. In other words, the following can't be used to re-enable the supplied agent: + + ```yaml + healthcheck: + disable: false + ``` + + You must remove the entire `healthcheck:` clause. + +## Upgrading Mosquitto + +You can update most containers like this: + +```console +$ cd ~/IOTstack +$ docker-compose pull +$ docker-compose up -d +$ docker system prune +``` + +In words: + +* `docker-compose pull` downloads any newer images; +* `docker-compose up -d` causes any newly-downloaded images to be instantiated as containers (replacing the old containers); and +* the `prune` gets rid of the outdated images. + +This strategy doesn't work when a *Dockerfile* is used to build a *local image* on top of a *base image* downloaded from [*DockerHub*](https://hub.docker.com). The *local image* is what is running so there is no way for the `pull` to sense when a newer version becomes available. + +The only way to know when an update to Mosquitto is available is to check the [eclipse-mosquitto tags page](https://hub.docker.com/_/eclipse-mosquitto?tab=tags&page=1&ordering=last_updated) on *DockerHub*. + +Once a new version appears on *DockerHub*, you can upgrade Mosquitto like this: + +```console +$ cd ~/IOTstack +$ docker-compose build --no-cache --pull mosquitto +$ docker-compose up -d mosquitto +$ docker system prune +$ docker system prune +``` + +Breaking it down into parts: + +* `build` causes the named container to be rebuilt; +* `--no-cache` tells the *Dockerfile* process that it must not take any shortcuts. It really **must** rebuild the *local image*; +* `--pull` tells the *Dockerfile* process to actually check with [*DockerHub*](https://hub.docker.com) to see if there is a later version of the *base image* and, if so, to download it before starting the build; +* `mosquitto` is the named container argument required by the `build` command. + +Your existing Mosquitto container continues to run while the rebuild proceeds. Once the freshly-built *local image* is ready, the `up` tells `docker-compose` to do a new-for-old swap. There is barely any downtime for your MQTT broker service. + +The `prune` is the simplest way of cleaning up. The first call removes the old *local image*. The second call cleans up the old *base image*. Whether an old *base image* exists depends on the version of `docker-compose` you are using and how your version of `docker-compose` builds local images. + +### Mosquitto version pinning { #versionPinning } + +If an update to Mosquitto introduces a breaking change, you can revert to an earlier know-good version by pinning to that version. Here's how: + +1. Use your favourite text editor to open: + + ``` + ~/IOTstack/docker-compose.yml + ``` + +2. Find the Mosquitto service definition. If your service definition contains this line: + + ```yaml + build: ./.templates/mosquitto/. + ``` + + then replace that line with the following four lines: + + ```yaml + build: + context: ./.templates/mosquitto/. + args: + - MOSQUITTO_BASE=eclipse-mosquitto:latest + ``` + + Notes: + + * The four-line form of the `build` directive is now the default for Mosquitto so those lines may already be present in your compose file. + * Remember to use spaces, not tabs, when editing compose files. + +3. Replace `latest` with the version you wish to pin to. For example, to pin to version 2.0.13: + + ```yaml + - MOSQUITTO_BASE=eclipse-mosquitto:2.0.13 + ``` + +4. 
Save the file and tell `docker-compose` to rebuild the local image: + + ```console + $ cd ~/IOTstack + $ docker-compose build --no-cache --pull mosquitto + $ docker-compose up -d mosquitto + $ docker system prune + ``` + + The new *local image* is built, then the new container is instantiated based on that image. The `prune` deletes the old *local image*. + +5. Images built in this way will always be tagged with "latest", as in: + + ```console + $ docker images iotstack_mosquitto + REPOSITORY TAG IMAGE ID CREATED SIZE + iotstack_mosquitto latest 8c0543149b9b About a minute ago 16.2MB + ``` + + You may find it useful to assign an explicit tag to help you remember the version number used for the build. For example: + + ```console + $ docker tag iotstack_mosquitto:latest iotstack_mosquitto:2.0.13 + $ docker images iotstack_mosquitto + REPOSITORY TAG IMAGE ID CREATED SIZE + iotstack_mosquitto 2.0.13 8c0543149b9b About a minute ago 16.2MB + iotstack_mosquitto latest 8c0543149b9b About a minute ago 16.2MB + ``` + + You can also query the image metadata to discover version information: + + ```console + $ docker image inspect iotstack_mosquitto:latest | jq .[0].Config.Labels + { + "com.github.SensorsIot.IOTstack.Dockerfile.based-on": "https://github.com/eclipse/mosquitto", + "com.github.SensorsIot.IOTstack.Dockerfile.build-args": "eclipse-mosquitto:2.0.13", + "description": "Eclipse Mosquitto MQTT Broker", + "maintainer": "Roger Light " + } + ``` + +## About Port 9001 + +Earlier versions of the IOTstack service definition for Mosquitto included two port mappings: + +```yaml +ports: + - "1883:1883" + - "9001:9001" +``` + +[Issue 67](https://github.com/SensorsIot/IOTstack/issues/67) explored the topic of port 9001 and showed that: + +* The base image for Mosquitto did not expose port 9001; and +* The running container was not listening to port 9001. + +On that basis, the mapping for port 9001 was removed from `service.yml`. + +If you have a use-case that needs port 9001, you can re-enable support by: + +1. Inserting the port mapping under the `mosquitto` definition in `docker-compose.yml`: + + ```yaml + - "9001:9001" + ``` + +2. Inserting the additional listener in `mosquitto.conf`: + + ```apacheconf + listener 1883 + listener 9001 + ``` + + You need **both** lines. If you omit 1883 then Mosquitto will stop listening to port 1883 and will only listen to port 9001. + +3. Restarting the container: + + ```console + $ cd ~/IOTstack + $ docker-compose restart mosquitto + ``` + +Please consider raising an issue to document your use-case. If you think your use-case has general application then please also consider creating a pull request to make the changes permanent. diff --git a/docs/Containers/MotionEye.md b/docs/Containers/MotionEye.md new file mode 100755 index 000000000..e947819f4 --- /dev/null +++ b/docs/Containers/MotionEye.md @@ -0,0 +1,151 @@ +# MotionEye + +## About + +MotionEye is a web frontend for the Motion project. 
+ +## References + +* MotionEye: + - [Wiki](https://github.com/motioneye-project/motioneye/wiki) + - [GitHub](https://github.com/motioneye-project/motioneye) + - [DockerHub](https://hub.docker.com/r/dontobi/motioneye.rpi) +* Motion project: + - [Documentation](https://motion-project.github.io/) + - [GitHub](https://github.com/Motion-Project/motion) + +## Service Definition + +This is the default service definition: + +``` yaml +motioneye: + image: dontobi/motioneye.rpi:latest + container_name: "motioneye" + restart: unless-stopped + ports: + - "8765:8765" + - "8766:8081" + environment: + - TZ=${TZ:-Etc/UTC} + volumes: + - ./volumes/motioneye/etc_motioneye:/etc/motioneye + - ./volumes/motioneye/var_lib_motioneye:/var/lib/motioneye +``` + +## Administrative interface + +MotionEye's administrative interface is available on port 8765. For example: + +``` +http://raspberrypi.local:8765 +``` + +The default username is `admin` (all lower case) with no password. + +## Camera streams + +The first camera you define in the administrative interface is assigned to internal port 8081. The default service definition maps that to port 8766: + +``` yaml +- "8766:8081" +``` + +You can access the stream with a web browser on port 8766. For example: + +``` +http://raspberrypi.local:8766 +``` + +Each subsequent camera you define in the administrative interface will be assigned a new internal port number: + +* Camera 2 will be internal port 8082, then +* Camera 3 will be internal port 8083, +* and so on. + +Each camera you define after the first will need its own port mapping in the service definition in your compose file. For example: + +``` yaml +- "8767:8082" +- "8768:8083" +- … +``` + +Key points: + +1. You do not have to make camera streams available outside the container. It is optional. +2. You do not have to accept the default internal port assignments of 8081, 8082 and so on. You can change internal ports in the administrative interface if you wish. If you do this, remember to update the internal (right hand side) ports in the service definition in your compose file. +3. You do not have to adopt the external port sequence 8766, 8767 and so on. Port 8766 is the default for the first camera only because it does not conflict with any other IOTstack template. + +## Clip Storage + +By default local camera data is stored at the internal path: + +``` +/var/lib/motioneye/«camera_name» +``` + +That maps to the external path: + +``` +~/IOTstack/volumes/motioneye/var_lib_motioneye/«camera_name» +``` + +Tips: + +* The automatic mapping to `«camera_name»` can be unreliable. After defining a camera, it is a good idea to double-check the actual path in the "Root Directory" field of the "File Storage" section in the administrative interface. +* Movie clips are kept forever by default. Depending on other settings, this can quickly run your Pi out of disk space so it's a good idea to tell MotionEye to discard old footage using the "Preserve Movies" field of the "Movies" section in the administrative interface. + +### Backup considerations + +Although it depends on your exact settings, MotionEye's video storage can represent a significant proportion of your backup files. If you want to constrain your backup files to reasonable sizes, consider excluding the video storage from your routine backups by changing where MotionEye videos are kept. This is one approach: + +1. Be in the appropriate directory: + + ``` bash + $ cd ~/IOTstack + ``` + +2. 
Terminate the motioneye container: + + ``` bash + $ docker-compose down motioneye + ``` + + > see also [if downing a container doesn't work](../Basic_setup/index.md/#downContainer) + +3. Move the video storage folder: + + ``` bash + $ sudo mv ./volumes/motioneye/var_lib_motioneye ~/motioneye-videos + ``` + +4. Open your `docker-compose.yml` in a text editor. Find this line in your `motioneye` service definition: + + ``` yaml + - ./volumes/motioneye/var_lib_motioneye:/var/lib/motioneye + ``` + + and change it to be: + + ``` yaml + - /home/pi/motioneye-videos:/var/lib/motioneye + ``` + + then save the edited compose file. + +5. Start the container again: + + ``` bash + $ docker-compose up -d motioneye + ``` + +This change places video storage outside of the usual `~/IOTstack/volumes` path, where IOTstack backup scripts will not see it. + +An alternative approach is to omit the volume mapping for `/var/lib/motioneye` entirely. Clips will be still be recorded inside the container and you will be able to play and download the footage using the administrative interface. However, any saved clips will disappear each time the container is re-created (not just restarted). Clips stored inside the container also will not form part of any backup. + +If you choose this method, make sure you configure MotionEye to discard old footage using the "Preserve Movies" field of the "Movies" section in the administrative interface. This is a per-camera setting so remember to do it for **all** your cameras. If you do not do this, you are still at risk of running your Pi out of disk space, and it's a difficult problem to diagnose. + +## Remote motioneye + +If you have connected to a remote motion eye note that the directory is on that device and has nothing to do with the container. diff --git a/docs/Containers/NextCloud.md b/docs/Containers/NextCloud.md new file mode 100755 index 000000000..a7d2b52f9 --- /dev/null +++ b/docs/Containers/NextCloud.md @@ -0,0 +1,364 @@ +# Nextcloud + +## Service definition { #serviceDefinition } + +This is the **core** of the IOTstack Nextcloud service definition: + +``` { .yaml linenums="1" } +nextcloud: + container_name: nextcloud + image: nextcloud + restart: unless-stopped + environment: + - TZ=${TZ:-Etc/UTC} + - MYSQL_HOST=nextcloud_db + - MYSQL_PASSWORD=%randomMySqlPassword% + - MYSQL_DATABASE=nextcloud + - MYSQL_USER=nextcloud + ports: + - "9321:80" + - "9343:443" + volumes: + - ./volumes/nextcloud/html:/var/www/html + depends_on: + - nextcloud_db + networks: + - default + - nextcloud + +nextcloud_db: + container_name: nextcloud_db + build: ./.templates/mariadb/. + restart: unless-stopped + environment: + - TZ=${TZ:-Etc/UTC} + - PUID=1000 + - PGID=1000 + - MYSQL_ROOT_PASSWORD=%randomPassword% + - MYSQL_PASSWORD=%randomMySqlPassword% + - MYSQL_DATABASE=nextcloud + - MYSQL_USER=nextcloud + volumes: + - ./volumes/nextcloud/db:/config + - ./volumes/nextcloud/db_backup:/backup + networks: + - nextcloud +``` + +There are two containers, one for the cloud service itself, and the other for the database. Both containers share the same persistent storage area in the volumes subdirectory so they are treated as a unit. This will not interfere with any other MariaDB containers you might wish to run. + +Key points: + +* You do **not** need to select MariaDB in the IOTstack menu just to run NextCloud. Some tutorials suggest you do. They are wrong! 
+* If you *choose* to select MariaDB in the IOTstack menu, understand that it is a *separate* instance of the relational database management system. It has no relationship with NextCloud. + +Under old-menu, you are responsible for setting passwords. The passwords are "internal use only" and it is unlikely that you will need them unless you plan to go ferreting-about in the database using SQL. The rules are: + +* The two instances of `«user_password»` **must** be the same. +* The instance of `«root_password»` *should* be different from `«user_password»`. + +Under new-menu, the menu can generate random passwords for you. You can either use that feature or roll your own using the old-menu approach by replacing: + +* Two instances of `%randomMySqlPassword%` (the `«user_password»`) +* One instance of `%randomPassword%` (the `«root_password»`) + +The passwords need to be set before you bring up the Nextcloud service for the first time. However, the following initialisation steps assume you might not have done that and always start from a clean slate. + +## Initialising Nextcloud { #initialisation } + +1. Be in the correct directory: + + ```console + $ cd ~/IOTstack + ``` + +2. If the stack is running, take it down: + + ```console + $ docker-compose down + ``` + + > see also [if downing a container doesn't work](../Basic_setup/index.md/#downContainer) + +3. Erase the persistent storage area for Nextcloud (double-check the command *before* you hit return): + + ```console + $ sudo rm -rf ./volumes/nextcloud + ``` + + This is done to force re-initialisation. In particular, it gives you assurance that the passwords in your `docker-compose.yml` are the ones that are actually in effect. + +4. Bring up the stack: + + ```console + $ docker-compose up -d + ``` + +5. Check for errors: + + Repeat the following command two or three times at 10-second intervals: + + ```console + $ docker ps + ``` + + You are looking for evidence that the `nextcloud` and `nextcloud_db` containers are up, stable, and not restarting. If you see any evidence of restarts, try to figure out why using: + + ```console + $ docker logs nextcloud + ``` + +6. On a computer that is **not** the device running Nextcloud, launch a browser and point to the device running Nextcloud using your chosen connection method. Examples: + + ``` + http://192.168.203.200:9321 + http://myrpi.mydomain.com:9321 + http://myrpi.local:9321 + http://myrpi:9321 + ``` + + The expected result is: + + ![Create Administrator Account](./images/nextcloud-createadminaccount.png) + +7. Create an administrator account and then click "Install" and wait for the loading to complete. + +8. Eventually, the dashboard will appear. Then the dashboard will be obscured by the "Nextcloud Hub" floating window which you can dismiss: + + ![Post-initialisation](./images/nextcloud-postinitialisation.png) + +9. Congratulations. Your IOTstack implementation of Nextcloud is ready to roll: + + ![Dashboard](./images/nextcloud-dashboard.png) + +## "Access through untrusted domain" { #untrustedDomain } + +> If you are reading this because you are staring at an "access through untrusted domain" message then you have come to the right place. + +Let's assume the following: + +* You used `raspi-config` to give your Raspberry Pi the name "myrpi". +* Your Raspberry Pi has the fixed IP address "192.168.203.200" (via either a static binding in your DHCP server or a static IP address on your Raspberry Pi). 
+ +Out of the box, a Raspberry Pi participates in multicast DNS so it will also have the mDNS name: + +* "myrpi.local" + +Let's also assume you have a local Domain Name System server where your Raspberry Pi: + +* has the canonical name (A record) "myrpi.mydomain.com"; plus +* an alias (CNAME record) of "nextcloud.mydomain.com". + +Rolling all that together, you would expect your Nextcloud service to be reachable at any of the following URLs: + +* `http://192.168.203.200:9321` +* `http://myrpi.local:9321` +* `http://myrpi.mydomain.com:9321` +* `http://nextcloud.mydomain.com:9321` + +To tell Nextcloud that all of those URLs are valid, you need to use `sudo` and your favourite text editor to edit this file: + +``` +~/IOTstack/volumes/nextcloud/html/config/config.php +``` + +Hint: + +* It is a good idea to make a backup of any file before you edit it. For example: + + ```console + $ cd ~/IOTstack/volumes/nextcloud/html/config/ + $ sudo cp config.php config.php.bak + ``` + +Search for "trusted_domains". To tell Nextcloud to trust **all** of the URLs above, edit the array structure like this: + +``` + 'trusted_domains' => + array ( + 0 => '192.168.203.200:9321', + 1 => 'myrpi.local:9321', + 2 => 'myrpi.mydomain.com:9321', + 3 => 'nextcloud.mydomain.com:9321', + ), +``` + +> Note: *all* the trailing commas are intentional! + +Once you have finished editing the file, save your work then restart Nextcloud: + +```console +$ cd ~/IOTstack +$ docker-compose restart nextcloud +``` + +Use `docker ps` to check that the container has restarted properly and hasn't gone into a restart loop. + +See also: + +* [Nextcloud documentation - trusted domains](https://docs.nextcloud.com/server/21/admin_manual/installation/installation_wizard.html#trusted-domains). + +### Using a DNS alias for your Nextcloud service { #dnsAlias } + +> The information in this section *may* be out of date. Recent tests suggest it is no longer necessary to add a `hostname` clause to your `docker-compose.yml` to silence warnings when using DNS aliases to reach your NextCloud service. This section is being left here so you will know what to do if you encounter the problem. + +The examples above include using a DNS alias (a CNAME record) for your Nextcloud service. If you decide to do that, you may see this warning in the log: + +``` +Could not reliably determine the server's fully qualified domain name +``` + +You can silence the warning by editing the Nextcloud service definition in `docker-compose.yml` to add your fully-qualified DNS alias using a `hostname` directive. For example: + +```yaml + hostname: nextcloud.mydomain.com +``` + +## Security considerations { #security } + +Nextcloud traffic is not encrypted. Do **not** expose it to the web by opening a port on your home router. Instead, use a VPN like Wireguard to provide secure access to your home network, and let your remote clients access Nextcloud over the VPN tunnel. + +The IOTstack service definition for NextCloud reserves port 9343 for HTTPS access but leaves it as an exercise for the reader to figure out how to make it work. You may get some guidance [here](https://youtu.be/qlcVx-k-02E?si=qtVNFAvSB8w202Jh). + +## Container health check { #healthCheck } + +A script , or "agent", to assess the health of the MariaDB container has been added to the *local image* via the *Dockerfile*. In other words, the script is specific to IOTstack. + +Because it is an instance of MariaDB, Nextcloud_DB inherits the health-check agent. 
See the [IOTstack MariaDB](MariaDB.md) documentation for more information. + +## Keeping Nextcloud up-to-date { #updatingNextcloud } + +To update the `nextcloud` container: + +```console +$ cd ~/IOTstack +$ docker-compose pull nextcloud +$ docker-compose up -d nextcloud +$ docker system prune +``` + +To update the `nextcloud_db` container: + +```console +$ cd ~/IOTstack +$ docker-compose build --no-cache --pull nextcloud_db +$ docker-compose up -d nextcloud_db +$ docker system prune +``` + +> You may need to run the `prune` command twice if you are using a 1.x version of `docker-compose`. + +## Backups { #backups } + +Nextcloud is currently excluded from the IOTstack-supplied backup scripts due to its potential size. + +> [Paraphraser/IOTstackBackup](https://github.com/Paraphraser/IOTstackBackup) includes backup and restore for NextCloud. + +If you want to take a backup, something like the following will get the job done: + +```console +$ cd ~/IOTstack +$ BACKUP_TAR_GZ=$PWD/backups/$(date +"%Y-%m-%d_%H%M").$HOSTNAME.nextcloud-backup.tar.gz +$ touch "$BACKUP_TAR_GZ" +$ docker-compose down nextcloud nextcloud_db +$ sudo tar -czf "$BACKUP_TAR_GZ" -C "./volumes/nextcloud" . +$ docker-compose up -d nextcloud +``` + +Notes: + +* A *baseline* backup takes over 400MB and about 2 minutes. Once you start adding your own data, it will take even more time and storage. +* The `up` of the NextCloud container implies the `up` of the Nextcloud_DB container. +* See also [if downing a container doesn't work](../Basic_setup/index.md/#downContainer) + +To restore, you first need to identify the name of the backup file by looking in the `backups` directory. Then: + +```console +$ cd ~/IOTstack +$ RESTORE_TAR_GZ=$PWD/backups/2021-06-12_1321.sec-dev.nextcloud-backup.tar.gz +$ docker-compose down nextcloud nextcloud_db +$ sudo rm -rf ./volumes/nextcloud/* +$ sudo tar -x --same-owner -z -f "$RESTORE_TAR_GZ" -C "./volumes/nextcloud" +$ docker-compose up -d nextcloud +``` + +If you are running from an SD card, it would be a good idea to mount an external drive to store the data. Something like: + +![image](https://user-images.githubusercontent.com/46672225/69873297-a3d6b700-12c0-11ea-98c9-40f358137b77.png) + +The external drive will have to be an ext4 formatted drive because smb, fat32 and NTFS can't handle Linux file permissions. If the permissions aren't set to "www-data" then the container won't be able to write to the disk. + +Finally, a warning: + +* If your database gets corrupted then your Nextcloud is pretty much stuffed. + +## Network Model { #networkModel } + +A walkthrough of a network model may help you to understand how Nextcloud and its database communicate. To help set the scene, the following model shows a Raspberry Pi with Docker running four containers: + +* `nextcloud` and `nextcloud_db` - both added when you select "NextCloud" +* `mariadb` - optional container added when you select "MariaDB" +* `wireguard` - optional container added when you select "WireGuard" + +![Network Model](./images/nextcloud-network-model.jpeg) + +The first thing to understand is that the `nextcloud_db` and `mariadb` containers are both instances of MariaDB. They are instantiated from the same *image* but they have completely separate existences. They have different persistent storage areas (ie databases) and they do not share data. + +The second thing to understand is how the networks inside the "Docker" rectangle shown in the model are created. 
The `networks` section of your compose file defines the networks: + +``` yaml +networks: + + default: + driver: bridge + ipam: + driver: default + + nextcloud: + driver: bridge + internal: true + ipam: + driver: default +``` + +At run time, the lower-case representation of the directory containing the compose file (ie "iotstack") is prepended to the network names, resulting in: + +* `default` ⟹ `iotstack_default` +* `nextcloud` ⟹ `iotstack_nextcloud` + +Each network is assigned a /16 IPv4 subnet. Unless you override it, the subnet ranges are chosen at random. This model assumes: + +* `iotstack_default` is assigned 172.18.0.0/16 +* `iotstack_nextcloud` is assigned 172.19.0.0/16 + +The logical router on each network takes the `.0.1` address. + +> The reason why two octets are devoted to the host address is because a /16 network prefix implies a 16-bit host portion. Each octet describes 8 bits. + +As each container is brought up, the network(s) it joins are governed by the following rules: + +1. If there is an explicit `networks:` clause in the container's service definition then the container joins the network(s) listed in the body of the clause; otherwise +2. The container joins the `default` network. + +Assuming that the `mariadb` and `wireguard` containers do not have `networks:` clauses, the result of applying those rules is shown in the following table. + +![Effect of networks clause](./images/nextcloud-networks-clause.jpeg) + +Each container is assigned an IPv4 address on each network it joins. In general, the addresses are assigned in the order in which the containers start. + +No container can easily predict either the network prefix of the networks it joins or the IP address of any other container. However, Docker provides a mechanism for any container to reach any other container with which it shares a network by using the destination container's name. + +In this model there are two MariaDB instances, one named `nextcloud_db` and the other named `mariadb`. How does the `nextcloud` container know which **name** to use? Simple. It's passed in an environment variable: + +``` +environment: + - MYSQL_HOST=nextcloud_db +``` + +At runtime, the `nextcloud` container references `nextcloud_db:3306`. Docker resolves `nextcloud_db` to 172.19.0.2 so the traffic traverses the 172.19/16 internal bridged network and arrives at the `nextcloud_db` container. + +The `nextcloud` container *could* reach the `mariadb` container via `mariadb:3306`. There's no ambiguity because Docker resolves `mariadb` to 172.18.0.2, which is a different subnet and an entirely different internal bridged network. + +> There would still be no ambiguity even if all containers attached to the `iotstack_default` network because each container name still resolves to a distinct IP address. + +In terms of **external** ports, only `mariadb` exposes port 3306. Any external process trying to reach 192.168.203.60:3306 will always be port-forwarded to the `mariadb` container. The `iotstack_nextcloud` network is declared "internal" which means it is unreachable from beyond the Raspberry Pi. Any port-mappings associated with that network are ignored. 
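+
+If you want to see how these rules have played out on your own system, you can ask Docker directly. The following is only a sketch: it assumes `jq` is installed (`sudo apt install -y jq`), and the subnets you see will almost certainly differ from the 172.18/16 and 172.19/16 ranges assumed by the model:
+
+```console
+$ docker network ls --filter name=iotstack_
+$ docker network inspect iotstack_nextcloud | jq '.[0].IPAM.Config'
+$ docker network inspect iotstack_nextcloud | jq '.[0].Containers'
+```
+
+The first command lists the networks created from your compose file, the second shows the subnet assigned to the internal `nextcloud` network, and the third shows the name and IP address of each container currently attached to it.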
diff --git a/docs/Containers/Node-RED.md b/docs/Containers/Node-RED.md new file mode 100644 index 000000000..b357701ab --- /dev/null +++ b/docs/Containers/Node-RED.md @@ -0,0 +1,1713 @@ +# Node-RED + +## References { #references } + +- [nodered.org home](https://nodered.org/) +- [GitHub: node-red/node-red-docker](https://github.com/node-red/node-red-docker) +- [DockerHub: nodered/node-red](https://hub.docker.com/r/nodered/node-red) +- [Tutorial: from MQTT to InfluxDB via Node-Red](https://gist.github.com/Paraphraser/c9db25d131dd4c09848ffb353b69038f) + +## Significant files { #significant-files } + +``` +~/IOTstack +├── .templates +│ └── nodered +│ └── service.yml ❶ +├── services +│ └── nodered +│ ├── Dockerfile ❷ +│ └── service.yml ❸ +├── docker-compose.yml ❹ +└── volumes + └── nodered ❺ + ├── data ❻ + └── ssh ❼ +``` + +1. Template *service definition*. +2. The *Dockerfile*. +3. Working *service definition* (old-menu only, copied from ❶). +4. The *Compose* file (includes ❶) +5. Persistent storage area. +6. Data directory (mapped volume). +7. SSH directory (mapped volume). + +## How Node-RED gets built for IOTstack { #iotstackBuild } + +### Node-RED source code ([GitHub](https://github.com)) { #gitHubSource } + +The source code for Node-RED lives at [GitHub node-red/node-red-docker](https://github.com/node-red/node-red-docker). + +### Node-RED images ([DockerHub](https://hub.docker.com)) { #dockerHubImages } + +Periodically, the source code is recompiled and pushed to [nodered/node-red](https://hub.docker.com/r/nodered/node-red/tags?page=1&ordering=last_updated) on *DockerHub*. See [Node-RED and `node.js` versions](#containerVersions) for an explanation of the versioning tags associated with images on *DockerHub*. + +### IOTstack menu { #iotstackMenu } + +When you select Node-RED in the IOTstack menu, the *template service definition* ❶ is copied into the *Compose* file ❹. + +> Under old menu, it is also copied to the *working service definition* ❸ and then not really used. + +You choose add-on nodes from a supplementary menu. We recommend accepting the default nodes, and adding others that you think you are likely to need. Node-RED will not build if you do not select at least one add-on node. + +Key points: + +* Under new menu, you must press the right arrow to access the supplementary menu. Under old menu, the list of add-on nodes is displayed automatically. +* Do not be concerned if you can't find an add-on node you need in the list. You can also add nodes via Manage Palette once Node-RED is running. See [component management](#componentManagement). + +Choosing add-on nodes in the menu causes the *Dockerfile* ❷ to be created. + +### IOTstack first run { #iotstackFirstRun } + +On a first install of IOTstack, you are told to do this: + +``` console +$ cd ~/IOTstack +$ docker-compose up -d +``` + +`docker-compose` reads the *Compose* file ❹. When it arrives at the `nodered` service definition, it finds : + +``` yaml linenums="1" + nodered: + container_name: nodered + build: + context: ./services/nodered/. + args: + - DOCKERHUB_TAG=latest + - EXTRA_PACKAGES= +``` + +Note: + +* Prior to July 2022, IOTstack used the following one-line syntax for the `build` directive: + + ``` yaml linenums="3" + build: ./services/nodered/. + ``` + + The older syntax meant all local customisations (version-pinning and adding extra packages) needed manual edits to the *Dockerfile* ❷. Those edits would be overwritten each time the menu was re-run to alter the selected add-on nodes. 
The newer multi-line syntax avoids that problem. + + See also [updating to July 2022 syntax](#july2022syntax). + +In either case, the path `./services/nodered/.` tells `docker-compose` to look for ❷: + +``` +~/IOTstack/services/nodered/Dockerfile +``` + +which contains instructions to download a *base* image from [*DockerHub*](https://hub.docker.com) and then apply local customisations such as the add-on nodes you chose in the IOTstack menu. The result is a *local* image which is instantiated to become your running container. + +Notes: + +1. During the build you may see warnings and deprecation notices. You may also see messages about "vulnerabilities" along with recommendations to run `npm audit fix`. You should ignore all such messages. There is no need to take any action. +2. If SQLite is in your list of nodes, be aware that it needs to be compiled from its source code. It takes a *long* time, outputs an astonishing number of warnings and, from time to time, will look as if it has gotten stuck. Be patient. + + > Acknowledgement: Successful installation of the SQLite node is thanks to @fragolinux. + +When you run the `docker images` command after Node-RED has been built, you will see something like this: + +``` console +$ docker images +REPOSITORY TAG IMAGE ID CREATED SIZE +iotstack-nodered latest 9feeb87019cd 11 days ago 945MB +``` + +The image name `iotstack-nodered` is the concatenation of two components: + +1. The `docker-compose` *project* name. This is the all-lower-case representation of the name of the folder containing `docker-compose.yml`. In a default clone of IOTstack, the folder name is `IOTstack` so the project name is `iotstack`. +2. The name of the service definition which, for Node-RED is `nodered`. + +When you install Node-RED for the first time, the entire process of downloading a *base* image from Dockerhub, building a *local* image by running your local Dockerfile ❷, and then instantiating that *local* image as your running container, is all completely automatic. + +However, *after* that first build, your *local* image is essentially frozen and it needs special action on your part to keep it up-to-date. See [maintaining Node-RED](#maintainNodeRed) and, in particular: + +* [Re-building the local image](#rebuildNodeRed) if you change the Dockerfile; and +* [Upgrading Node-RED](#upgradeNodeRed) if you want to reconstruct your *local* image based on an updated *base* image which has become available on [DockerHub](https://hub.docker.com/r/nodered/node-red/tags). + +## Securing Node-RED { #securingNodeRed } + +### Setting an encryption key for your credentials { #encryptionKey } + +After you install Node-RED, you should set an encryption key. Completing this step will silence the warning you will see when you run: + +``` console +$ docker logs nodered +… +--------------------------------------------------------------------- +Your flow credentials file is encrypted using a system-generated key. + +If the system-generated key is lost for any reason, your credentials +file will not be recoverable, you will have to delete it and re-enter +your credentials. + +You should set your own key using the 'credentialSecret' option in +your settings file. Node-RED will then re-encrypt your credentials +file using your chosen key the next time you deploy a change. 
+--------------------------------------------------------------------- +… +``` + +Setting an encryption key also means that any credentials you create will be *portable*, in the sense that you can backup Node-RED on one machine and restore it on another. + +The encryption key can be any string. For example, if you have UUID support installed (`sudo apt install -y uuid-runtime`), you could generate a UUID as your key: + +``` console +$ uuidgen +2deb50d4-38f5-4ab3-a97e-d59741802e2d +``` + +Once you have defined your encryption key, use `sudo` and your favourite text editor to open this file: + +``` +~/IOTstack/volumes/nodered/data/settings.js +``` + +Search for `credentialSecret`: + +``` + //credentialSecret: "a-secret-key", +``` + +Un-comment the line and replace `a-secret-key` with your chosen key. Do not remove the comma at the end of the line. The result should look something like this: + +``` + credentialSecret: "2deb50d4-38f5-4ab3-a97e-d59741802e2d", +``` + +Save the file and then restart Node-RED: + +``` console +$ cd ~/IOTstack +$ docker-compose restart nodered +``` + +### Setting a username and password for Node-RED { #credentials } + +To secure Node-RED you need a password hash. Run the following command, replacing `PASSWORD` with your own password: + +``` console +$ docker exec nodered node -e "console.log(require('bcryptjs').hashSync(process.argv[1], 8));" PASSWORD +``` + +You will get an answer that looks something like this: + +``` +$2a$08$gTdx7SkckJVCw1U98o4r0O7b8P.gd5/LAPlZI6geg5LRg4AUKuDhS +``` + +Copy that text to your clipboard, then follow the instructions at [Node-RED User Guide - Securing Node-RED - Username & Password-based authentication](https://nodered.org/docs/user-guide/runtime/securing-node-red#usernamepassword-based-authentication). + +## Referring to other containers { #containerNames } + +Node-RED can run in two modes. By default, it runs in "non-host mode" but you can also move the container to "host mode" by editing the Node-RED service definition in your *Compose* file to: + +1. Add the following directive: + + ``` yml + network_mode: host + ``` + +2. Remove the `ports` directive and the mapping of port 1880. + +### When Node-RED is not in host mode { #nonHostMode } + +Most examples on the web assume Node-RED and other services in the MING (Mosquitto, InfluxDB, Node-RED, Grafana) stack have been installed natively, rather than in Docker containers. Those examples typically include the loopback address + port syntax, like this: + +``` +127.0.0.1:1883 +``` + +The loopback address will not work when Node-RED is in non-host mode. This is because each container behaves like a self-contained computer. The loopback address means "this container". It does not mean "this Raspberry Pi". + +You refer to other containers by their container name. For example, a flow subscribing to an MQTT feed provided by the mosquitto container uses: + +``` +mosquitto:1883 +``` + +Similarly, if a flow writes to an InfluxDB database maintained by the influxdb container, the flow uses: + +``` +influxdb:8086 +``` + +Behind the scenes, Docker maintains a table, similar to an `/etc/hosts` file, mapping container names to the IP addresses on the internal bridged network that are assigned, dynamically, by Docker, when it spins up each container. 
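+
+If you want to satisfy yourself that a container name actually resolves, you can query Docker's internal DNS from inside the Node-RED container. This is just a quick sanity check; it assumes the `mosquitto` container is running and that the `nslookup` applet is available in the image (it is provided by BusyBox in the Alpine-based Node-RED image):
+
+``` console
+$ docker exec nodered nslookup mosquitto
+```
+
+The address returned is assigned dynamically by Docker and can change whenever your stack is rebuilt, which is why flows should always refer to other containers by *name* rather than by a hard-coded IP address.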
+ +### When Node-RED is in host mode { #hostmode } + +This is where you use loopback+port syntax, such as the following to communicate with Mosquitto: + +``` +127.0.0.1:1883 +``` + +What actually occurs is that Docker is listening to external port 1883 on behalf of Mosquitto. It receives the packet and routes it (layer three) to the internal bridged network, performing network address translation (NAT) along the way to map the external port to the internal port. Then the packet is delivered to Mosquitto. The reverse happens when Mosquitto replies. It works but is less efficient than when all containers are in non-host mode. + +## Referring to the host { #hostReference } + +When the container is running in non-host mode, there are several ways in which it can refer to the host on which the container is running: + +1. via the IP address of one of the host's interfaces; +2. via the fully-qualified domain name of the host (ie same as the above but via the Domain Name System) +3. via the default gateway on the Docker bridge network. + +The problem with the first two is that they tie your flows to the specific host. + +The third method is *portable*, meaning a flow can conceptually refer to "this" host and be independent of the actual host on which the container is running. + +### Bridge network - default gateway { #defaultBridge } + +* Method 1 + + The default gateway on the Docker bridge network is *usually* "172.17.0.1". You can confirm the IP address by running: + + ``` console + $ docker network inspect bridge | jq .[0].IPAM.Config[0].Gateway + "172.17.0.1" + ``` + + > If `jq` is not installed on your system, you can install it by running `sudo apt install -y jq`. + + If you use this method, your flows can refer to "this" host using the IP address "172.17.0.1". + +* Method 2 + + Alternatively, you can add the following lines to your Node-RED service definition: + + ``` yaml + extra_hosts: + - "host.docker.internal:host-gateway" + ``` + + If you use this method, your flows can refer to "this" host using the domain name "host.docker.internal". + + Generally the second method is recommended for IOTstack. That is because your flows will continue to work even if the 172.17.0.1 IP address changes. However, it does come with the disadvantage that, if you publish a flow containing this domain name, the flow will not work unless the recipient also adds the `extra_hosts` clause. + +## GPIO Access { #accessGPIO } + +To communicate with your Raspberry Pi's GPIO you need to do the following: + +1. Install dependencies: + + ``` console + $ sudo apt update + $ sudo apt install pigpio python-pigpio python3-pigpio + ``` + + Notes: + + * `pigpio` and `python3-pigpio` are usually installed by default in standard releases of Raspberry Pi OS. + * Only `pigpio` is actually *required*. + * The Python packages are *optional*. + +2. Install the `node-red-node-pi-gpiod` node. See [component management](#componentManagement). It allows you to connect to multiple Pis from the same Node-RED service. + + Note: + + * Unless you explicitly removed `node-red-node-pi-gpiod` from the list of add-on nodes added to your [Dockerfile](#viaDockerfile) by the IOTstack menu, it will be installed already. You can confirm this by examining your Node-RED Dockerfile ❷. + +3. 
Configure the `pigpdiod` daemon: + + * copy the following text to the clipboard: + + ``` ini linenums="1" + [Unit] + Requires=default.target + After=default.target + [Service] + ExecStart= + ExecStart=/usr/bin/pigpiod + [Install] + WantedBy= + WantedBy=default.target + ``` + + > Acknowledgement: some of the above from [joan2937/pigpio issue 554](https://github.com/joan2937/pigpio/issues/554#issuecomment-1405364041) + + * execute the following commands: + + ``` console + $ sudo systemctl stop pigpiod + $ sudo systemctl revert pigpiod + $ sudo systemctl edit pigpiod + ``` + + * follow the on-screen instructions and paste the contents of the clipboard into the blank area between the lines. The final result should be (lines 4…12 are the pasted material): + + ``` ini linenums="1" + ### Editing /etc/systemd/system/pigpiod.service.d/override.conf + ### Anything between here and the comment below will become the new contents of the file + + [Unit] + Requires=default.target + After=default.target + [Service] + ExecStart= + ExecStart=/usr/bin/pigpiod + [Install] + WantedBy= + WantedBy=default.target + + ### Lines below this comment will be discarded + + ### /lib/systemd/system/pigpiod.service + # [Unit] + # Description=Daemon required to control GPIO pins via pigpio + # [Service] + # ExecStart=/usr/bin/pigpiod -l + # ExecStop=/bin/systemctl kill pigpiod + # Type=forking + # [Install] + # WantedBy=multi-user.target + ``` + + * Save your work by pressing: + + - control+O (letter O not zero) + - return + - control+X + + * Check your work by running: + + ``` console + $ sudo systemctl cat pigpiod + ``` + + The expected result is: + + ``` ini linenums="1" + # /lib/systemd/system/pigpiod.service + [Unit] + Description=Daemon required to control GPIO pins via pigpio + [Service] + ExecStart=/usr/bin/pigpiod -l + ExecStop=/bin/systemctl kill pigpiod + Type=forking + [Install] + WantedBy=multi-user.target + + # /etc/systemd/system/pigpiod.service.d/override.conf + [Unit] + Requires=default.target + After=default.target + [Service] + ExecStart= + ExecStart=/usr/bin/pigpiod + [Install] + WantedBy= + WantedBy=default.target + ``` + + Lines 12…20 should be those you copied to the clipboard at the start of this step. If you do not see the expected result, go back and start from the beginning of this step. + + * Activate the daemon: + + ``` console + $ sudo systemctl enable pigpiod + $ sudo systemctl start pigpiod + ``` + + * Reboot. + + * Check that the daemon is running: + + ``` console + $ sudo systemctl status pigpiod + ``` + + Once you have configured `pigpiod` correctly and it has come up after a reboot, you should not need to worry about it again. + + `pigpiod` provides open access to your Raspberry Pi's GPIO via port 8888. Consult the `man` pages if you want to make it more secure. Once you have decided what to do, start over from the beginning of this step, and add your parameters to the line: + + ``` ini linenums="6" + ExecStart=/usr/bin/pigpiod + ``` + +4. Drag a `pi gpio` node onto the canvas. Configure it according to your needs. + + The `Host` field should be set to one of: + + * `172.17.0.1`; or + * `host.docker.internal` + + See also [Bridge network - default gateway](#defaultBridge). + + Don't try to use 127.0.0.1 because that is the loopback address of the Node-RED container. + +## Serial Devices { #accessSerial } + +### USB devices { #usbSerial } + +Node-RED running in a container *can* communicate with serial devices attached to your Raspberry Pi's USB ports. 
However, it does not work "out of the box". You need to set it up. + +Let's make an assumption. A device connected to one of your Raspberry Pi's USB ports presents itself as: + +``` +/dev/ttyUSB0 +``` + +You have three basic options: + +1. You can map the device into the container using that name: + + ``` yaml + devices: + - "/dev/ttyUSB0:/dev/ttyUSB0" + ``` + + This is simple and effective but it suffers from a few problems: + + * If the device is disconnected while the container is running, there's a good chance the container will crash. + * `docker-compose` will not start your container if the device is not present when you bring up your stack. + * You can't guarantee that the device will *always* enumerate as "ttyUSB0". It might come up as "ttyUSB1". + + You can deal with the last problem by using the device's "by-id" path. There's an example of this in the [Zigbee2MQTT](https://sensorsiot.github.io/IOTstack/Containers/Zigbee2MQTT/#identifyAdapter) documentation. + + Options 2 and 3 (below) deal with the first two problems in the sense that: + + * a device disconnection is unlikely to cause the container to crash (the flow might); + * `docker-compose` will always start the container, irrespective of whether devices are actually attached to your USB ports. + + Options 2 and 3 (below) can't provide a workaround for devices being given different names via enumeration but you can still deal with that by using the device's "by-id" path (as explained above). + +2. You can map a *class* of devices: + + * modify the `volumes` clause to add a read-only mapping for `/dev`: + + ``` yaml + volumes: + - /dev:/dev:ro + ``` + + > The "read-only" flag (`:ro`) prevents the container from doing dangerous things like destroying your Raspberry Pi's SD or SSD. Please don't omit that flag! + + * discover the major number for your device: + + ``` console + $ ls -l /dev/ttyUSB0 + crw-rw---- 1 root dialout 188, 0 Feb 18 15:30 /dev/ttyUSB0 + ``` + + In the above, the `188, 0` string means the major number for ttyUSB0 is "188" and "0" the minor number. + + * add two device CGroup rules: + + ``` yaml + device_cgroup_rules: + - 'c 1:* rw' # access to devices like /dev/null + - 'c 188:* rmw' # change numbers to your device + ``` + + In the above: + + * "188" is the major number for ttyUSB0 and you should substitute accordingly if your device has a different major number. + + * the "*" is a wildcard for the minor number. + +3. Use the "privileged" flag by adding the following to your Node-RED service definition: + + ``` yaml + privileged: true + ``` + + Please make sure you read the following references **BEFORE** you select this option: + + * [Privileged vs root](https://www.howtogeek.com/devops/privileged-vs-root-in-docker-whats-the-difference/) + * [Mind the 'privileged' flag](https://betterprogramming.pub/docker-tips-mind-the-privileged-flag-d6e2ae71bdb4) + +### node-red-node-serialport { #nodeSerial } + +At the time of writing (Feb 2023), it was not possible to add `node-red-node-serialport` to the list of nodes in your Dockerfile. Attempting to do so crashed the Node-RED container with a *segmentation fault.* The workaround is to build the node from source by adding an extra line at the *end* of your Dockerfile: + +``` Dockerfile +RUN npm install node-red-node-serialport --build-from-source +``` + +### hardware serial port { #piSerial } + +Historically, `/dev/ttyAMA0` referred to the Raspberry Pi's serial port. 
The situation became less straightforward once Pis gained Bluetooth capabilities: + +* On Pis *without* Bluetooth hardware: + + - `/dev/ttyAMA0` means the serial port; and + - `/dev/serial0` is a symlink to `/dev/ttyAMA0` + +* On Pis *with* Bluetooth capabilities: + + - `/dev/ttyS0` means the serial port; and + - `/dev/serial0` is a symlink to `/dev/ttyS0` + + In addition, whether `/dev/ttyS0` (and, therefore, `/dev/serial0`) are present at runtime depends on adding the following line to `config.txt`: + + ``` + enable_uart=1 + ``` + + And, if that isn't sufficiently confusing, the location of `config.txt` depends on the OS version: + + * Bullseye (and earlier): `/boot/config.txt` + * Bookworm: `/boot/firmware/config.txt` + +Rolling all that together, if you want access to the hardware serial port from Node-RED, you need to: + +1. Add `enable_uart=1` to `config.txt`. +2. Reboot. +3. Add a device-mapping to Node-RED's service definition: + + ``` yaml + devices: + - /dev/serial0:/dev/«internalDevice» + ``` + + where `«internalDevice»` is whatever device the add-on node you're using is expecting, such as `ttyAMA0`. + +4. Recreate the Node-RED container by running: + + ``` console + $ cd ~/IOTstack + $ docker-compose up -d nodered + ``` + +### Bluetooth device { #bluetoothSupport } + +If you enable the `node-red-contrib-generic-ble` add on node, you will also need to make the following changes: + +1. If you are running Bookworm, you will need to use `sudo` to edit this file: + + ``` + /boot/firmware/config.txt + ``` + + You need to add this line to the end of the file: + + ``` + dtparam=krnbt=off + ``` + + You then need to reboot. This adds the Bluetooth device to `/dev`. + +2. Find the the Node-RED service definition in your `docker-compose.yml`: + + * Add the following mapping to the `volumes:` clause: + + ```yaml + - /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket + ``` + + * Add the following `devices:` clause: + + ```yaml + devices: + - "/dev/serial1:/dev/serial1" + - "/dev/vcio:/dev/vcio" + - "/dev/gpiomem:/dev/gpiomem" + ``` + +3. Recreate the Node-RED container: + + ``` console + $ cd ~/IOTstack + $ docker-compose up -d nodered + ``` + +Notes: + +* These changes are *specific* to the Raspberry Pi. If you need Bluetooth support on non-Pi hardware, you will need to figure out the details for your chosen platform. +* Historically, `/dev/ttyAMA0` meant the serial interface. Subsequently, it came to mean the Bluetooth interface but only where Bluetooth hardware was present, otherwise it still meant the serial interface. + + On Bookworm and later, if it is present, `/dev/ttyAMA1` means the Bluetooth Interface. + + On Bullseye and later, `/dev/serial1` is a symbolic link pointing to whichever of `/dev/ttyAMA0` or `/dev/ttyAMA1` means the Bluetooth interface. This means that `/dev/serial1` is the most reliable way of referring to the Bluetooth Interface. That's why it appears in the `devices:` clause above. + +## Sharing files between Node-RED and the Raspberry Pi { #fileSharing } + +Containers run in a sandboxed environment. A process running inside a container can't see the Raspberry Pi's file system. Neither can a process running outside a container access files inside the container. + +This presents a problem if you want write to a file outside a container, then read from it inside the container, or vice-versa. + +IOTstack containers have been set up with shared volume mappings. 
Each volume mapping associates a specific directory in the Raspberry Pi file system with a specific directory inside the container. If you write to files in a shared directory (or one of its sub-directories), both the host and the container can see the same sub-directories and files. + +Key point: + +* Files and directories in the shared volume are persistent between restarts. If you save your data anywhere else inside the container, it will be lost when the container is rebuilt. + +The Node-RED service definition in the *Compose* file includes the following: + +``` yaml +volumes: + - ./volumes/nodered/data:/data +``` + +That decomposes into: + +* external path = `./volumes/nodered/data` +* internal path = `/data` + +The leading "." on the external path implies "the folder containing the *Compose* file so it actually means: + +* external path = `~/IOTstack/volumes/nodered/data` +* internal path = `/data` + +If you write to the **internal** path from **inside** the Node-RED container, the Raspberry Pi will see the results at the **external** path, and vice versa. Example: + +``` console +$ docker exec -it nodered bash +# echo "The time now is $(date)" >/data/example.txt +# cat /data/example.txt +The time now is Thu Apr 1 11:25:56 AEDT 2021 +# exit +$ cat ~/IOTstack/volumes/nodered/data/example.txt +The time now is Thu Apr 1 11:25:56 AEDT 2021 +$ sudo rm ~/IOTstack/volumes/nodered/data/example.txt +``` + +In words: + +1. Open a shell into the Node-RED container. Two things happen: + + * You are now **inside** the container. Any commands you execute while in this shell are run **inside** the container; and + * The prompt changes to a "#" indicating that you are running as the "root" user, meaning you don't need `sudo` for anything. + +2. Use the `echo` command to create a small file which embeds the current timestamp. The path is in the `/data` directory which is mapped to the Raspberry Pi's file system. +3. Show that the file has been created inside the container. +4. Exit the shell: + * You can either type the `exit` command and press return, or press Control+D. + * Exiting the shell drops you out of the container so the "$" prompt returns, indicating that you are **outside** the Node-Red container, running as a non-root user ("pi"). +5. Show that the same file can be seen from **outside** the container. +6. Tidy-up by removing the file. You need `sudo` to do that because the persistent storage area at the **external** path is owned by root, and you are running as user "pi". + +You can do the same thing from within a Node-RED flow. + +![image](./images/nodered-flow-write-persistent-file.png) + +The flow comprises: + +* An Inject node, wired to a Template node. + + - *When an Inject node's input tab is clicked, it sets the message payload to the number of seconds since 1/1/1970 UTC and triggers the flow.* + +* A Template node, wired to both a Debug node and a File node. The template field is set to: + + ``` + The time at the moment is {{payload}} seconds since 1/1/1970 UTC ! + ``` + + - *When this node runs, it replaces `{{payload}}` with the seconds value supplied by the Inject node.* + +* A Debug node. + + - *When this node runs, it displays the payload in the debug window on the right hand side of the Node-RED GUI.* + +* A File node. The "Filename" field of the node is set to write to the path: + + ``` + /data/flow-example.txt + ``` + + - *When this node runs, it writes the payload to the specified file. 
Remember that `/data` is an internal path within the Node-RED container.*
+
+Deploying the flow and clicking on the Inject node results in the debug message shown on the right hand side of the screen shot. The embedded terminal window shows that the same information is accessible from outside the container.
+
+You can reverse this process. Any file you place within the path `~/IOTstack/volumes/nodered/data` can be read by a "File in" node.
+
+## Executing commands outside the Node-RED container { #sshOutside }
+
+A reasonably common requirement in a Node-RED flow is the ability to execute a command on the host system. The standard tool for this is an "exec" node.
+
+An "exec" node works as expected when Node-RED is running as a native service but not when Node-RED is running in a container. That's because the command spawned by the "exec" node runs **inside** the container.
+
+To help you understand the difference, consider this command:
+
+``` console
+$ grep "^PRETTY_NAME=" /etc/os-release
+```
+
+When you run that command on a Raspberry Pi **outside** container-space, the answer will be something like:
+
+```
+PRETTY_NAME="Debian GNU/Linux 11 (bullseye)"
+```
+
+If you run the same command **inside** a Node-RED container, the output will reflect the operating system upon which the container is based, such as:
+
+```
+PRETTY_NAME="Alpine Linux v3.16"
+```
+
+The same thing will happen if a Node-RED "exec" node executes that `grep` command when Node-RED is running in a container. It will see the "Alpine Linux" answer.
+
+Docker doesn't provide any mechanism for a container to execute an arbitrary command **outside** of its container. A workaround is to utilise SSH. The remainder of this section explains how to set up the SSH scaffolding so that "exec" nodes running in a Node-RED container can invoke arbitrary commands **outside** container-space.
+
+### Task Goal { #sshTaskGoal }
+
+Be able to use a Node-RED "exec" node to perform the equivalent of:
+
+``` console
+$ ssh host.docker.internal «COMMAND»
+```
+
+where `«COMMAND»` is any command known to the target host.
+
+This section uses `host.docker.internal` throughout. That name comes from method 2 of [bridge network - default gateway](#defaultBridge) but, in principle, you can refer to the host using any mechanism described in [referring to the host](#hostReference).
+
+### Assumptions { #sshAssumptions }
+
+* [SensorsIot/IOTstack](https://github.com/SensorsIot/IOTstack) is installed on your Raspberry Pi.
+* The Node-RED container is running.
+* The user name of the account on the host where you want Node-RED flows to be able to run commands is "pi". This user name is not mandatory. Simply substitute your own user name wherever you see "pi" in these examples.
+
+These instructions are specific to IOTstack but the underlying concepts should apply to any installation of Node-RED in a Docker container.
+
+### Executing commands "inside" a container { #dockerExec }
+
+These instructions make frequent use of the ability to run commands "inside" the Node-RED container. For example, suppose you want to execute:
+
+``` console
+$ grep "^PRETTY_NAME=" /etc/os-release
+```
+
+You have several options:
+
+1. You can do it from the normal Raspberry Pi command line using a Docker command. 
The basic syntax is: + + ``` console + $ docker exec {-it} «containerName» «command and parameters» + ``` + + The actual command you would need would be: + + ``` console + $ docker exec nodered grep "^PRETTY_NAME=" /etc/os-release + ``` + + Note: + + * The `-it` flags are *optional*. They mean "interactive" and "allocate pseudo-TTY". Their presence tells Docker that the command may need user interaction, such as entering a password or typing "yes" to a question. + +2. You can open a shell into the container, run as many commands as you like inside the container, and then exit. For example: + + ``` console + $ docker exec -it nodered bash + # grep "^PRETTY_NAME=" /etc/os-release + # whoami + # exit + $ + ``` + + In words: + + * Run the `bash` shell inside the Node-RED container. You need to be able to interact with the shell to type commands so the `-it` flag is required. + * The "#" prompt is coming from `bash` running inside the container. It also signals that you are running as the root user inside the container. + * You run the `grep`, `whoami` and any other commands. + * You finish with the `exit` command (or Control+D). + * The "$" prompt means you have left the container and are back at the normal Raspberry Pi command line. + +3. Run the command from Portainer by selecting the container, then clicking the ">_ console" link. This is identical to opening a shell. + +### Step 1: *Generate SSH key-pair for Node-RED* (one time) { #sshStep1 } + +Create a key-pair for Node-RED. This is done by executing the `ssh-keygen` command **inside** the container: + +``` console +$ docker exec -it nodered ssh-keygen -q -t ed25519 -C "Node-RED container key-pair" -N "" +``` + +Notes: + +* The "ed25519" elliptic curve algorithm is recommended (generally described as quicker and more secure than RSA) but you can use the default RSA algorithm if you prefer. +* Respond to the "Enter file in which to save the key" prompt by pressing return to accept the default location. +* If `ssh-keygen` displays an "Overwrite (y/n)?" message, it implies that a key-pair already exists. You will need to decide what to do: + * press y to overwrite (and lose the old keys) + * press n to terminate the command, after which you can investigate why a key-pair already exists. + +### Step 2: *Exchange keys with target hosts* (once per target host) { #sshStep2 } + +Node-RED's public key needs to be copied to the "pi" user account on the host where you want a Node-RED "exec" node to be able to execute commands. At the same time, the Node-RED container needs to learn the host's public key. The `ssh-copy-id` command does both steps. The command is: + +``` console +$ docker exec -it nodered ssh-copy-id pi@host.docker.internal +``` + +The output will be something similar to the following: + +``` +/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_ed25519.pub" +The authenticity of host 'host.docker.internal (172.17.0.1)' can't be established. +ED25519 key fingerprint is SHA256:gHMlhvArbUPJ807vh5qNEuyRCeNUQQTKEkmDS6qKY6c. +This key is not known by any other names +Are you sure you want to continue connecting (yes/no/[fingerprint])? yes +``` + +Respond to the prompt by typing "yes" and pressing return. 
+ +The output continues: + +``` +/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed +expr: warning: '^ERROR: ': using '^' as the first character +of a basic regular expression is not portable; it is ignored +/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys +pi@host.docker.internal's password: +``` +The response may look like it contains errors but those can be ignored. + +Enter the password you use to login as "pi" on the host and press return. + +Normal completion looks similar to this: + +``` +Number of key(s) added: 1 + +Now try logging into the machine, with: "ssh 'pi@host.docker.internal'" +and check to make sure that only the key(s) you wanted were added. +``` + +If you do not see an indication that a key has been added, you may need to retrace your steps. + +### Step 3: *Perform the recommended test* { #sshStep3 } + +The output above recommends a test. The test needs to be run **inside** the Node-RED container so the syntax is: + +``` console +$ docker exec -it nodered ssh pi@host.docker.internal ls -1 /home/pi/IOTstack +``` + +You should not be prompted for a password. If you are, you may need to retrace your steps. + +If everything works as expected, you should see a list of the files in your IOTstack folder. + +Assuming success, think about what just happened? You told SSH **inside** the Node-RED container to run the `ls` command **outside** the container on your Raspberry Pi. You broke through the containerisation. + +### Understanding what's where and what each file does { #sshWhatsWhere } + +#### What files are where { #sshFileLocations } + +Six files are relevant to Node-RED's ability to execute commands outside of container-space: + +* in `/etc/ssh`: + + - `ssh_host_ed25519_key` is the Raspberry Pi's private host key + - `ssh_host_ed25519_key.pub` is the Raspberry Pi's public host key + + Those keys were created when your Raspberry Pi was initialised. They are unique to the host. + + Unless you take precautions, those keys will change whenever your Raspberry Pi is rebuilt from scratch and that **will** prevent a Node-RED "exec" node from being able to invoke SSH to call out of the container. + + You can recover by re-running [`ssh-copy-id`](#sshStep2). + +* in `~/IOTstack/volumes/nodered/ssh`: + + - `id_ed25519` is the Node-RED container's private key + - `id_ed25519.pub` is the Node-RED container's public key + + Those keys were created when you generated the SSH key-pair for Node-RED. + + They are unique to Node-RED but will follow the container in backups and will work on the same machine, or other machines, if you restore the backup. + + It does not matter if the Node-RED container is rebuilt or if a new version of Node-RED comes down from DockerHub. These keys will remain valid until lost or overwritten. + + If you lose or destroy these keys, that **will** prevent a Node-RED "exec" node from being able to invoke SSH to call out of the container. + + You can recover by [generating new keys](#sshStep1) and then re-running [`ssh-copy-id`](#sshStep2). + + - `known_hosts` + + The `known_hosts` file contains a copy of the Raspberry Pi's public host key. It was put there by `ssh-copy-id`. + + If you lose this file or it gets overwritten, invoking SSH inside the container **will** still work but it will re-prompt for authorisation to connect. 
You will see the prompt if you run commands via `docker exec -it` but not when invoking SSH from an "exec" node. + + Note that authorising the connection at the command line ("Are you sure you want to continue connecting?") will auto-repair the `known_hosts` file. + +* in `~/.ssh/`: + + - `authorized_keys` + + That file contains a copy of the Node-RED container's public key. It was put there by `ssh-copy-id`. + + Pay attention to the path. It implies that there is one `authorized_keys` file per user, per target host. + + If you lose this file or it gets overwritten, SSH **will** still work but will ask for the password for "pi". This works when you are running commands from `docker exec -it` but not when invoking SSH from an "exec" node. + + Note that providing the correct password at the command line will auto-repair the `authorized_keys` file. + +#### What each file does { #sshFilePurpose } + +SSH running **inside** the Node-RED container uses the Node-RED container's private key to provide assurance to SSH running **outside** the container that it (the Node-RED container) is who it claims to be. + +SSH running **outside** container-space verifies that assurance by using its copy of the Node-RED container's public key in `authorized_keys`. + +SSH running **outside** container-space uses the Raspberry Pi's private host key to provide assurance to SSH running **inside** the Node-RED container that it (the RPi) is who it claims to be. + +SSH running **inside** the Node-RED container verifies that assurance by using its copy of the Raspberry Pi's public host key stored in `known_hosts`. + +### Config file (optional) { #sshConfig } + +You don't **have** to do this step but it will simplify your exec node commands and reduce your maintenance problems if you do. + +At this point, SSH commands can be executed from **inside** the container using this syntax: + +``` console +# ssh pi@host.docker.internal «COMMAND» +``` + +A `config` file is needed to achieve the task goal of the simpler syntax: + +``` console +# ssh host.docker.internal «COMMAND» +``` + +The goal is to set up this file: + +``` +-rw-r--r-- 1 root root ~/IOTstack/volumes/nodered/ssh/config +``` + +The file needs the ownership and permissions shown. There are several ways of going about this and you are free to choose the one that works for you. The method described here creates the file first, then sets correct ownership and permissions, and then moves the file into place. + +Start in a directory where you can create a file without needing `sudo`. The IOTstack folder is just as good as anywhere else: + +``` console +$ cd ~/IOTstack +$ touch config +``` + +Select the following text, copy it to the clipboard. + +``` sshconfig +host host.docker.internal + user pi + IdentitiesOnly yes + IdentityFile /root/.ssh/id_ed25519 +``` + +Open `~/IOTstack/config` in your favourite text editor and paste the contents of the clipboard. Save the file. Change the config file's ownership and permissions, and move it into the correct directory: + +``` console +$ chmod 644 config +$ sudo chown root:root config +$ sudo mv config ./volumes/nodered/ssh +``` + +#### Re-test with config file in place { #sshConfigTest } + +The previous test used this syntax: + +``` console +$ docker exec nodered ssh pi@host.docker.internal ls -1 /home/pi/IOTstack +``` + +Now that the config file is in place, the syntax changes to: + +``` console +$ docker exec nodered ssh host.docker.internal ls -1 /home/pi/IOTstack +``` + +The result should be the same as the earlier test. 
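+
+If you later want flows to reach more than one target host, the same `config` file can carry additional stanzas. The sketch below is hypothetical: it assumes a second machine reachable as `otherpi.local` with a user named "pi", and it will only work once you have repeated [Step 2](#sshStep2) against that host:
+
+``` sshconfig
+host host.docker.internal
+   user pi
+   IdentitiesOnly yes
+   IdentityFile /root/.ssh/id_ed25519
+
+# hypothetical additional target - repeat Step 2 for this host first
+host otherpi.local
+   user pi
+   IdentitiesOnly yes
+   IdentityFile /root/.ssh/id_ed25519
+```
+
+An "exec" node could then run `ssh otherpi.local «COMMAND»` in exactly the same way.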
+
+### A test flow { #sshTestFlow }
+
+![node-red-exec-node-ssh-test](./images/nodered-exec-node-ssh-test.jpeg)
+
+In the Node-RED GUI:
+
+1. Click the "+" to create a new, empty flow.
+2. Drag the following nodes onto the canvas:
+ - One "inject" node
+ - Two "exec" nodes
+ - Two "debug" nodes
+3. Wire the outlet of the "inject" node to the inlet of both "exec" nodes.
+4. Wire the uppermost "stdout" outlet of the first "exec" node to the inlet of the first "debug" node.
+5. Repeat step 4 with the other "exec" and "debug" node.
+6. Open the first "exec" node and:
+ - set the "command" field to:
+
+ ```
+ grep "^PRETTY_NAME=" /etc/os-release
+ ```
+ - turn off the "append msg.payload" checkbox
+ - set the timeout to a reasonable value (eg 10 seconds)
+ - click "Done".
+7. Repeat step 6 with the other "exec" node, with one difference:
+ - set the "command" field to:
+
+ ```
+ ssh host.docker.internal grep "^PRETTY_NAME=" /etc/os-release
+ ```
+
+8. Click the Deploy button.
+9. Set the right hand panel to display debug messages.
+10. Click the touch panel of the "inject" node to trigger the flow.
+11. Inspect the result in the debug panel. You should see payload differences similar to the following:
+
+ ```
+ PRETTY_NAME="Alpine Linux v3.16"
+ PRETTY_NAME="Debian GNU/Linux 11 (bullseye)"
+ ```
+
+ The first line is the result of running the command inside the Node-RED container. The second line is the result of running the same command outside the Node-RED container on the Raspberry Pi.
+
+## Maintaining Node-RED { #maintainNodeRed }
+
+### Starting Node-RED { #startNodeRed }
+
+Use these commands to:
+
+1. Start the container; or
+2. Re-create the container if you have made a material change to the container's service definition in your *Compose* file.
+
+``` console
+$ cd ~/IOTstack
+$ docker-compose up -d nodered
+```
+
+The first time you execute this command, the *base* image of Node-RED is downloaded from DockerHub, and then the *Dockerfile* is run to produce a *local* image. The *local* image is then instantiated to become the running container.
+
+### Stopping Node-RED { #stopNodeRed }
+
+To stop the running container:
+
+``` console
+$ cd ~/IOTstack
+$ docker-compose down nodered
+```
+
+> see also [if downing a container doesn't work](../Basic_setup/index.md/#downContainer)
+
+Alternatively, you can stop the entire stack:
+
+``` console
+$ cd ~/IOTstack
+$ docker-compose down
+```
+
+### Restarting Node-RED { #restartNodeRed }
+
+The `restart` command sends a signal to the processes running within the container. The container itself does not stop.
+
+``` console
+$ cd ~/IOTstack
+$ docker-compose restart nodered
+```
+
+### Re-building the local image { #rebuildNodeRed }
+
+You need to rebuild the *local* image if you do any of the following:
+
+1. Change either of the build arguments (`DOCKERHUB_TAG` or `EXTRA_PACKAGES`) in your *Compose* file.
+2. Make a material change to your Node-RED *Dockerfile*, such as re-running the menu to change your selection of add-on nodes.
+
+To rebuild your *local* image:
+
+``` console
+$ cd ~/IOTstack
+$ docker-compose up --build -d nodered
+$ docker system prune -f
+```
+
+Think of these commands as "re-running the *Dockerfile*". The only time a *base* image will be downloaded from *DockerHub* is when a *base* image with a tag matching the value of `DOCKERHUB_TAG` can't be found on your Raspberry Pi.
+
+Your existing Node-RED container continues to run while the rebuild proceeds. 
Once the freshly-built *local* image is ready, the `up` tells `docker-compose` to do a new-for-old swap. There is barely any downtime for your Node-RED service. + +### Checking for Node-RED updates { #updateNodeRed } + +IOTstack provides a convenience script which can help you work out if a new version of Node-RED is available. You can run it like this: + +``` console +$ ~/IOTstack/scripts/nodered_version_check.sh +``` + +The script is not infallible. It works by comparing the version number in the Node-RED image on your system with a version number stored on GitHub. + +GitHub is always updated *before* a new image appears on *DockerHub*. Sometimes there is a delay of weeks between the two events. For that reason, the script should be viewed more like a meteorological forecast than hard fact. + +The script assumes that your local image builds as `iotstack-nodered:latest`. If you use different tags, you can pass that information to the script. Example: + +``` console +$ ~/IOTstack/scripts/nodered_version_check.sh iotstack-nodered:3.0.2 +``` + +### Upgrading Node-RED { #upgradeNodeRed } + +The only way to know, for certain, when an update to Node-RED is available is to check the [nodered/node-red tags page](https://hub.docker.com/r/nodered/node-red/tags?page=1&ordering=last_updated) on *DockerHub*. + +Once a new version appears on [*DockerHub*](https://hub.docker.com), you can upgrade Node-RED like this: + +``` console +$ cd ~/IOTstack +$ docker-compose build --no-cache --pull nodered +$ docker-compose up -d nodered +$ docker system prune -f +``` + +Breaking it down into parts: + +* `build` causes the named container to be rebuilt; +* `--no-cache` tells the Dockerfile process that it must not take any shortcuts. It really **must** rebuild the *local image*; +* `--pull` tells the Dockerfile process to actually check with [*DockerHub*](https://hub.docker.com) to see if there is a later version of the *base image* and, if so, to download it before starting the build; +* `nodered` is the named container argument required by the `build` command. + +Your existing Node-RED container continues to run while the rebuild proceeds. Once the freshly-built *local* image is ready, the `up` tells `docker-compose` to do a new-for-old swap. There is barely any downtime for your Node-RED service. + +The `prune` is the simplest way of cleaning up old images. Sometimes you need to run this twice, the first time to clean up the old *local* image, the second time for the old *base* image. Whether an old *base* image exists depends on the version of `docker-compose` you are using and how your version of `docker-compose` builds local images. + + +## Node-RED and `node.js` versions { #containerVersions } + +### Checking versions { #checkingVersions } + +You can use the `npm version` command to check which versions of Node-RED and `node.js` are running in your container: + +``` console +$ docker exec nodered npm version +{ + 'node-red-docker': '2.2.2', + npm: '6.14.15', + ares: '1.18.1', + brotli: '1.0.9', + cldr: '37.0', + http_parser: '2.9.4', + icu: '67.1', + llhttp: '2.1.4', + modules: '72', + napi: '8', + nghttp2: '1.41.0', + node: '12.22.8', + openssl: '1.1.1m', + tz: '2019c', + unicode: '13.0', + uv: '1.40.0', + v8: '7.8.279.23-node.56', + zlib: '1.2.11' +} +``` + +In the above: + +* `'node-red-docker': '2.2.2'` indicates that version 2.2.2 of Node-RED is running. 
This is the version number you see at the bottom of the main menu when you click on the "hamburger" icon ("≡") at the top, right of the *Node-Red* window in your browser. +* `node: '12.22.8'` indicates that version 12.x of `node.js` is installed. + +### Controlling versions { #versionControl } + +IOTstack uses a service definition for Node-RED that includes these lines: + +``` yaml linenums="3" + build: + context: ./services/nodered/. + args: + - DOCKERHUB_TAG=latest +``` + +> If you do not see this structure in your *Compose* file, refer to [updating to July 2022 syntax](#july2022syntax). + +The value of the `DOCKERHUB_TAG` gives you the ability to control, from your *Compose* file, which versions of Node-RED and `node.js` run within your Node-RED container. + +The allowable values of `DOCKERHUB_TAG` can be found on the [*DockerHub* Node-RED tags page](https://hub.docker.com/r/nodered/node-red/tags). The table below contains examples of tags that were available on *DockerHub* at the time of writing (2022-07-06): + +tag | Node-RED version | `node.js` version +----------|------------------|------------------ +latest | 2.2.2 | 14.x +latest-14 | 2.2.2 | 14.x 📌 +2.2.2 | 2.2.2 📌 | 14.x +2.2.2-14 | 2.2.2 📌 | 14.x 📌 + +Interpreting the tag: + +1. The sub-string to the left of the hyphen determines the version of Node-RED: + + - "latest" means the most up-to-date version, implying that the actual version number can change any time you follow the process to [upgrade Node-RED](#upgradeNodeRed). + - "2.2.2" pins your container to that specific version of Node-RED, implying that the version number will be frozen until you change the pin. + +2. The sub-string to the right of the hyphen determines the version of `node.js`: + + - "-14" refers to `node.js` version 14.x and pins your container to that specific version of `node.js`. + - If the hyphen and suffix are omitted, it implies that the actual version of `node.js` can change any time you follow the process to [upgrade Node-RED](#upgradeNodeRed). + +In short: + +* If you pin both sides to specific values (eg "2.2.2-14") then all decisions about when to upgrade and which versions to use are under **your** control; but +* If you use "latest" then all timing and version decisions are under the control of the maintainers of the *DockerHub* images. + +IOTstack defaults to "latest". Although this appears to cede control to the maintainers of the *DockerHub* images, in practice it is no different to any other container where you pull its image directly from *DockerHub* using the `latest` tag (irrespective of whether `latest` is explicit or implied by omission). + +The `DOCKERHUB_TAG` argument for Node-RED merely gives you the ability to pin to specific versions of Node-RED from within your *Compose* file, in the same way as you can use tags on `image` directives for other containers. + +For example, suppose you wanted to pin to Node-RED version 2.2.2 with `node.js` version 12: + +1. Edit your *Compose* file so that the `DOCKERHUB_TAG` looks like this: + + ``` yaml + - DOCKERHUB_TAG=2.2.2-12 + ``` + +2. Run the [re-building the local Node-RED image](#rebuildNodeRed) commands. + +Changing a pinned version and rebuilding *may* result in a new *base* image being downloaded from *DockerHub*. + +## Component management { #componentManagement } + +### via Dockerfile { #viaDockerfile } + +You can install components by adjusting the Node-RED *Dockerfile*. 
This can be done by:
+
+* Running the IOTstack menu and changing the selected Node-RED nodes; or
+* Editing your Node-RED *Dockerfile* using a text editor.
+
+Using the IOTstack menu limits your choice of components to those presented in the menu. Editing the *Dockerfile* with a text editor is more flexible but carries the risk that your changes could be lost if you subsequently use the menu method.
+
+To apply changes made to your *Dockerfile*, run the [re-building the local Node-RED image](#rebuildNodeRed) commands.
+
+### via Manage Palette { #viaManagePalette }
+
+You can add, remove or update components in Manage Palette. Node-RED will remind you to restart Node-RED and that is something you have to do by hand:
+
+``` console
+$ cd ~/IOTstack
+$ docker-compose restart nodered
+```
+
+Note:
+
+* Some users have reported misbehaviour from Node-RED if they do too many iterations of:
+
+ - make a change in Manage Palette
+ - restart Node-RED
+
+ It is better to make **all** the changes you intend to make, and only *then* restart Node-RED.
+
+### via `npm` { #viaNPM }
+
+You can also run `npm` inside the container to install any component that could be installed by `npm` in a non-container environment. This is the basic syntax:
+
+``` console
+$ cd ~/IOTstack
+$ docker exec -w /data nodered npm «command» «arguments…»
+$ docker-compose restart nodered
+```
+
+Examples:
+
+* To add the "find my iphone" node:
+
+ ``` console
+ $ docker exec -w /data nodered npm install find-my-iphone-node
+ $ docker-compose restart nodered
+ ```
+
+* To remove the "find my iphone" node:
+
+ ``` console
+ $ docker exec -w /data nodered npm uninstall find-my-iphone-node
+ $ docker-compose restart nodered
+ ```
+
+Note:
+
+* You **must** include `-w /data` on each command. Any formula you find on the web will not include this. You have to remember to do it yourself!
+* Many web examples include the `--save` flag on the `npm` command. That flag is not needed (it is ignored because the behaviour it used to control has been the default since NPM version 5). Node-RED containers have been using NPM version 6 for some time.
+* See also the note above about restarting too frequently.
+* You can use this approach if you need to force the installation of a specific version (which you don't appear to be able to do in Manage Palette). For example, to install version 4.0.0 of the "moment" node:
+
+ ``` console
+ $ docker exec -w /data nodered npm install node-red-contrib-moment@4.0.0
+ $ docker-compose restart nodered
+ ```
+
+### Comparison of methods { #viaWhich }
+
+In terms of outcome, there is no real difference between the various methods. However, some nodes (eg "node-red-contrib-generic-ble" and "node-red-node-sqlite") **must** be installed by *Dockerfile*. The only way of finding out if a component **must** be installed via *Dockerfile* is to try Manage Palette and find that it doesn't work.
+
+Aside from the exception cases that require *Dockerfile* or where you need to force a specific version, it is quicker to install nodes via Manage Palette and applying updates is a bit easier too. But it's really up to you.
+
+If you're wondering about "backup", nodes installed via:
+
+* *Dockerfile* – implicitly backed up when the *Dockerfile* is backed-up.
+* Manage Palette or `npm install` – explicitly backed up when the `~/IOTstack/volumes` directory is backed-up.
+
+Basically, if you're running IOTstack backups then your add-on nodes will be backed-up. 
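+
+As an aside, if you want a quick list of the add-on nodes currently recorded in the persistent store (regardless of which method put them there), you can ask `npm` inside the container. This is only a sketch using the IOTstack defaults:
+
+``` console
+$ docker exec -w /data nodered npm ls --depth=0
+```
+
+Nodes that were baked into the *local* image by your *Dockerfile* should not appear in this list; the next section explains why that distinction matters.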
+ +### Component precedence { #componentPrecedence } + +Components that are installed via *Dockerfile* wind up at the **internal** path: + +``` +/usr/src/node-red +``` + +Components installed via Manage Palette or `docker exec -w /data` wind up at the **internal** path: + +``` +/data +``` + +which is the same as the **external** path: + +``` +~/IOTstack/volumes/nodered/data +``` + +Because there are two places, this invites the question of what happens if a given component is installed in both? The answer is that components installed in `/data` take precedence. + +Or, to put it more simply: in any contest between methods, *Dockerfile* comes last. + +### Resolving node duplication { #fixDuplicateNodes } + +Sometimes, even when you are 100% certain that **you** didn't do it, a component will turn up in both places. There is probably some logical reason for this but I don't know what it is. + +The problem this creates is that a later version of a component installed via *Dockerfile* will be blocked by the presence of an older version of that component installed by a different method. + +The `nodered_list_installed_nodes.sh` script helps discover when this situation exists. For example: + +``` console +$ nodered_list_installed_nodes.sh + +Fetching list of candidates installed via Dockerfile + +Components built into the image (via Dockerfile) + ACTIVE: node-red-admin + ACTIVE: node-red-configurable-ping + ACTIVE: node-red-contrib-boolean-logic + ACTIVE: node-red-contrib-generic-ble + ACTIVE: node-red-contrib-influxdb + ACTIVE: node-red-dashboard + BLOCKED: node-red-node-email + ACTIVE: node-red-node-pi-gpiod + ACTIVE: node-red-node-rbe + ACTIVE: node-red-node-sqlite + ACTIVE: node-red-node-tail + +Fetching list of candidates installed via Manage Palette or npm + +Components in persistent store at + /home/pi/IOTstack/volumes/nodered/data/node_modules + node-red-contrib-boolean-logic-ultimate + node-red-contrib-chartjs + node-red-node-email + node-red-contrib-md5 + node-red-contrib-moment + node-red-contrib-pushsafer +``` + +Notice how the `node-red-node-email` instance installed in the Dockerfile is being blocked. To fix this problem: + +``` console +$ cd ~/IOTstack +$ docker exec -w /data nodered npm uninstall node-red-node-email +$ docker-compose restart nodered +``` + + +## Package management { #packageManagement } + +As well as providing the Node-RED service, the nodered container is an excellent testbed. Installing the DNS tools, Mosquitto clients and tcpdump will help you to figure out what is going on **inside** container-space. + +There are two ways to add extra packages. The first method is to add them to the running container. For example, to add the Mosquitto clients: + +``` console +$ docker exec nodered apk add --no-cache mosquitto-clients +``` + +> The "apk" implies that the Node-RED container is based on Alpine Linux. Keep that in mind when you search for instructions on installing packages. + +Packages installed this way will persist until the container is re-created (eg a `down` and `up` of the stack, or a reboot of your Raspberry Pi). This is a good choice if you only want to run a quick experiment. + +The second method adds the packages to your *local* image every time you rebuild. Because the packages are in the *local* image, they are always in the running container. For example, to include the Mosquitto clients in every build: + +1. 
Edit your *Compose* file to include the package on the `EXTRA_PACKAGES` argument:
+
+ ``` yaml
+ - EXTRA_PACKAGES=mosquitto-clients
+ ```
+
+ > If you do not see the `EXTRA_PACKAGES` argument in your *Compose* file, refer to [updating to July 2022 syntax](#july2022syntax).
+
+2. Rebuild your *local* image by running the [re-building the local Node-RED image](#rebuildNodeRed) commands.
+
+You can specify multiple packages on the same line. For example:
+
+``` yaml
+- EXTRA_PACKAGES=mosquitto-clients bind-tools tcpdump
+```
+
+Notes:
+
+1. Use a space to separate package names.
+2. Do **not** encapsulate the list in quote marks.
+
+## Updating to July 2022 syntax { #july2022syntax }
+
+The primary benefit of the new syntax is that you no longer risk the IOTstack menu overwriting any custom changes you may have made to your Node-RED *Dockerfile*.
+
+If you install a clean copy of IOTstack, run the menu, enable Node-RED and select one or more add-on nodes then both your *Compose* file and *Dockerfile* will use the latest syntax automatically.
+
+If you have an older version of IOTstack installed, the syntax used in your *Compose* file and *Dockerfile* will depend on when you last ran the menu and manipulated Node-RED.
+
+To avoid any uncertainties, you can use a text editor to update your existing *Compose* file and *Dockerfile* to adopt the latest syntax.
+
+### Updating your *Compose* file { #july2022compose }
+
+* Step 1: Implement the new syntactic scaffolding:
+
+ The first three lines of the old syntax are:
+
+ ``` yaml linenums="1"
+ nodered:
+ container_name: nodered
+ build: ./services/nodered/.
+ ```
+
+ Replace line 3 (the one-line `build:` directive) with the following lines:
+
+ ``` yaml linenums="3"
+ build:
+ context: ./services/nodered/.
+ args:
+ - DOCKERHUB_TAG=latest
+ - EXTRA_PACKAGES=
+ ```
+
+* Step 2: Pin to the desired version (optional):
+
+ If your existing *Dockerfile* pins to a specific version, edit the value of `DOCKERHUB_TAG` (line 6 of your updated *Compose* file) to use the tag from your *Dockerfile*. For example, if your existing *Dockerfile* begins with:
+
+ ``` Dockerfile
+ FROM nodered/node-red:latest-12
+ ```
+
+ then line 6 of your *Compose* file should be:
+
+ ``` yaml linenums="6"
+ - DOCKERHUB_TAG=latest-12
+ ```
+
+ Note:
+
+ * IOTstack switched to `latest-12` in March 2021. The default for July 2022 syntax is `latest`. At the time of writing, that is the same as `latest-14`, which is what is recommended by Node-RED. If any of your flows has a dependence on `node.js` version 12 (or if you do not want to take the risk), use `latest-12`.
+
+* Step 3: Define extra packages (optional):
+
+ If your existing *Dockerfile* includes extra packages, edit the value of `EXTRA_PACKAGES` (line 7 of your updated *Compose* file) to list the same packages. For example, if your existing *Dockerfile* includes:
+
+ ``` Dockerfile
+ RUN apk update && apk add --no-cache eudev-dev mosquitto-clients bind-tools tcpdump
+ ```
+
+ then everything *after* `eudev-dev` should appear on line 7 of your *Compose* file:
+
+ ``` yaml linenums="7"
+ - EXTRA_PACKAGES=mosquitto-clients bind-tools tcpdump
+ ```
+
+ Notes:
+
+ * use spaces between package names.
+ * do **not** enclose the list of packages in quotes.
+ * do **not** include `eudev-dev` (it is specified in the [updated *Dockerfile*](#july2022dockerfile)). 
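+
+Putting the three steps together: a fully updated service definition which pins to `latest-12` and carries the same three extra packages would begin like this (a sketch only; your own tag and package list may differ):
+
+``` yaml
+nodered:
+  container_name: nodered
+  build:
+    context: ./services/nodered/.
+    args:
+      # example values from Steps 2 and 3 - substitute your own
+      - DOCKERHUB_TAG=latest-12
+      - EXTRA_PACKAGES=mosquitto-clients bind-tools tcpdump
+```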
+
+### Updating your *Dockerfile* { #july2022dockerfile }
+
+The first four lines of your existing *Dockerfile* will have a structure similar to this:
+
+``` Dockerfile linenums="1"
+FROM nodered/node-red:latest-12
+USER root
+RUN apk update && apk add --no-cache eudev-dev
+USER node-red
+```
+
+> The actual text will depend on whether you have modified the tag in the first line or added extra packages to the third line.
+
+Replace the first four lines of your *Dockerfile* with the following lines:
+
+``` Dockerfile linenums="1"
+# reference argument - omitted defaults to latest
+ARG DOCKERHUB_TAG=latest
+
+# Download base image
+FROM nodered/node-red:${DOCKERHUB_TAG}
+
+# reference argument - omitted defaults to null
+ARG EXTRA_PACKAGES
+ENV EXTRA_PACKAGES=${EXTRA_PACKAGES}
+
+# default user is node-red - need to be root to install packages
+USER root
+
+# install packages
+RUN apk update && apk add --no-cache eudev-dev ${EXTRA_PACKAGES}
+
+# switch back to default user
+USER node-red
+
+# variable not needed inside running container
+ENV EXTRA_PACKAGES=
+
+# add-on nodes follow
+```
+
+All remaining lines of your original *Dockerfile* should be left as-is.
+
+### Applying the new syntax { #july2022build }
+
+Run the [re-building the local Node-RED image](#rebuildNodeRed) commands.
+
+## Alpine vs Debian { #linuxDistro }
+
+The first part of IOTstack's default service definition for Node-RED is shown at [IOTstack first run](#serviceBuildFragment). Although it is not immediately obvious, this results in a container which is based on the Alpine Linux distribution. You can confirm this by running:
+
+``` console
+$ docker exec nodered grep "PRETTY_NAME" /etc/os-release
+PRETTY_NAME="Alpine Linux v3.20"
+```
+
+Historically, Node-RED has been distributed on [DockerHub](https://hub.docker.com/r/nodered/node-red/tags) as two distinct sets of Node-RED images:
+
+* Those based on the Alpine Linux distribution; and
+* Those based on the Debian Linux distribution.
+
+In general, Node-RED images have tracked Alpine releases more consistently than they have Debian. For example, at the time of writing (July 2024):
+
+Image Tag | Distro | Image OS | Current
+----------------|--------|---------------|--------
+`latest` | Alpine | v3.20 | [v3.20](https://alpinelinux.org/releases/)
+`latest-debian` | Debian | 11 (bullseye) | [12 (bookworm)](https://www.debian.org/releases/)
+
+In addition, Node-RED images based on Alpine have offered a greater range of options when it comes to the embedded version of Node.js. At the time of writing:
+
+* image variants based on Alpine Linux include `latest-18`, `latest-20` and `latest-22`, implying a choice of Node.js versions 18, 20 and 22, with version 20 being the default; while
+* the single image variant for Debian Linux is `latest-debian` which comes with Node.js version 20.
+
+Naturally, this situation could change at any time! This information is only here to make the point that, historically, Node-RED images based on Debian have lagged behind Alpine and have only supported a single version of Node.js. This is also the main reason why IOTstack defaults to Alpine images.
+
+However, there may be circumstances where you decide it is appropriate to run a Node-RED image based on Debian. The purpose of this section is not to explore scenarios nor weigh the pros and cons, merely to explain how to adapt your Node-RED service definition to accomplish it. Proceed as follows:
+
+1. 
Make a copy of your existing Dockerfile: + + ``` console + $ cd ~/IOTstack/services/nodered + $ cp Dockerfile Debian.Dockerfile + ``` + + The reason for making a copy is to preserve your existing (Alpine-aware) Dockerfile so you can easily switch back if you break something. + +2. Open `Debian.Dockerfile` in a text editor and make the following changes: + + * Find the line: + + ``` Dockerfile linenums="4" + ARG DOCKERHUB_TAG=latest + ``` + + **Replace** that line with: + + ``` Dockerfile linenums="4" + ARG DOCKERHUB_TAG=latest-debian + ``` + + * Find the line: + + ``` Dockerfile linenums="15" + RUN apk update && apk add --no-cache eudev-dev ${EXTRA_PACKAGES} + ``` + + **Replace** that line with: + + ``` Dockerfile linenums="15" + RUN apt update && apt install -y udev ${EXTRA_PACKAGES} + ``` + + `apk` is the Alpine package manager whereas `apt` is the Debian package manager. + + * Save your work. + +3. Make a copy of your existing compose file: + + ``` console + $ cd ~/IOTstack + $ cp docker-compose.yml docker-compose.yml.bak + ``` + + The reason for making a copy is to preserve your existing (Alpine-aware) service definition so you can easily switch back if you break something. + +4. Open `docker-compose.yml ` in a text editor and make the following changes: + + * Change the Node-RED `build` clause so that it looks like this: + + ``` yaml linenums="3" + build: + context: ./services/nodered/. + dockerfile: Debian.Dockerfile + args: + - DOCKERHUB_TAG=latest-debian + - EXTRA_PACKAGES= + ``` + + There are two key edits: + + 1. **Insert** the `dockerfile` line (as line 5). + 2. **Change** the right hand side of the `DOCKERHUB_TAG` argument from `latest` to `latest-debian` (line 7). + + * If you have any `EXTRA_PACKAGES` specified, you will need to allow for any package-name differences between Alpine and Debian. For example, suppose you are using this list of extra packages with Alpine: + + ``` yaml linenums="8" + - EXTRA_PACKAGES=mosquitto-clients bind-tools tcpdump tree + ``` + + The `mosquitto-clients`, `tcpdump` and `tree` packages have the same names in the `apk` (Alpine) package manager as they do in `apt` (Debian) whereas `bind-tools` is named `dnsutils` in the Debian repositories. Thus the extra packages list for a Debian build would need to be: + + ``` yaml linenums="8" + - EXTRA_PACKAGES=mosquitto-clients dnsutils tcpdump tree + ``` + + * Save your work. + +5. Rebuild Node-RED: + + ``` console + $ cd ~/IOTstack + $ docker-compose build --no-cache --pull nodered + ``` + + If the build process reports any errors, go back and check your work. + +6. Start the new container: + + ``` console + $ docker-compose up -d nodered + ``` + +7. Check that the new container is running properly and hasn't gone into a restart loop: + + ``` console + $ docker ps -a --format "table {{.Names}}\t{{.RunningFor}}\t{{.Status}}\t{{.Size}}" --filter name=nodered + NAMES CREATED STATUS SIZE + nodered 32 seconds ago Up 31 seconds (healthy) 0B (virtual 945MB) + ``` + + Providing the STATUS column reports "healthy" after roughly 30 seconds of runtime, it is usually safe to assume that the container is behaving normally. + +8. Verify the base Linux distribution being used by the container: + + ``` console + $ docker exec nodered grep "PRETTY_NAME" /etc/os-release + PRETTY_NAME="Debian GNU/Linux 11 (bullseye)" + ``` + +9. 
Check your Node-RED and Node.js versions: + + ``` console + $ docker exec nodered npm version --json | jq -r '[.["node-red-docker"],.["node"]] | @tsv' + 4.0.2 20.15.0 + ``` + + Interpretation - the container is running: + + * Node-RED version 4.0.2, with + * Node.js version 20.15.0 + +The actual version numbers you see in the last two steps will depend (obviously) on whatever the good folks who maintain Node-RED thought was appropriate at the time they released whatever `latest-debian` variant is present on DockerHub at the moment when you perform the migration. + +Please keep in mind that none of this affects the IOTstack menu. Re-running the menu is likely to revert your Node-RED service definition to be based on Alpine images. diff --git a/docs/Containers/Octoprint.md b/docs/Containers/Octoprint.md new file mode 100644 index 000000000..3a4412a71 --- /dev/null +++ b/docs/Containers/Octoprint.md @@ -0,0 +1,470 @@ +--- +title: Octoprint +--- +# OctoPrint – the snappy web interface for your 3D printer + +## References + +* [OctoPrint home page](https://octoprint.org) +* [OctoPrint Community Forum](https://community.octoprint.org) +* DockerHub [octoprint/octoprint](https://hub.docker.com/r/octoprint/octoprint) +* GitHub [OctoPrint/octoprint-docker](https://github.com/OctoPrint/octoprint-docker) + +## 3D Printer device mapping + +The first time you try to bring up the OctoPrint container, you should expect to see the following error: + +``` +parsing ~/IOTstack/docker-compose.yml: error while interpolating services.octoprint.devices.[]: required variable OCTOPRINT_DEVICE_PATH is missing a value: eg echo OCTOPRINT_DEVICE_PATH=/dev/serial0 >>~/IOTstack/.env +``` + +The message is telling you that you need to define the device path to your 3D Printer. + +You need to work out *how* your printer presents itself and define the external device accordingly. + +### option 1 - `/dev/ttyUSBn` + +Using "ttyUSBn" will "work" but, because of the inherent variability in the name, this approach is not recommended. + +The "n" in the "ttyUSBn" can vary depending on which USB devices are attached to your Raspberry Pi and the order in which they are attached. The "n" may also change as you add and remove devices. + +If the OctoPrint container is up when the device number changes, the container will crash, and it will either go into a restart loop if you try to bring it up when the expected device is not "there", or will try to communicate with a device that isn't your 3D printer. + +Suppose you choose this method and your 3D Printer mounts as `/dev/ttyUSB0`, you would define your printer like this: + +```console +$ echo OCTOPRINT_DEVICE_PATH=/dev/ttyUSB0 >>~/IOTstack/.env +``` + +### option 2 - `/dev/serial/by-id/xxxxxxxx` + +The "xxxxxxxx" is (usually) unique to your 3D printer. To find it, connect your printer to your Raspberry Pi, then run the command: + +``` console +$ ls -1 /dev/serial/by-id +``` + +You will get an answer like this: + +``` +usb-Silicon_Labs_CP2102N_USB_to_UART_Bridge_Controller_3b14eaa48a154d5e87032d59459d5206-if00-port0 +``` + +Suppose you choose this method and your 3D Printer mounts as shown above. You would define your printer like this: + +```console +$ echo OCTOPRINT_DEVICE_PATH=/dev/serial/by-id/usb-Silicon_Labs_CP2102N_USB_to_UART_Bridge_Controller_3b14eaa48a154d5e87032d59459d5206-if00-port0 >>~/IOTstack/.env +``` + +Note: + +* If you have multiple serial devices attached, you will get multiple lines in the output. 
It is up to you to sort out which one belongs to your 3D printer, possibly by disconnecting and re-attaching the printer and observing how the list changes. +* The uniqueness of device IDs is under the control of the device manufacturer. Each manufacturer *should* ensure their devices are unique but some manufacturers are more diligent than others. +* device *by-id* names follow the device. In other words, if you have two or more Raspberry Pis and a collection of serial devices (3D printers, Zigbee adapters, UARTs, and so on), a 3D printer will always get the same by-id name, irrespective of which Raspberry Pi it is attached to. +* device *by-id* names do not persist if the physical device is disconnected. If you switch off your 3D printer or disconnect the USB cable while the OctoPrint container is running, the container will crash. + +### option 3 - `/dev/humanReadableName` + +Suppose your 3D printer is a MasterDisaster5000Pro, and that you would like to be able to set up the device to use a human-readable name like: + +``` +/dev/MasterDisaster5000Pro +``` + +Start by disconnecting your 3D printer from your Raspberry Pi. Next, run this command: + +``` console +$ tail -f /var/log/messages +``` + +Connect your 3D printer and observe the log output. You are interested in messages that look like this: + +``` +mmm dd hh:mm:ss mypi kernel: [423839.626522] cp210x 1-1.1.3:1.0: device disconnected +mmm dd hh:mm:ss mypi kernel: [431265.973308] usb 1-1.1.3: new full-speed USB device number 10 using dwc_otg +mmm dd hh:mm:ss mypi kernel: [431266.109418] usb 1-1.1.3: New USB device found, idVendor=dead, idProduct=beef, bcdDevice= 1.00 +mmm dd hh:mm:ss mypi kernel: [431266.109439] usb 1-1.1.3: New USB device strings: Mfr=1, Product=2, SerialNumber=3 +mmm dd hh:mm:ss mypi kernel: [431266.109456] usb 1-1.1.3: Product: CP2102N USB to UART Bridge Controller +mmm dd hh:mm:ss mypi kernel: [431266.109471] usb 1-1.1.3: Manufacturer: Silicon Labs +mmm dd hh:mm:ss mypi kernel: [431266.109486] usb 1-1.1.3: SerialNumber: cafe80facefeed +mmm dd hh:mm:ss mypi kernel: [431266.110657] cp210x 1-1.1.3:1.0: cp210x converter detected +mmm dd hh:mm:ss mypi kernel: [431266.119225] usb 1-1.1.3: cp210x converter now attached to ttyUSB0 +``` + +and, in particular, these two lines: + +``` +… New USB device found, idVendor=dead, idProduct=beef, bcdDevice= 1.00 +… SerialNumber: cafe80facefeed +``` + +Terminate the `tail` command by pressing Control+C. + +Use this line as a template: + +``` +SUBSYSTEM=="tty", ATTRS{idVendor}=="«idVendor»", ATTRS{idProduct}=="«idProduct»", ATTRS{serial}=="«SerialNumber»", SYMLINK+="«sensibleName»" +``` + +Replace the «delimited» values with those you see in the log output. For example, given the above log output, and the desire to associate your 3D printer with the human-readable name of "MasterDisaster5000Pro", the result would be: + +``` +SUBSYSTEM=="tty", ATTRS{idVendor}=="dead", ATTRS{idProduct}=="beef", ATTRS{serial}=="cafe80facefeed", SYMLINK+="MasterDisaster5000Pro" +``` + +Next, ensure the required file exists by executing the following command: + +``` console +$ sudo touch /etc/udev/rules.d/99-usb-serial.rules +``` + +> If the file does not exist already, the `touch` command creates an empty file, owned by root, with mode 644 (rw-r--r--) permissions (all of which are correct). + +Use `sudo` and your favourite text editor to edit `/etc/udev/rules.d/99-usb-serial.rules` and insert the "SUBSYSTEM==" line you prepared earlier into that file, then save the file. 
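+
+If you would rather not open an editor, you can append the rule in a single command instead. This is only a sketch using the hypothetical values from the example above; adjust the rule text to match your own device:
+
+``` console
+$ echo 'SUBSYSTEM=="tty", ATTRS{idVendor}=="dead", ATTRS{idProduct}=="beef", ATTRS{serial}=="cafe80facefeed", SYMLINK+="MasterDisaster5000Pro"' | sudo tee -a /etc/udev/rules.d/99-usb-serial.rules
+```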
+ +> Rules files are read on demand so there is no `start` or `reload` command to execute. + +Check your work by disconnecting, then re-connecting your 3D printer, and then run: + +``` +$ ls /dev +``` + +You should expect to see the human-readable name you chose in the list of devices. + +You would then define your printer like this: + +```console +$ echo OCTOPRINT_DEVICE_PATH=/dev/MasterDisaster5000Pro >>~/IOTstack/.env +``` + +Notes: + +* device names follow the device. In other words, if you have two or more Raspberry Pis and a collection of serial devices (3D printers, Zigbee adapters, UARTs, and so on), you can build a single `99-usb-serial.rules` file that you install on *all* of your Raspberry Pis. Then, you can attach a named device to any of your Raspberry Pis and it will always get the same name. +* device names do not persist if the physical device is disconnected. If you switch off your 3D printer or disconnect the USB cable while the OctoPrint container is running, the container will crash. + +## the `/dev/video0:/dev/video0` mapping + +By default, video camera support is disabled. This is because it is unsafe to assume a camera is present on `/dev/video0`. + +> See the [Webcams topic of the Octoprint Community Forum](https://community.octoprint.org/c/support/support-webcams/18) for help configuring other kinds of cameras. + +The OctoPrint docker image includes an MJPG streamer. You do not need to run another container with a streamer unless you want to. + +To activate a Raspberry Pi camera attached via ribbon cable: + +1. Follow the instructions at [raspberrypi.org](https://www.raspberrypi.org/documentation/configuration/camera.md) to connect and test the camera. There are guides on YouTube ([example](https://www.youtube.com/watch?v=T8T6S5eFpqE)) if you need help working out how to insert the ribbon cable. +2. Confirm the presence of `/dev/video0`. +3. Edit `docker-compose.yml` and uncomment **all** of the commented-out lines in the following: + + ``` yaml + environment: + # - ENABLE_MJPG_STREAMER=true + # - MJPG_STREAMER_INPUT=-r 640x480 -f 10 -y + # - CAMERA_DEV=/dev/video0 + + devices: + # - /dev/video0:/dev/video0 + ``` + + Note: + + * The device path on the right hand side of the `CAMERA_DEV` environment variable corresponds with the right hand side (ie *after* the colon) of the device mapping. There should be no reason to change either. + +The "640x480" `MJPG_STREAMER_INPUT` settings will probably result in your camera feed being "letterboxed" but they will get you started. A full list of options is at [mjpg-streamer-configuration-options](https://community.octoprint.org/t/available-mjpg-streamer-configuration-options/1106). + +The typical specs for a baseline Raspberry Pi camera are: + +* 1080p 720p 5Mp Webcam +* Max resolution: 2592x1944 +* Max frame rate: VGA 90fps, 1080p 30fps +* CODEC: MJPG H.264 AVC + +For that type of camera, the following is probably more appropriate: + +``` yaml + - MJPG_STREAMER_INPUT=-r 1152x648 -f 10 +``` + +The resolution of 1152x648 is 60% of 1080p 1920x1080 and does not cause letterboxing. The resolution and rate of 10 frames per second won't over-tax your communications links, and the camera is MJPEG-capable so it does not need the `-y` option. + +## Practical usage + +### starting the OctoPrint container + +To start a print session: + +1. Turn the 3D printer on. +2. 
Bring up the container: + + ``` console + $ cd ~/IOTstack + $ docker-compose up -d octoprint + ``` + +If you try to start the OctoPrint container before your 3D printer has been switched on and the USB interface has registered with the Raspberry Pi, the container will go into a restart loop. + +### first run – the Setup Wizard + +Use a browser to point to port 9980 on your Raspberry Pi. For example: + +``` +http://raspberrypi.local:9980 +``` + +This will launch the "Setup Wizard". + +1. Click the "Next" button until you reach the "Access Control" screen: + + * Define a Username and Password, and keep a record of your decisions. + * Click "Create Account". + * Ignore the alarming popup alert by clicking "Ignore". This alert is a result of OctoPrint running in a Docker container. + * Click "Next". + +2. At the "Online Connectivity Check" screen: + + * Click "Disable Connectivity Check". + * Click "Next". + +3. At the "Configure Anonymous Usage Tracking" and "Configure plugin blacklist processing" screens: + + * Make a decision about whether you want the feature enabled or disabled and click the appropriate button. + * Click "Next". + +4. At the "Set up your printer profile" screen: + + * It is probably a good idea to visit the tabs and set values appropriate to your printer (build volume, at least). + * Click "Next". + +5. At the "Server Commands" screen: + + * Enter the following in the "Restart OctoPrint" field: + + ``` + s6-svc -r /var/run/s6/services/octoprint + ``` + + * Click "Next". + +6. At the "Webcam & Timelapse Recordings" screen, and assuming you are configuring a PiCamera: + + * Enter the following in the "Stream URL" field: + + ``` + /webcam/?action=stream + ``` + + Click the "Test" button to confirm that the camera is working, then click "Close". + + * Enter the following in the "Snapshot URL" field: + + ``` + http://localhost:8080/?action=snapshot + ``` + + Click the "Test" button to confirm that the camera is working, then click "Close". + + * Enter the following in the "Path to FFMPEG" field: + + ``` + /usr/bin/ffmpeg + ``` + + The expected result is the message "The path is valid". + + * Click "Next". + +7. Click "Finish" then click the button to reload the user interface. + +### after the first run + +Use a browser to point to port 9980 on your Raspberry Pi. For example: + +``` +http://raspberrypi.local:9980 +``` + +Supply your user credentials and login. + +### popup messages + +OctoPrint will display *numerous* messages in popup windows. These generally fall into two categories: + +* Messages that refer to updates; and +* Messages that refer to other events. + +In general, you can ignore messages about updates. You will get all updates automatically the next time the octoprint-docker container is rebuilt and pushed to DockerHub. + +You can, if you wish, allow an update to proceed. It might be appropriate to do that if you want to test an update. Just be aware that: + +1. Updates are ephemeral and will disappear the next time the Octoprint container is created. +2. Updates can change the structure of the persistent storage area in a way which can't be undone, and which may prevent the Octoprint container from starting the next time it is created. In other words, if you want to trial an update, take a backup of OctoPrint's persistent storage area **first**. + +### restarting the OctoPrint container + +You can restart the OctoPrint service in two ways: + +* via the Raspberry Pi command line; or +* via the OctoPrint user interface. 
+
+Whichever method you choose will result in a refresh of the OctoPrint user interface and you will need to follow the prompts to reload your browser page.
+
+#### restarting via the command line
+
+Run the following commands:
+
+``` console
+$ cd ~/IOTstack
+$ docker-compose restart octoprint
+```
+
+#### restarting via the OctoPrint user interface
+
+From the "System" icon in the OctoPrint toolbar (looks like a power button symbol):
+
+1. Choose "Restart OctoPrint".
+
+Note:
+
+* If you do not see the "System" icon in the toolbar, fix it like this:
+
+    1. Click the "Settings" icon (looks like a wrench) in the OctoPrint toolbar.
+    2. Choose "Server".
+    3. Enter the following into the "Restart OctoPrint" field:
+
+        ```
+        s6-svc -r /var/run/s6/services/octoprint
+        ```
+
+    4. Click "Save".
+
+### stopping the OctoPrint container
+
+Unless you intend to leave your printer switched on 24 hours a day, you will also need to be careful when you switch off the printer:
+
+1. Terminate the container:
+
+    ``` console
+    $ cd ~/IOTstack
+    $ docker-compose stop octoprint
+    $ docker-compose rm -f octoprint
+    ```
+
+2. Turn the 3D printer off.
+
+If you turn the printer off without terminating the container, you will crash the container.
+
+## Video feed (built-in camera interface)
+
+You can view the video feed independently of the OctoPrint web interface like this:
+
+```
+http://raspberrypi.local:9980/webcam/?action=stream
+```
+
+## Silencing the security warning
+
+OctoPrint assumes it is running "natively" rather than in a container. From a data-communications perspective, OctoPrint (the process running inside the OctoPrint container) sees itself as running on a computer attached to the internal Docker network. When you connect to OctoPrint's web interface from a client device attached to an external network, OctoPrint sees that your source IP address is not on the internal Docker network and it issues a security warning.
+
+To silence the warning:
+
+1. Terminate the container if it is running:
+
+    ``` console
+    $ cd ~/IOTstack
+    $ docker-compose stop octoprint
+    $ docker-compose rm -f octoprint
+    ```
+
+2. Use `sudo` and your favourite text editor to open the following file:
+
+    ```
+    ~/IOTstack/volumes/octoprint/octoprint/config.yaml
+    ```
+
+3. Implement the following pattern:
+
+    ``` yaml
+    server:
+      …
+      ipCheck:
+        enabled: true
+        trustedSubnets:
+          - 192.168.1.0/24
+    ```
+
+    Notes:
+
+    * The `server:`, `ipCheck:` and `enabled:` directives may already be in place but the `trustedSubnets:` directive may not be. Add it, and then add your local subnet(s) where you see the "192.168.1.0/24" example.
+    * Remember to use spaces in YAML files. Do not use tabs.
+
+4. Save the file.
+5. Bring up the container:
+
+    ``` console
+    $ cd ~/IOTstack
+    $ docker-compose up -d octoprint
+    ```
+
+## Routine container maintenance
+
+You can check for updates like this:
+
+``` console
+$ cd ~/IOTstack
+$ docker-compose pull octoprint
+$ docker-compose up -d octoprint
+$ docker system prune
+```
+
+## If you forget your username and password
+
+You can view a list of usernames like this:
+
+``` console
+$ docker exec octoprint octoprint --basedir /octoprint/octoprint user list
+```
+
+To reset a user's password:
+
+1. Use the following line as a template and replace `«username»` and `«password»` with appropriate values:
+
+    ``` console
+    $ docker exec octoprint octoprint --basedir /octoprint/octoprint user password --password «password» «username»
+    ```
+
+2. Execute the edited command. For example, to set the password for user "me" to "verySecure":
+
+    ``` console
+    $ docker exec octoprint octoprint --basedir /octoprint/octoprint user password --password verySecure me
+    ```
+
+3. Restart OctoPrint:
+
+    ``` console
+    $ cd ~/IOTstack
+    $ docker-compose restart octoprint
+    ```
+
+Note:
+
+* OctoPrint supports more than one username. To explore this further:
+
+    ``` console
+    $ docker exec octoprint octoprint --basedir /octoprint/octoprint user --help
+    ```
+
+## If all else fails…
+
+If the OctoPrint container seems to be misbehaving, you can get a "clean slate" by running:
+
+``` console
+$ cd ~/IOTstack
+$ docker-compose stop octoprint
+$ docker-compose rm -f octoprint
+$ sudo rm -rf ./volumes/octoprint
+$ docker-compose up -d octoprint
+```
+
+The OctoPrint container is well-behaved and will re-initialise its persistent storage area correctly. OctoPrint will adopt "first run" behaviour and display the Setup Wizard.
+
diff --git a/docs/Containers/OpenHab.md b/docs/Containers/OpenHab.md
new file mode 100644
index 000000000..8c5c568e3
--- /dev/null
+++ b/docs/Containers/OpenHab.md
@@ -0,0 +1,43 @@
+# openHAB
+
+## References
+
+- [DockerHub](https://hub.docker.com/r/openhab/openhab/)
+- [GitHub](https://github.com/openhab/openhab-docker)
+- [openHAB website](https://www.openhab.org/)
+
+openHAB runs in "host mode" so there are no port mappings. The default port bindings on IOTstack are:
+
+* 4050 - the HTTP port of the web interface (instead of 8080)
+* 4051 - the HTTPS port of the web interface (instead of 8443)
+* 8101 - the SSH port of the Console (since openHAB 2.0.0)
+* 5007 - the LSP port for validating rules (since openHAB 2.2.0)
+
+If you want to change either of the first two:
+
+1. Edit the `openhab` fragment in `docker-compose.yml`:
+
+    ```
+    - OPENHAB_HTTP_PORT=4050
+    - OPENHAB_HTTPS_PORT=4051
+    ```
+
+2. Recreate the openHAB container:
+
+    ``` console
+    $ cd ~/IOTstack
+    $ docker-compose up -d openhab
+    ```
+
+There do not appear to be any environment variables to control ports 8101 or 5007 so, if other containers you need to run also depend on those ports, you will have to figure out some way of resolving the conflict.
+
+Note:
+
+* The original IOTstack documentation included:
+
+    > openHAB has been added without Amazon Dashbutton binding.
+
+    but it is not clear if this is still the case.
+
+* [Amazon Dashbuttons have been discontinued](https://www.theverge.com/2019/2/28/18245315/amazon-dash-buttons-discontinued) so this may no longer be relevant.
+
diff --git a/docs/Containers/PgAdmin4.md b/docs/Containers/PgAdmin4.md
new file mode 100644
index 000000000..d63a9813b
--- /dev/null
+++ b/docs/Containers/PgAdmin4.md
@@ -0,0 +1,77 @@
+# pgAdmin4
+
+## References
+
+- [Docker Hub](https://hub.docker.com/r/gpongelli/pgadmin4-arm)
+- [GitHub](https://github.com/gpongelli/pgadmin4-docker-arm/)
+- [pgAdmin4 home page](https://www.pgadmin.org)
+
+## About
+
+pgAdmin4 is a graphical user interface to PostgreSQL.
+
+## Configuration
+
+### Runtime image
+
+The service definition includes the following lines:
+
+``` yaml
+  image: gpongelli/pgadmin4-arm:latest-armv7
+  platform: linux/arm/v7
+# image: gpongelli/pgadmin4-arm:latest-armv8
+```
+
+The ARMv7 image is enabled by default. This will run on both 32-bit (ARMv7) and 64-bit (ARMv8) systems. The `platform` clause silences warnings from docker-compose that arise when you try to run an ARMv7 image on ARMv8 architecture.
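+
+If you are not sure which flavour of operating system you are running, `uname -m` reports the kernel architecture. This is a general Linux check (not something specific to pgAdmin4) and it reports the kernel rather than the userland, so treat the answer as a hint:
+
+``` console
+$ uname -m
+```
+
+A response of `aarch64` indicates a 64-bit kernel, while `armv7l` indicates 32-bit.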
+
+If you are running on a full 64-bit system, you should edit your service definition so that it looks like this:
+
+``` yaml
+# image: gpongelli/pgadmin4-arm:latest-armv7
+# platform: linux/arm/v7
+  image: gpongelli/pgadmin4-arm:latest-armv8
+```
+
+### Container Time Zone
+
+The service definition includes the `TZ` environment variable. It defaults to `Etc/UTC`. You can either edit the environment variable directly in your compose file, or provide your own substitute by editing `~/IOTstack/.env`. Example:
+
+``` console
+$ cat ~/IOTstack/.env
+TZ=Australia/Sydney
+```
+
+## First run
+
+These instructions assume you have selected the `postgresql` container from the IOTstack menu, and that the container is running.
+
+Complete the following steps:
+
+1. Use your web browser to connect to pgAdmin4 on port `5050`. For example:
+
+    * `http://raspberrypi.local:5050`
+
+    The pgAdmin4 service takes a while to start so please be patient if you have only just launched the container. Once your browser is able to connect to pgAdmin4 successfully, the home screen will be displayed, overlaid with a prompt to enter a master password:
+
+    ![](images/pgadmin4-initial.jpeg)
+
+2. Enter a master password.
+3. Click "OK" to set the master password. The dialog will disappear leaving the home screen.
+4. Click "Add New Server". This displays the server registration sheet:
+
+    ![](images/pgadmin4-general.jpeg)
+
+5. Give the server a name. The name is not important. It just needs to be meaningful to you.
+6. Click the "Connection" tab:
+
+    ![](images/pgadmin4-connection.jpeg)
+
+7. Enter the name of the PostgreSQL container (ie "postgres").
+8. The default port is 5432. This is the **internal** port number the PostgreSQL container is listening on. It is unlikely that you will need to change this.
+9. In the "Maintenance database" field, enter the *value* of the `POSTGRES_DB` environment variable as it applies to the PostgreSQL container.
+10. In the "Username" field, enter the *value* of the `POSTGRES_USER` environment variable as it applies to the PostgreSQL container.
+11. In the "Password" field, enter the *value* of the `POSTGRES_PASSWORD` environment variable as it applies to the PostgreSQL container.
+12. Enable the "Save password" switch if you think that is appropriate.
+13. Click the "Save" button.
+
+Keep in mind that the environment variables whose values you entered in steps 9, 10 and 11 only take effect the first time the PostgreSQL container is launched. If you later change any of them in PostgreSQL, you will have to make matching changes in pgAdmin4.
diff --git a/docs/Containers/Pi-hole.md b/docs/Containers/Pi-hole.md
new file mode 100755
index 000000000..11e77e32b
--- /dev/null
+++ b/docs/Containers/Pi-hole.md
@@ -0,0 +1,517 @@
+# Pi-hole
+
+Pi-hole is a fantastic utility to reduce ads.
+
+## References { #references }
+
+* [Pi-hole on GitHub](https://github.com/pi-hole/docker-pi-hole)
+* [Pi-hole on Dockerhub](https://hub.docker.com/r/pihole/pihole)
+* [Pi-hole environment variables](https://github.com/pi-hole/docker-pi-hole#environment-variables)
+
+## Environment variables { #envVars }
+
+In conjunction with controls in Pi-hole's web GUI, environment variables govern much of Pi-hole's behaviour.
+
+If you are running new menu (master branch), environment variables are inline in your compose file.
If you are running old menu, the variables will be in: + +``` +~/IOTstack/services/pihole/pihole.env +``` + +> There is nothing about old menu which *requires* the variables to be stored in the `pihole.env` file. You can migrate everything to `docker-compose.yml` if you wish. + +Pi-hole's authoritative list of environment variables can be found [here](https://github.com/pi-hole/docker-pi-hole#environment-variables). Although many of Pi-hole's options can be set through its web GUI, there are two key advantages to using environment variables: + +1. If you ever need to reset Pi-hole by [erasing its persistent storage area](#cleanSlate), configuration options set using environment variables will persist while those set through the GUI may be lost; and +2. On at least two occasions in its history, Pi-hole upgrades have had the effect of wiping configuration options set through the GUI, whereas options set using environment variables survived. + +### Admin password { #adminPassword } + +By default, Pi-hole does not have an administrator password. That is because the default service definition provided by IOTstack contains the following environment variable with no value on its right hand side: + +``` yaml +- WEBPASSWORD= +``` + +Each time the Pi-hole container is launched, it checks for the presence or absence of the `WEBPASSWORD` environment variable, then reacts like this: + +* If `WEBPASSWORD` is *defined* but does **not** have a value: + + - No admin password is set; + - Any previous admin password is cleared; + - You will be able to [connect to Pi-hole's web interface](#connectGUI) without providing a password (you won't even see the login screen); and + - The main menu (≡) will not contain a logout command. + + This is the default situation for IOTstack. + +* If `WEBPASSWORD` is *defined* **and** has a value, that value will become the admin password. For example, to change your admin password to be "IOtSt4ckP1Hol3": + + 1. Edit your compose file so that Pi-hole's service definition contains: + + ``` yaml + - WEBPASSWORD=IOtSt4ckP1Hol3 + ``` + + 2. Run: + + ``` console + $ cd ~/IOTstack + $ docker-compose up -d pihole + ``` + + docker-compose will notice the change to the environment variable and re-create the container. The container will see that `WEBPASSWORD` has a value and will change the admin password to "IOtSt4ckP1Hol3". + + You will be prompted for a password whenever you [connect to Pi-hole's web interface](#connectGUI). + +* If `WEBPASSWORD` is *undefined* (absent from your compose file), Pi-hole behaves like this: + + - If this is the first time Pi-hole has been launched, a random password is generated. + + Pi-hole senses "first launch" if it has to initialise its persistent storage area. See also [getting a clean slate](#cleanSlate). You can discover the password by running: + + ``` console + $ docker logs pihole | grep random + ``` + + Remember, docker logs are cleared each time a container is terminated or re-created so you need to run that command before the log disappears! + + - Otherwise, whatever password was set on the previous launch will be re-used. + +#### about `pihole -a -p` { #adminPassChange } + +Some Pi-hole documentation on the web recommends using the following command to change Pi-hole's admin password: + +``` console +$ docker exec pihole pihole -a -p «yourPasswordHere» +``` + +That command works but its effect will always be overridden by `WEBPASSWORD`. 
For example, suppose your service definition contains: + +``` yaml +- WEBPASSWORD=myFirstPassword +``` + +When you start the container, the admin password will be "myFirstPassword". If you run: + +``` console +$ docker exec pihole pihole -a -p mySecondPassword +``` + +then "mySecondPassword" will become the admin password **until** the next time the container is re-created by docker-compose, at which point the password will be reset to "myFirstPassword". + +Given this behaviour, we recommend that you ignore the `pihole -a -p` command. + +### Logging { #loggingVar } + +You can control the amount of information Pi-hole retains about your DNS queries using the "Privacy Settings" tab of the "Settings" group. The default is "Show & record everything". + +If you choose any option except "Anonymous mode", then Pi-hole divides the logging store into two parts: + +* Entries which are more recent than 24 hours; and +* Entries which are older than 24 hours. + +In the "System" tab of the "Settings" group is a Flush logs (last 24 hours) button. Clicking that button erases all log entries which are more recent than 24 hours. The button does **not** erase entries which are older than 24 hours. + +Retention of log entries older than 24 hours is controlled by the following environment variable: + +``` yaml +- FTLCONF_MAXDBDAYS=365 +``` + +The default (which applies if the variable is omitted) is to retain log entries for 365 days. + +Depending on your DNS activity, the database where the log entries are stored can become quite large. Setting this variable to a shorter period will help you control the amount of storage Pi-hole consumes on disk and in your backups. + +Tip: + +* Adding this variable to an existing service definition, or changing the number of days to be less than the previous setting will **not** reduce the size of the logging database. Although Pi-hole will implement the change, the SQLite database where the logs are written retains the released storage for subsequent re-use. If you want to reclaim that space, run the following command: + + ``` console + $ sqlite3 ~/IOTstack/volumes/pihole/etc-pihole/pihole-FTL.db "vacuum;" + ``` + + The command should not need `sudo` because `pi` is the owner by default. There is no need to terminate Pi-hole before running this command (SQLite handles any contention). + +### Recursive resolvers { #upstreamDNS } + +You can control which public DNS servers are used by PiHole when it needs to refer queries to the Internet. You do this by enabling or disabling checkboxes in the "Upstream DNS Servers" panel of the "DNS" tab in the "Settings" group. + +The default is to use the two Google IPv4 DNS servers which correspond with 8.8.8.8 and 8.8.4.4, respectively. + +An alternative to toggling checkboxes in the Pi-hole GUI is to use an environment variable: + +``` yaml +- PIHOLE_DNS_=8.8.8.8;8.8.4.4 +``` + +> The variable *does* end with an underscore! + +This variable takes a semi-colon-separated list of DNS servers. You can discover the IP address associated with a checkbox by hovering your mouse pointer over the checkbox and waiting for a tool-tip to appear: + +![](./images/pihole-server-ip-discovery.png) + +### Advanced variables { #advancedVars } + +??? info "(advanced) reverse DNS query handling" + + First, understand that there are two basic types of DNS query: + + * *forward queries*: + + - question: "what is the IP address of fred.yourdomain.com?" + - answer: 192.168.1.100 + + * *reverse queries*: + + - question: "what is the domain name for 192.168.1.100?" 
+ - answer: fred.yourdomain.com + + Pi-hole has its own built-in DNS server which can answer both kinds of queries. The implementation is useful but doesn't offer all the features of a full-blown DNS server like BIND9. If you decide to implement a more capable DNS server to work alongside Pi-hole, you will need to understand the following Pi-hole environment variables: + + * `REV_SERVER=` + + If you configure Pi-hole's built-in DNS server to be authoritative for your local domain name, `REV_SERVER=false` is appropriate, in which case none of the variables discussed below has any effect. + + Setting `REV_SERVER=true` allows Pi-hole to forward queries that it can't answer to a local upstream DNS server, typically running inside your network. + + * `REV_SERVER_DOMAIN=yourdomain.com` (where "yourdomain.com" is an example) + + The Pi-hole documentation says: + + > *"If conditional forwarding is enabled, set the domain of the local network router".* + + The words "if conditional forwarding is enabled" mean "when `REV_SERVER=true`". + + However, this option really has little-to-nothing to do with the "domain of the local network **router**". Your router *may* have an IP address that reverse-resolves to a local domain name (eg gateway.mydomain.com) but this is something most routers are unaware of, even if you have configured your router's DHCP server to inform clients that they should assume a default domain of "yourdomain.com". + + This variable actually tells Pi-hole the name of your local domain. In other words, it tells Pi-hole to consider the possibility that an *unqualified* name like "fred" could be the fully-qualified domain name "fred.yourdomain.com". + + * `REV_SERVER_TARGET=192.168.1.5` (where 192.168.1.5 is an example): + + The Pi-hole documentation says: + + > *"If conditional forwarding is enabled, set the IP of the local network router".* + + This option tells Pi-hole where to direct *forward queries* that it can't answer. In other words, Pi-hole will send a forward query for fred.yourdomain.com to 192.168.1.5. + + It *may* be appropriate to set `REV_SERVER_TARGET` to the IP address of your router (eg 192.168.1.1) but, unless your router is running as a DNS server (not impossible but uncommon), the router will likely just relay any queries to your ISP's DNS servers (or other well-known DNS servers like 8.8.8.8 or 1.1.1.1 if you have configured those). Those external DNS servers are unlikely to be able to resolve queries for names in your private domain, and won't be able to do anything sensible with reverse queries if your home network uses RFC1918 addressing (which most do: 192.168.x.x being the most common example). + + Forwarding doesn't guarantee that 192.168.1.5 will be able to answer the query. The DNS server at 192.168.1.5 may well relay the query to yet another server. In other words, this environment variable does no more than set the next hop. + + If you are planning on using this option, the target needs to be a DNS server that is authoritative for your local domain and that, pretty much, is going to be a local upstream DNS server inside your home network like another Raspberry Pi running BIND9. + + * `REV_SERVER_CIDR=192.168.1.0/24` (where 192.168.1.0/24 is an example) + + The Pi-hole documentation says: + + > *"If conditional forwarding is enabled, set the reverse DNS zone (e.g. 192.168.0.0/24)".* + + This is correct but it lacks detail. + + The string "192.168.1.0/24" defines your local subnet using Classless Inter-Domain Routing (CIDR) notation. 
Most home subnets use a subnet-mask of 255.255.255.0. If you write that out in binary, it is 24 1-bits followed by 8 0-bits, as in: + + ``` + 255 . 255 . 255 . 0 + 11111111 11111111 11111111 00000000 + ``` + + Those 24 one-bits are where the `/24` comes from in `192.168.1.0/24`. When you perform a bitwise logical AND between that subnet mask and 192.168.1.0, the ".0" is removed (conceptually), as in: + + ``` + 192.168.1.0 AND 255.255.255.0 = 192.168.1 + ``` + + What it **means** is: + + 1. The network *prefix* is "192.168.1". + 2. *This* host on the 192.168.1 network is the reserved address "192.168.1.0". It is better to think of this as "the network prefix followed by all-zero bits in the host portion". It is not common to see the .0 address used in practice. A device either knows its IP address or it doesn't. If it doesn't then it won't know its prefix so it will use 0.0.0.0 as a substitute for "this". + 3. The *range* of IP addresses available for allocation to hosts on this subnet is 192.168.1.1 through 192.168.1.254 inclusive. + 4. *All* hosts on the 192.168.1 network (ie broadcast) is the reserved address "192.168.1.255". It is better to think of this as "the network prefix followed by all-one bits in the host portion". + + When you set `REV_SERVER_CIDR=192.168.1.0/24` you are telling Pi-hole that *reverse queries* for the host range 192.168.1.1 through 192.168.1.254 should be sent to the `REV_SERVER_TARGET=192.168.1.5`. + +## Pi-hole Web GUI { #webGUI } + +Note: in order for Web GUI settings to have any effects, you need to configure +the RPi or other machines to use it. This is described in the next topics. + +### Connecting to the GUI { #connectGUI } + +Point your browser to: + +``` +http://«your_ip»:8089/admin +``` + +where «your_ip» can be: + +* The IP address of the Raspberry Pi running Pi-hole. +* The domain name of the Raspberry Pi running Pi-hole. +* The multicast DNS name (eg "raspberrypi.local") of the Raspberry Pi running Pi-hole. + +### Adding local domain names { #localNames } + +Login to the Pi-hole web interface: `http://raspberrypi.local:8089/admin`: + +1. Select from Left menu: Local DNS -> DNS Records +2. Enter Domain: `raspberrypi.home.arpa` and the RPi's IP Address, e.g. `192.168.1.10`. +3. Press Add. + +Now you can use `raspberrypi.home.arpa` as the domain name for the Raspberry Pi +in your whole local network. You can also add domain names for your other +devices, provided they too have static IPs. + +!!! help "why .home.arpa?" + + Instead of `.home.arpa` - which is the real standard, but a mouthful - you + can use `.internal`. Using `.local` would technically work, but it should + be reserved for mDNS use only. + { #homeArpa } + +## Configure your Pi to use Pi-hole { #rpiDNS } + +The Raspberry Pi itself does **not** have to use the Pi-hole container for its own DNS services. Some chicken-and-egg situations can exist if, for example, the Pi-hole container is down when another process (eg `apt` or `docker-compose`) needs to do something that depends on DNS services being available. + +Nevertheless, if you configure Pi-hole to be local DNS resolver, then you will probably want to configure your Raspberry Pi to use the Pi-hole container in the first instance, and then fall back to a public DNS server if the container is down. As a beginner, this is probably what you want regardless. 
Do this by running the commands: + +``` console +$ echo "name_servers=127.0.0.1" | sudo tee -a /etc/resolvconf.conf +$ echo "name_servers_append=8.8.8.8" | sudo tee -a /etc/resolvconf.conf +$ echo "resolv_conf_local_only=NO" | sudo tee -a /etc/resolvconf.conf +$ sudo resolvconf -u +``` + +This results in a configuration that will continue working, even if the Pi-hole +container isn't running. + +??? info "Detailed explanations of these commands" + + 1. `name_servers=127.0.0.1` instructs the Raspberry Pi to direct DNS queries to the loopback address. Port 53 is implied. If the Pi-hole container is running in: + + - non-host mode, Docker is listening to port 53 and forwards the queries to the Pi-hole container; + - host mode, the Pi-hole container is listening to port 53. + + 2. `name_servers_append=8.8.8.8` instructs the Raspberry Pi to fail-over to 8.8.8.8 if Pi-hole does not respond. You can replace `8.8.8.8` (a Google service) with: + + * Another well-known public DNS server like `1.1.1.1` (Cloudflare). + * The IP address(es) of your ISP's DNS hosts (generally available from your ISP's web site). + * The IP address of another DNS server running in your local network (eg BIND9). + * The IP address of your home router. Most home routers default to the ISP's DNS hosts but you can usually change your router's configuration to bypass your ISP in favour of public servers like 8.8.8.8 and 1.1.1.1. + + You need slightly different syntax if you want to add multiple fallback servers. For example, suppose your fallback hosts are a local server (eg 192.168.1.2) running BIND9 and 8.8.8.8. The command would be: + + ``` console + $ echo 'name_servers_append="192.168.1.2 8.8.8.8"' | sudo tee -a /etc/resolvconf.conf + ``` + + 3. `resolv_conf_local_only=NO` is needed so that 127.0.0.1 and 8.8.8.8 can coexist. + 4. The `resolvconf -u` command instructs Raspberry Pi OS to rebuild the active resolver configuration. In principle, that means parsing `/etc/resolvconf.conf` to derive `/etc/resolv.conf`. This command can sometimes return the error "Too few arguments". You should ignore that error. + + ``` mermaid + flowchart LR + RERECONF["/etc/resolvconf.conf"] --- UP([resolvconf -u]) + DHCP[DHCP provided DNS-server] --- UP + UP -- "generates" --> RECONF["/etc/resolv.conf"] + classDef command fill:#9996,stroke-width:0px + class UP command + ``` + +??? note "Advanced options: ignoring DHCP provided DNS-servers, local domain name search" + + * If you wish to prevent the Raspberry Pi from including the address(es) of DNS servers learned from DHCP, you can instruct the DHCP client running on the Raspberry Pi to ignore the information coming from the DHCP server: + + ``` console + $ echo 'nooption domain_name_servers' | sudo tee -a /etc/dhcpcd.conf + $ sudo service dhcpcd reload + $ sudo resolvconf -u + ``` + + * If you have followed the steps in [Adding local domain names](#localNames) to define names for your local hosts, you can inform the Raspberry Pi of that fact like this: + + ``` console + $ echo 'search_domains=home.arpa' | sudo tee -a /etc/resolvconf.conf + $ sudo resolvconf -u + ``` + + That will add the following line to `/etc/resolv.conf`: + + ``` + search home.arpa + ``` + + Then, when you refer to a host by a short name (eg "fred") the Raspberry Pi will also consider "fred.home.arpa" when trying to discover the IP address. + +??? note "Interaction with other containers" + + Docker provides a special IP 127.0.0.11, which listens to DNS queries and + resolves them according to the host RPi's resolv.conf. 
Containers usually rely on this to perform DNS lookups. This is convenient because it avoids surprises: DNS lookups on the host and inside the containers yield the same results.
+
+    It's possible to make DNS queries directly cross-container, and this is even supported in some [rare use-cases](WireGuard.md#customContInit).
+
+## Using Pi-hole as your local DNS
+
+To use Pi-hole in your LAN, you need to assign the Raspberry Pi a fixed IP address and configure that IP as your DNS server.
+
+### Fixed IP address for Pi-hole { #rpiFixedIP }
+
+If you want clients on your network to use Pi-hole for their DNS, the Raspberry Pi running Pi-hole **must** have a fixed IP address. It does not have to be a *static* IP address (in the sense of being hard-coded into the Raspberry Pi). The Raspberry Pi can still obtain its IP address from DHCP at boot time, providing your DHCP server (usually your home router) always returns the same IP address. This is usually referred to as a *static binding* and associates the Raspberry Pi's MAC address with a fixed IP address.
+
+Keep in mind that many Raspberry Pis have both Ethernet and WiFi interfaces. It is generally prudent to establish static bindings for *both* network interfaces in your DHCP server.
+
+You can use the following command to discover the MAC addresses for your Raspberry Pi's Ethernet and WiFi interfaces:
+
+``` console
+$ for I in eth0 wlan0 ; do ip link show $I ; done
+2: eth0: mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
+    link/ether dc:a6:32:4c:89:f9 brd ff:ff:ff:ff:ff:ff
+3: wlan0: mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
+    link/ether e5:4f:01:41:88:b2 brd ff:ff:ff:ff:ff:ff
+```
+
+In the above:
+
+* The MAC address of the Ethernet interface is "dc:a6:32:4c:89:f9"
+* The MAC address of the WiFi interface is "e5:4f:01:41:88:b2"
+
+If a physical interface does not exist, the command returns "Device does not exist" for that interface. If you prefer, you can also substitute the `ifconfig` command for `ip link show`. It's just a little more wordy.
+
+### Configure clients to use Pi-hole { #rpiConfig }
+
+In order for Pi-hole to block ads or resolve anything, clients need to be told to use it as their DNS server. You can either:
+
+1. Adopt a whole-of-network approach and edit the DNS settings in your DHCP server so that all clients are given the IP address of the Raspberry Pi running Pi-hole to use for DNS services when a lease is issued.
+2. Adopt a case-by-case (manual) approach where you instruct particular clients to obtain DNS services from the IP address of the Raspberry Pi running Pi-hole.
+
+Option 1 (whole-of-network) is the simplest approach. Assuming your Raspberry Pi has the static IP `192.168.1.10`:
+
+1. Go to your network's DHCP server. In most home networks, this will be your Wireless Access Point/WLAN Router:
+
+    * Log in to its web interface
+    * Find where DNS servers are defined (generally with DHCP controls)
+    * Change all DNS fields to `192.168.1.10`
+
+2. All local clients have to be rebooted. Without this, they will continue to use the old DNS setting from an old DHCP lease for quite some time.
+
+Option 2 (case-by-case) generally involves finding the IP configuration options for each host and setting the DNS server manually. Manual changes are usually effective immediately without needing a reboot.
+
+??? note "advanced configurations"
+
+    Setting up a combination of Pi-hole (for ad-blocking services), and/or a local upstream DNS resolver (eg BIND9) to be authoritative for a local domain and reverse-resolution for your local IP addresses, and decisions about where each DNS server forwards queries it can't answer (eg your ISP's DNS servers, or Google's 8.8.8.8, or Cloudflare's 1.1.1.1) is a complex topic and depends on your specific needs.
+    { #advancedConfig }
+
+    The same applies to setting up a DHCP server (eg DHCPD) which is capable of distinguishing between the various clients on your network (ie by MAC address) to make case-by-case decisions as to where each client should obtain its DNS services.
+
+    If you need help, try asking questions on the [IOTstack Discord channel](https://discord.gg/ZpKHnks).
+
+## Testing and Troubleshooting { #debugging }
+
+Make these assumptions:
+
+1. You have followed the instructions above to add these lines to `/etc/resolvconf.conf`:
+
+    ```
+    name_servers=127.0.0.1
+    name_servers_append=8.8.8.8
+    resolv_conf_local_only=NO
+    ```
+
+2. The Raspberry Pi running Pi-hole has the IP address 192.168.1.10 which it obtains as a static assignment from your DHCP server.
+3. You have configured your DHCP server to provide 192.168.1.10 for client devices to use to obtain DNS services (ie, you are saying clients should use Pi-hole for DNS).
+
+The result of the configuration appears in `/etc/resolv.conf`:
+
+``` console
+$ cat /etc/resolv.conf
+# Generated by resolvconf
+nameserver 127.0.0.1
+nameserver 192.168.1.10
+nameserver 8.8.8.8
+```
+
+Interpretation:
+
+* `nameserver 127.0.0.1` is present because of `name_servers=127.0.0.1`
+* `nameserver 192.168.1.10` is present because it was learned from DHCP
+* `nameserver 8.8.8.8` is present because of `name_servers_append=8.8.8.8`
+
+The fact that the Raspberry Pi is effectively represented twice (once as 127.0.0.1, and again as 192.168.1.10) does not matter. If the Pi-hole container stops running, the Raspberry Pi will bypass 192.168.1.10 and fail over to 8.8.8.8, failing back to 127.0.0.1 when the Pi-hole container starts again.
+
+Install dig:
+
+``` console
+$ sudo apt install dnsutils
+```
+
+Test that Pi-hole is correctly configured (the response should contain 192.168.1.10):
+
+``` console
+$ dig raspberrypi.home.arpa @192.168.1.10
+```
+
+To check that your network's DNS configuration is correct, and that a device such as an ESP will resolve its DNS queries correctly, restart another machine (so it picks up the new DNS settings) and then run:
+
+``` console
+$ dig raspberrypi.home.arpa
+```
+
+This should produce the same result as the previous command.
+
+If this fails to resolve the IP, check that the server in the response is `192.168.1.10`. If it is `127.0.0.xx`, check that `/etc/resolv.conf` begins with `nameserver 192.168.1.10`. If not, check that the machine is configured to use DHCP and revisit [Pi-hole as DNS](#rpiConfig).
+
+## Microcontrollers { #iotConfig }
+
+If you want to avoid hardcoding your Raspberry Pi's IP address into your ESPhome devices, you need a DNS server that will do the resolving. This can be done using the Pi-hole container as described above.
+
+### `*.local` won't work for ESPhome { #esp32mDNS }
+
+There is a special case for resolving `*.local` addresses. If you do a `ping raspberrypi.local` on your desktop Linux or the Raspberry Pi, it will first try using mDNS/bonjour to resolve the IP address raspberrypi.local. If this fails it will then ask the DNS server.
+ESPhome devices can't use mDNS to resolve an IP address. You need a proper DNS server to respond to queries made by an ESP. As such, `dig raspberrypi.local` will fail, simulating ESPhome device behavior. This is as intended, and you should use raspberrypi.home.arpa as the address on your ESP device.
+
+## Getting a clean slate { #cleanSlate }
+
+If Pi-hole misbehaves, you can always try starting from a clean slate by erasing Pi-hole's persistent storage area. Erasing the persistent storage area causes Pi-hole to re-initialise its data structures on the next launch. You will lose:
+
+1. Any configuration options you may have set via the web GUI that are not otherwise reflected in [environment variables](#envVars).
+2. Any whitelist, blacklist or local DNS records you entered.
+3. All DNS resolution and blocking history.
+
+Also note that your [administrative password](#adminPassword) will reset.
+
+The recommended approach is:
+
+1. Log in to Pi-hole's web GUI and navigate to Settings » Teleporter.
+2. Click the Backup button to download a backup.
+3. Log out of the Web GUI.
+4. Run the following commands:
+
+    ``` console
+    $ cd ~/IOTstack
+    $ docker-compose down pihole
+    $ sudo rm -rf ./volumes/pihole
+    $ docker-compose up -d pihole
+    ```
+
+    > see also [if downing a container doesn't work](../Basic_setup/index.md/#downContainer)
+
+5. Log in to Pi-hole's web GUI and navigate to Settings » Teleporter.
+6. Use the checkboxes to select the settings you wish to restore, and click the Browse and Restore buttons.
+
+## Docker Desktop { #dockerDesktop }
+
+If you run Pi-hole using Docker Desktop for macOS, all client activity will be logged against the IP address of the default gateway on the internal bridged network.
+
+It appears that Docker Desktop for macOS interposes an additional level of Network Address Translation (NAT) between clients and the Pi-hole service. This does not affect Pi-hole's ability to block ads. It just makes the GUI reports a little less useful.
+
+It is not known whether this is peculiar to Docker Desktop for macOS or also affects other variants of Docker Desktop.
+
+This problem does not affect Pi-hole running in a container on a Raspberry Pi.
diff --git a/docs/Containers/Plex.md b/docs/Containers/Plex.md
new file mode 100644
index 000000000..7b1969954
--- /dev/null
+++ b/docs/Containers/Plex.md
@@ -0,0 +1,12 @@
+# Plex
+## References
+* [Homepage](https://www.plex.tv/)
+* [Docker](https://hub.docker.com/r/linuxserver/plex/)
+
+## Web interface
+The web UI can be found on `"your_ip":32400/web`
+
+## Mounting an external drive by UUID to the home directory
+[official mounting guide](https://www.raspberrypi.org/documentation/configuration/external-storage.md)
+
+Create a directory in your home directory called `mnt` with a subdirectory `HDD`. Follow the guide above to mount your external drive to `/home/pi/mnt/HDD` via your `fstab`. Then edit your `docker-compose.yml` file: under `plex`, uncomment the volumes for TV series and movies (modify the paths to point to your media locations).
+Run `docker-compose up -d` to re-create the Plex container with the new volumes.
\ No newline at end of file
diff --git a/docs/Containers/Portainer-agent.md b/docs/Containers/Portainer-agent.md
new file mode 100644
index 000000000..d194eb074
--- /dev/null
+++ b/docs/Containers/Portainer-agent.md
@@ -0,0 +1,19 @@
+# Portainer agent
+## References
+- [Docker](https://hub.docker.com/r/portainer/agent)
+- [Docs](https://portainer.readthedocs.io/en/stable/agent.html)
+
+## About
+
+The Portainer agent is a convenient way to attach a second Docker instance to an existing Portainer installation, so that you can manage multiple Docker environments from a single Portainer instance.
+
+## Adding to an existing instance
+
+To add the agent to an existing Portainer instance:
+
+* Go to the endpoints tab.
+* Click on `Add endpoint`.
+* Select "Agent".
+* Enter the name of the agent.
+* Enter the URL of the endpoint (`ip-of-agent-instance:9001`).
+* Click on "Add endpoint".
diff --git a/docs/Containers/Portainer-ce.md b/docs/Containers/Portainer-ce.md
new file mode 100644
index 000000000..ca975dc82
--- /dev/null
+++ b/docs/Containers/Portainer-ce.md
@@ -0,0 +1,101 @@
+# Portainer CE
+
+## References { #references }
+
+- [Docker](https://hub.docker.com/r/portainer/portainer-ce/)
+- [Website](https://www.portainer.io/portainer-ce/)
+
+## Definition { #definitions }
+
+- "#yourip" means any of the following:
+
+    - the IP address of your Raspberry Pi (eg `192.168.1.10`)
+    - the multicast domain name of your Raspberry Pi (eg `iot-hub.local`)
+    - the domain name of your Raspberry Pi (eg `iot-hub.mydomain.com`)
+
+## About *Portainer CE* { #about }
+
+*Portainer CE* (Community Edition) is an application for managing Docker. It is a successor to *Portainer*. According to [the *Portainer CE* documentation](https://www.portainer.io/2020/08/portainer-ce-2-0-what-to-expect/)
+
+> Portainer 1.24.x will continue as a separate code branch, released as portainer/portainer:latest, and will receive ongoing security updates until at least 1st Sept 2021. No new features will be added beyond what was available in 1.24.1.
+
+From that it should be clear that *Portainer* is deprecated and that *Portainer CE* is the way forward.
+
+## Installing *Portainer CE* { #installation }
+
+Run the menu:
+
+```console
+$ cd ~/IOTstack
+$ ./menu.sh
+```
+
+Choose "Build Stack", select "Portainer-ce", press [TAB] then "\" and follow through to the end of the menu process, typically choosing "Do not overwrite" for any existing services. When the menu finishes:
+
+```console
+$ docker-compose up -d
+```
+
+Ignore any message like this:
+
+> WARNING: Found orphan containers (portainer) for this project …
+
+## First run of *Portainer CE* { #firstRun }
+
+In your web browser navigate to `#yourip:9000/`:
+
+- the first screen will suggest a username of "admin" and ask for a password. Supply those credentials and click "Create User".
+- the second screen will ask you to select a connection method. For IOTstack, "Docker (Manage the local Docker environment)" is usually appropriate so click that and then click "Connect".
+
+From there, you can click on the "Local" group and take a look around. One of the things *Portainer CE* can help you do is find unused containers but beware of reading too much into this because, sometimes, an "unused" container is actually the base for another container (eg Node-RED).
+
+There are 'Quick actions' to view logs and other stats. This can all be done from terminal commands but *Portainer CE* makes it easier.
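+
+For example, the terminal equivalents of the 'Quick actions' are ordinary Docker commands. A couple of illustrative examples, assuming you have a container named `nodered`:
+
+``` console
+$ docker logs -f nodered      # follow the container's log (press control+C to stop)
+$ docker stats --no-stream    # one-off snapshot of CPU and memory use per container
+```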
+ +## Setting the Public IP address { #setPublicIP } + +If you click on a "Published Port" in the "Containers" list, your browser may return an error saying something like "can't connect to server" associated with an IP address of "0.0.0.0". + +To fix that problem, proceed as shown below: + +![Set Public IP address](./images/portainer-ce-set-public-ip.png) + +1. Click "Environments" in the left hand panel. +2. Click the name "local" in the list of Environments. +3. Click in the "Public IP" field. Enter one of the following: + - The multicast DNS (MDNS) name of your Raspberry Pi (eg `iot-hub.local`) + - The fully-qualified domain name (FQDN) of your Raspberry Pi (eg `iot-hub.mydomain.com`) + - The IP address of your Raspberry Pi (eg `192.168.1.10`) +4. Click "Update environment". + +> To remove the Public IP address, repeat the above steps but clear the "Public IP" field in step 3. + +The reason why you have to tell *Portainer CE* which Public IP address to use is because an instance of *Portainer CE* does not necessarily have to be running on the **same** Raspberry Pi as the Docker containers it is managing. + +Keep in mind that clicking on a "Published Port" does not guarantee that your browser can open a connection. For example: + +* Port 1883 for Mosquitto expects MQTT packets. It will not respond to HTTP, so any attempt will fail. +* Port 8089 for PiHole will respond to HTTP but PiHole may reject or mis-handle your attempt. +* Port 1880 for NodeRed will respond normally. + +> All things considered, you will get more consistent behaviour if you simply bookmark the URLs you want to use for your IOTstack services. + +Notes: + +* Earlier documentation for Portainer-CE used the term "endpoint" for what is now being called the "environment". +* The "environment" being discussed in this section is Portainer-CE's environment. It should not be confused with the tools Portainer-CE provides for managing a container's environment (eg setting environment variables). + +## If you forget your password { #forgotPassword } + +If you forget the password you created for *Portainer CE*, you can recover by doing the following: + +```console +$ cd ~/IOTstack +$ docker-compose stop portainer-ce +$ sudo rm -r ./volumes/portainer-ce +$ docker-compose start portainer-ce +``` + +Then, follow the steps in: + +1. [First run of *Portainer CE*](#firstRun); and +2. [Setting the Public IP address](#setPublicIP). diff --git a/docs/Containers/PostgreSQL.md b/docs/Containers/PostgreSQL.md new file mode 100755 index 000000000..fe6821e89 --- /dev/null +++ b/docs/Containers/PostgreSQL.md @@ -0,0 +1,93 @@ +# PostgreSQL + +## References + +- [Docker image](https://hub.docker.com/_/postgres) +- [Postgre SQL Homepage](https://www.postgresql.org/) +- [Postgre SQL docs](https://www.postgresql.org/docs/current/index.html) + +## About + +PostgreSQL is an SQL server, for those that need an SQL database. + +The database is available on port `5432` + +## Configuration + +The service definition includes the following environment variables: + +* `TZ` your timezone. Defaults to `Etc/UTC` +* `POSTGRES_USER`. Initial username. Defaults to `postuser`. +* `POSTGRES_PASSWORD`. Initial password associated with initial username. Defaults to `IOtSt4ckpostgresDbPw` (`postpassword` for old menu). +* `POSTGRES_DB`. Initial database. Defaults to `postdb`. + +You can either edit the environment variables directly or provide your own substitutes by editing `~/IOTstack/.env`. 
Example: + +``` console +$ cat ~/IOTstack/.env +TZ=Australia/Sydney +POSTGRES_PASSWORD=oneTwoThree +``` + +When the container is brought up: + +* `TZ` will have the value `Australia/Sydney` (from `.env`) +* `POSTGRES_PASSWORD` will have the value `oneTwoThree` (from `.env`) +* `POSTGRES_USER` will have the value `postuser` (the default); and +* `POSTGRES_DB` will have the value `postdb` (the default). + +The `TZ` variable takes effect every time the container is brought up. The other environment variables only work the first time the container is brought up. + +It is highly recommended to select your own password before you launch the container for the first time. See also [Getting a clean slate](#cleanSlate). + +## Management + +You can interact with the PostgreSQL Relational Database Management System running in the container via its `psql` command. You can invoke `psql` like this: + +``` console +$ docker exec -it postgres bash -c 'PGPASSWORD=$POSTGRES_PASSWORD psql $POSTGRES_DB $POSTGRES_USER' +``` + +> Because of the single quotes (') surrounding everything after the `-c`, expansion of the environment variables is deferred until the command is executed *inside* the container. + +You can use any of the following methods to exit `psql`: + +* Type "\q" and press return +* Type "exit" and press return +* Press control+D + +### password change + +Once you have logged into `psql` you can reset the password like this: + +``` sql +# ALTER USER «user» WITH PASSWORD '«password»'; +``` + +Replace: + +* `«user»` with the username (eg the default username is `postuser`) +* `«password»` with your new password. + +Notes: + +* Changing the password via the `ALTER` command does **not** update the value of the [`POSTGRES_PASSWORD`](#postgrespw) environment variable. You need to do that by hand. +* Whenever you make a change to a running container's environment variables, the changes will not take effect until you re-create the container by running: + + ``` console + $ cd ~/IOTstack + $ docker-compose up -d postgresql + ``` + +## Getting a clean slate { #cleanSlate } + +If you need to start over, proceed like this: + +``` console +$ cd ~/IOTstack +$ docker-compose down postgres +$ sudo rm -rf ./volumes/postgres +$ docker-compose up -d postgres +``` + +> see also [if downing a container doesn't work](../Basic_setup/index.md/#downContainer) diff --git a/docs/Containers/Prometheus.md b/docs/Containers/Prometheus.md new file mode 100755 index 000000000..4020334ee --- /dev/null +++ b/docs/Containers/Prometheus.md @@ -0,0 +1,424 @@ +# Prometheus + +## References { #references } + +* [*Prometheus* home](https://prometheus.io) +* *GitHub*: + + - [*Prometheus*](https://github.com/prometheus/prometheus) + - [*CAdvisor*](https://github.com/google/cadvisor) + - [*Node Exporter*](https://github.com/prometheus/node_exporter) + +* *DockerHub*: + + - [*Prometheus*](https://hub.docker.com/r/prom/prometheus) + - [*CAdvisor*](https://hub.docker.com/r/zcube/cadvisor) + - [*Node Exporter*](https://hub.docker.com/r/prom/node-exporter) + + +## Special note 2022-11-08 { #configUpdate } + +[Issue 620](https://github.com/SensorsIot/IOTstack/issues/620) pointed out there was an error in the default configuration file. That has been fixed. To adopt it, please do the following: + +1. 
If Prometheus and/or any of its associated containers are running, take them down: + + ``` + $ cd ~/IOTstack + $ docker-compose down prometheus prometheus-cadvisor prometheus-nodeexporter + ``` + + > see also [if downing a container doesn't work](../Basic_setup/index.md/#downContainer) + +2. Move the existing active configuration out of the way: + + ``` + $ cd ~/IOTstack/volumes/prometheus/data/config + $ mv config.yml config.yml.old + ``` + +3. Make sure that the service definitions in your `docker-compose.yml` are up-to-date by comparing them with the template versions: + + - `~/IOTstack/.templates/prometheus/service.yml` + - `~/IOTstack/.templates/prometheus-cadvisor/service.yml` + - `~/IOTstack/.templates/prometheus-nodeexporter/service.yml` + + Your service definitions and those in the templates do not need to be *identical*, but you should be able to explain any differences. + +4. Rebuild your Prometheus container by following the instructions in [Upgrading *Prometheus*](#upgradingPrometheus). Rebuilding will import the updated *default* configuration into the container's image. + +5. Start the service: + + ``` + $ cd ~/IOTstack + $ docker-compose up -d prometheus + ``` + + Starting `prometheus` should start `prometheus-cadvisor` and `prometheus-nodeexporter` automatically. Because the old configuration has been moved out of the way, the container will supply a new version as a default. + +6. Compare the configurations: + + ``` + $ cd ~/IOTstack/volumes/prometheus/data/config + $ diff -y config.yml.old config.yml + global: global: + scrape_interval: 10s scrape_interval: 10s + evaluation_interval: 10s evaluation_interval: 10s + + scrape_configs: scrape_configs: + - job_name: "iotstack" - job_name: "iotstack" + static_configs: static_configs: + - targets: - targets: + - localhost:9090 - localhost:9090 + - cadvisor:8080 | - prometheus-cadvisor:8080 + - nodeexporter:9100 | - prometheus-nodeexporter:9100 + ``` + + In the output above, the vertical bars (`|`) in the last two lines indicate that those lines have changed. The "old" version is on the left, "new" on the right. + + If you have made other alterations to your config then you should see other change indicators including `<`, `|` and `>`. If so, you should hand-merge your own changes from `config.yml.old` into `config.yml` and then restart the container: + + ``` + $ cd ~/IOTstack + $ docker-compose restart prometheus + ``` + +## Overview { #overview } + +Prometheus is a collection of three containers: + +* *Prometheus* +* *CAdvisor* +* *Node Exporter* + +The [default configuration](#activeConfig) for *Prometheus* supplied with IOTstack scrapes information from all three containers. + +## Installing Prometheus { #installProm } + +### *if you are running New Menu …* { #installPromNewMenu } + +When you select *Prometheus* in the IOTstack menu, you must also select: + +* *prometheus-cadvisor;* and +* *prometheus-nodeexporter*. + +If you do not select all three containers, Prometheus will not start. + +### *if you are running Old Menu …* { #installPromOldMenu } + +When you select *Prometheus* in the IOTstack menu, the service definition includes the three containers: + +* *prometheus* +* *prometheus-cadvisor;* and +* *prometheus-nodeexporter*. 
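+
+Whichever menu you use, you can confirm that all three containers came up by asking docker-compose for their status. A quick check, assuming the service names listed above:
+
+``` console
+$ cd ~/IOTstack
+$ docker-compose ps prometheus prometheus-cadvisor prometheus-nodeexporter
+```
+
+Each service should be reported with an "Up" state.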
+ +## Significant directories and files { #significantFiles } + +``` +~/IOTstack +├── .templates +│   └── prometheus +│      ├── service.yml ❶ +│      ├── Dockerfile ❷ +│      ├── docker-entrypoint.sh ❸ +│      └── iotstack_defaults ❹ +│         └── config.yml +├── services +│   └── prometheus +│      └── service.yml ❺ +├── docker-compose.yml ❻ +└── volumes + └── prometheus ❼ +    └── data +    ├── config ❽ +    │   ├── config.yml +    │   └── prometheus.yml +    └── data +``` + +1. The *template service definition*. +2. The *Dockerfile* used to customise *Prometheus* for IOTstack. +3. A pre-launch script to handle container self-repair before launching the *Prometheus* service. +4. Defaults for IOTstack, used to initialise on first run, and for container self-repair. +5. The *working service definition* (only relevant to old-menu, copied from ❶). +6. The *Compose* file (includes ❶). +7. The *persistent storage area*. +8. The [configuration directory](#configDir). + +## How *Prometheus* gets built for IOTstack { #howPrometheusIOTstackGetsBuilt } + +### *Prometheus* source code ([*GitHub*](https://github.com)) { #githubSourceCode } + +The source code for *Prometheus* lives at [*GitHub* prometheus/prometheus](https://github.com/prometheus/prometheus). + +### *Prometheus* images ([*DockerHub*](https://hub.docker.com)) { #dockerHubImages } + +Periodically, the source code is recompiled and the resulting image is pushed to [prom/prometheus](https://hub.docker.com/r/prom/prometheus) on *DockerHub*. + +### IOTstack menu { #iotstackMenu } + +When you select *Prometheus* in the IOTstack menu, the *template service definition* is copied into the *Compose* file. + +> Under old menu, it is also copied to the *working service definition* and then not really used. + +### IOTstack first run { #iotstackFirstRun } + +On a first install of IOTstack, you run the menu, choose *Prometheus* as one of your containers, and are told to do this: + +``` console +$ cd ~/IOTstack +$ docker-compose up -d +``` + +`docker-compose` reads the *Compose* file. When it arrives at the `prometheus` fragment, it finds: + +```yaml +prometheus: + container_name: prometheus + build: ./.templates/prometheus/. +``` + +The `build` statement tells `docker-compose` to look for: + +``` +~/IOTstack/.templates/prometheus/Dockerfile +``` + +> The *Dockerfile* is in the `.templates` directory because it is intended to be a common build for **all** IOTstack users. This is different to the arrangement for Node-RED where the *Dockerfile* is in the `services` directory because it is how each individual IOTstack user's version of Node-RED is customised. + +The *Dockerfile* begins with: + +```dockerfile +FROM prom/prometheus:latest +``` + +> If you need to pin to a particular version of *Prometheus*, the *Dockerfile* is the place to do it. See [*Prometheus* version pinning](#versionPinning). + +The `FROM` statement tells the build process to pull down the ***base image*** from [*DockerHub*](https://hub.docker.com). + +> It is a ***base*** image in the sense that it never actually runs as a container on your Raspberry Pi. + +The remaining instructions in the *Dockerfile* customise the *base image* to produce a ***local image***. The customisations are: + +1. Add configuration defaults appropriate for IOTstack. +2. Add `docker-entrypoint.sh` which: + + * Ensures the *internal* directory `/prometheus/config/` exists; + * Copies any configuration files that have gone missing into that directory. 
+ * Enforces "pi:pi" ownership in `~/IOTstack/volumes/prometheus/data/config`. + * Launches the *Prometheus* service. + +The *local image* is instantiated to become your running container. + +When you run the `docker images` command after *Prometheus* has been built, you *may* see two rows for *Prometheus*: + +``` console +$ docker images +REPOSITORY TAG IMAGE ID CREATED SIZE +iotstack_prometheus latest 1815f63da5f0 23 minutes ago 169MB +prom/prometheus latest 3f9575991a6c 3 days ago 169MB +``` + +* `prom/prometheus` is the *base image*; and +* `iotstack_prometheus` is the *local image*. + +You *may* see the same pattern in Portainer, which reports the *base image* as "unused". You should not remove the *base* image, even though it appears to be unused. + +> Whether you see one or two rows depends on the version of `docker-compose` you are using and how your version of `docker-compose` builds local images. + +### Dependencies: *CAdvisor* and *Node Exporter* { #dependencies } + +The *CAdvisor* and *Node Exporter* are included in the *Prometheus* service definition as dependent containers. What that means is that each time you start *Prometheus*, `docker-compose` ensures that *CAdvisor* and *Node Exporter* are already running, and keeps them running. + +The [default configuration](#activeConfig) for *Prometheus* assumes *CAdvisor* and *Node Exporter* are running and starts scraping information from those targets as soon as it launches. + +## Configuring **Prometheus** { #configuringPrometheus } + +### Configuration directory { #configDir } + +The configuration directory for the IOTstack implementation of *Prometheus* is at the path: + +``` +~/IOTstack/volumes/prometheus/data/config +``` + +That directory contains two files: + +* `config.yml`; and +* `prometheus.yml`. + +If you delete either file, *Prometheus* will replace it with a default the next time the container starts. This "self-repair" function is intended to provide reasonable assurance that *Prometheus* will at least **start** instead of going into a restart loop. + +Unless you [decide to change it](#environmentVars), the `config` folder and its contents are owned by "pi:pi". This means you can edit the files in the configuration directory without needing the `sudo` command. Ownership is enforced each time the container restarts. + +#### Active configuration file { #activeConfig } + +The file named `config.yml` is the active configuration. This is the file you should edit if you want to make changes. The default structure of the file is: + +```yaml +global: + scrape_interval: 10s + evaluation_interval: 10s + +scrape_configs: + - job_name: "iotstack" + static_configs: + - targets: + - localhost:9090 + - cadvisor:8080 + - nodeexporter:9100 +``` + +To cause a running instance of *Prometheus* to notice a change to this file: + +``` console +$ cd ~/IOTstack +$ docker-compose restart prometheus +$ docker logs prometheus +``` + +Note: + +* The YAML parser used by *Prometheus* seems to be ***exceptionally*** sensitive to syntax errors (far less tolerant than `docker-compose`). For this reason, you should **always** check the *Prometheus* log after any configuration change. + +#### Reference configuration file { #referenceConfig } + +The file named `prometheus.yml` is a reference configuration. It is a **copy** of the original configuration file that ships inside the *Prometheus* container at the path: + +``` +/etc/prometheus/prometheus.yml +``` + +Editing `prometheus.yml` has no effect. 
It is provided as a convenience to help you follow examples on the web. If you want to make the contents of `prometheus.yml` the active configuration, you need to do this: + +``` console +$ cd ~/IOTstack/volumes/prometheus/data/config +$ cp prometheus.yml config.yml +$ cd ~/IOTstack +$ docker-compose restart prometheus +$ docker logs prometheus +``` + +### Environment variables { #environmentVars } + +The IOTstack implementation of *Prometheus* supports two environment variables: + +```yaml +environment: + - IOTSTACK_UID=1000 + - IOTSTACK_GID=1000 +``` + +Those variables control ownership of the [Configuration directory](#configDir) and its contents. Those environment variables are present in the standard IOTstack service definition for *Prometheus* and have the effect of assigning ownership to "pi:pi". + +If you delete those environment variables from your *Compose* file, the [Configuration directory](#configDir) will be owned by "nobody:nobody"; otherwise the directory and its contents will be owned by whatever values you pass for those variables. + +### Migration considerations { #migration } + +Under the original IOTstack implementation of *Prometheus* (just "as it comes" from *DockerHub*), the service definition expected the configuration file to be at: + +``` +~/IOTstack/services/prometheus/config.yml +``` + +Under this implementation of *Prometheus*, the configuration file has moved to: + +``` +~/IOTstack/volumes/prometheus/data/config/config.yml +``` + +> The change of location is one of the things that allows self-repair to work properly. + +Some of the assumptions behind the default configuration file have changed. In particular, instead of the entire `scrape_configs` block being commented-out, it is active and defines `localhost`, `cadvisor` and `nodeexporter` as targets. + +You should compare the old and new versions and decide which settings need to be migrated into the new configuration file. + +If you change the configuration file, restart *Prometheus* and then check the log for errors: + +``` console +$ docker-compose restart prometheus +$ docker logs prometheus +``` + +Note: + +* The YAML parser used by *Prometheus* is very sensitive to syntax errors. Always check the *Prometheus* log after any configuration change. + +## Upgrading *Prometheus* { #upgradingPrometheus } + +You can update `cadvisor` and `nodeexporter` like this: + +``` console +$ cd ~/IOTstack +$ docker-compose pull cadvisor nodeexporter +$ docker-compose up -d +$ docker system prune +``` + +In words: + +* `docker-compose pull` downloads any newer images; +* `docker-compose up -d` causes any newly-downloaded images to be instantiated as containers (replacing the old containers); and +* the `prune` gets rid of the outdated images. + +This "simple pull" strategy doesn't work when a *Dockerfile* is used to build a *local image* on top of a *base image* downloaded from [*DockerHub*](https://hub.docker.com). The *local image* is what is running so there is no way for the `pull` to sense when a newer version becomes available. + +The only way to know when an update to *Prometheus* is available is to check the [prom/prometheus tags page](https://hub.docker.com/r/prom/prometheus/tags?page=1&ordering=last_updated) on *DockerHub*. 
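+
+To compare that against the version you are running now, you can ask the binary inside the container to report itself. This is just a quick check (it assumes the container is named `prometheus`, as in the IOTstack service definition):
+
+``` console
+$ docker exec prometheus prometheus --version
+```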
+ +Once a new version appears on *DockerHub*, you can upgrade *Prometheus* like this: + +``` console +$ cd ~/IOTstack +$ docker-compose build --no-cache --pull prometheus +$ docker-compose up -d prometheus +$ docker system prune +$ docker system prune +``` + +Breaking it down into parts: + +* `build` causes the named container to be rebuilt; +* `--no-cache` tells the *Dockerfile* process that it must not take any shortcuts. It really **must** rebuild the *local image*; +* `--pull` tells the *Dockerfile* process to actually check with [*DockerHub*](https://hub.docker.com) to see if there is a later version of the *base image* and, if so, to download it before starting the build; +* `prometheus ` is the named container argument required by the `build` command. + +Your existing *Prometheus* container continues to run while the rebuild proceeds. Once the freshly-built *local image* is ready, the `up` tells `docker-compose` to do a new-for-old swap. There is barely any downtime for your service. + +The `prune` is the simplest way of cleaning up. The first call removes the old *local image*. The second call cleans up the old *base image*. + +> Whether an old *base image* exists depends on the version of `docker-compose` you are using and how your version of `docker-compose` builds local images. + +### *Prometheus* version pinning { #versionPinning } + +If you need to pin *Prometheus* to a particular version: + +1. Use your favourite text editor to open the following file: + + ``` + ~/IOTstack/.templates/prometheus/Dockerfile + ``` + +2. Find the line: + + ```dockerfile + FROM prom/prometheus:latest + ``` + +3. Replace `latest` with the version you wish to pin to. For example, to pin to version 2.30.2: + + ```dockerfile + FROM prom/prometheus:2.30.2 + ``` + +4. Save the file and tell `docker-compose` to rebuild the local image: + + ``` console + $ cd ~/IOTstack + $ docker-compose up -d --build prometheus + $ docker system prune + ``` + + The new *local image* is built, then the new container is instantiated based on that image. The `prune` deletes the old *local image*. + +Note: + +* As well as preventing Docker from updating the *base image*, pinning will also block incoming updates to the *Dockerfile* from a `git pull`. Nothing will change until you decide to remove the pin. diff --git a/docs/Containers/Python.md b/docs/Containers/Python.md new file mode 100755 index 000000000..f66e63850 --- /dev/null +++ b/docs/Containers/Python.md @@ -0,0 +1,502 @@ +# Python + +## references { #references } + +* [Python.org](https://www.python.org) +* [Dockerhub image library](https://hub.docker.com/_/python) +* [GitHub docker-library/python](https://github.com/docker-library/python) + +## selecting Python in the IOTstack menu { #menuPython } + +When you select Python in the menu: + +1. The following folder and file structure is created: + + ``` console + $ tree ~/IOTstack/services/python + /home/pi/IOTstack/services/python + ├── app + │   └── app.py + ├── docker-entrypoint.sh + └── Dockerfile + ``` + + Note: + + * Under "old menu" (old-menu branch), the `service.yml` is also copied into the `python` directory but is then not used. + +2. This service definition is added to your `docker-compose.yml`: + + ```yaml + python: + container_name: python + build: ./services/python/. 
+ restart: unless-stopped + environment: + - TZ=Etc/UTC + - IOTSTACK_UID=1000 + - IOTSTACK_GID=1000 + # ports: + # - "external:internal" + volumes: + - ./volumes/python/app:/usr/src/app + ``` + +### customising your Python service definition { #customisingPython } + +The service definition contains a number of customisation points: + +1. `restart: unless-stopped` assumes your Python script will run in an infinite loop. If your script is intended to run once and terminate, you should remove this directive. +2. `TZ=Etc/UTC` should be set to your local time-zone. Never use quote marks on the right hand side of a `TZ=` variable. +3. If you are running as a different user ID, you may want to change both `IOTSTACK_UID` and `IOTSTACK_GID` to appropriate values. + + Notes: + + * Don't use user and group *names* because these variables are applied *inside* the container where those names are (probably) undefined. + * The only thing these variables affect is the ownership of: + + ``` + ~/IOTstack/volumes/python/app + ``` + + and its contents. If you want everything to be owned by root, set both of these variables to zero (eg `IOTSTACK_UID=0`). + +4. If your Python script listens to data-communications traffic, you can set up the port mappings by uncommenting the `ports:` directive. + +If your Python container is already running when you make a change to its service definition, you can apply it via: + +``` console +$ cd ~/IOTstack +$ docker-compose up -d python +``` + +## Python - first launch { #firstLaunchPython } + +After running the menu, you are told to run the commands: + +``` console +$ cd ~/IOTstack +$ docker-compose up -d +``` + +This is what happens: + +1. *docker-compose* reads your `docker-compose.yml`. +2. When it finds the service definition for Python, it encounters: + + ``` yaml + build: ./services/python/. + ``` + + The leading period means "the directory containing `docker-compose.yml` while the trailing period means "Dockerfile", so the path expands to: + + ``` + ~/IOTstack/services/python/Dockerfile + ``` + +3. The `Dockerfile` is processed. It downloads the **base** image for Python from Dockerhub and then makes changes including: + + * copying the contents of the following directory into the image as a set of defaults: + + ``` + /home/pi/IOTstack/services/python/app + ``` + + * copying the following file into the image: + + ``` + /home/pi/IOTstack/services/python/docker-entrypoint.sh + ``` + + The `docker-entrypoint.sh` script runs each time the container launches and performs initialisation and "self repair" functions. + + The output of the Dockerfile run is a new **local** image tagged with the name `iotstack_python`. + +4. The `iotstack_python` image is instantiated to become the running container. +5. When the container starts, the `docker-entrypoint.sh` script runs and initialises the container's persistent storage area: + + ``` console + $ tree -pu ~/IOTstack/volumes + /home/pi/IOTstack/volumes + └── [drwxr-xr-x root ] python + └── [drwxr-xr-x pi ] app + └── [-rwxr-xr-x pi ] app.py + ``` + + Note: + + * the top-level `python` folder is owned by "root" but the `app` directory and its contents are owned by "pi". + +5. The initial `app.py` Python script is a "hello world" placeholder. It runs as an infinite loop emitting messages every 10 seconds until terminated. You can see what it is doing by running: + + ``` console + $ docker logs -f python + The world is born. Hello World. + The world is re-born. Hello World. + The world is re-born. Hello World. 
+ … + ``` + + Pressing control+c terminates the log display but does not terminate the running container. + +## stopping the Python service { #stopPython } + +To stop the container from running, either: + +* take down your whole stack: + + ``` console + $ cd ~/IOTstack + $ docker-compose down + ``` + +* terminate the python container + + ``` console + $ cd ~/IOTstack + $ docker-compose down python + ``` + + > see also [if downing a container doesn't work](../Basic_setup/index.md/#downContainer) + +## starting the Python service { #startPython } + +To bring up the container again after you have stopped it, either: + +* bring up your whole stack: + + ``` console + $ cd ~/IOTstack + $ docker-compose up -d + ``` + +* bring up the python container + + ``` console + $ cd ~/IOTstack + $ docker-compose up -d python + ``` + +## Python - second-and-subsequent launch { #reLaunchPython } + +Each time you launch the Python container *after* the first launch: + +1. The existing local image (`iotstack_python`) is instantiated to become the running container. +2. The `docker-entrypoint.sh` script runs and performs "self-repair" by replacing any files that have gone missing from the persistent storage area. Self-repair does **not** overwrite existing files! +3. The `app.py` Python script is run. + +## when things go wrong - check the log { #debugging } + +If the container misbehaves, the log is your friend: + +``` console +$ docker logs python +``` + +## project development life-cycle { #yourPythonScript } + +It is **critical** that you understand that **all** of your project development should occur within the folder: + +``` +~/IOTstack/volumes/python/app +``` + +So long as you are performing some sort of routine backup (either with a supplied script or a third party solution like [Paraphraser/IOTstackBackup](https://github.com/Paraphraser/IOTstackBackup)), your work will be protected. + +### getting started { #gettingStarted } + +Start by editing the file: + +``` +~/IOTstack/volumes/python/app/app.py +``` + +If you need other supporting scripts or data files, also add those to the directory: + +``` +~/IOTstack/volumes/python/app +``` + +Any time you change something in the `app` folder, tell the running python container to notice the change by: + +``` console +$ cd ~/IOTstack +$ docker-compose restart python +``` + +### reading and writing to disk { #persistentStorage } + +Consider this line in the service definition: + +``` +- ./volumes/python/app:/usr/src/app +``` + +The leading period means "the directory containing `docker-compose.yml`" so it the same as: + +``` +- ~/IOTstack/volumes/python/app:/usr/src/app +``` + +Then, you split the line at the ":", resulting in: + +* The *external* directory = `~/IOTstack/volumes/python/app` +* The *internal* directory = `/usr/src/app` + +What it means is that: + +* Any file you put into the *external* directory (or any sub-directories you create within the *external* directory) will be visible to your Python script running inside the container at the same relative position in the *internal* directory. +* Any file or sub-directory created in the *internal* directory by your Python script running inside the container will be visible outside the container at the same relative position in the *external* directory. +* The contents of *external* directory and, therefore, the *internal* directory will persist across container launches. + +If your script writes into any other directory inside the container, the data will be lost when the container re-launches. 
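+
+As a concrete (purely illustrative) example of the mapping at work, the commands below create a file in the *external* directory and then look at it from inside the container; the filename `example.txt` is hypothetical:
+
+``` console
+$ echo "hello" >~/IOTstack/volumes/python/app/example.txt
+$ docker exec python ls /usr/src/app
+$ docker exec python cat /usr/src/app/example.txt
+```
+
+The `ls` should list `example.txt` alongside `app.py`, and the `cat` should print "hello", because both commands see the *internal* side of the same mapping.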
+ +### getting a clean slate { #cleanSlate } + +If you make a mess of things and need to start from a clean slate, erase the persistent storage area: + +``` console +$ cd ~/IOTstack +$ docker-compose down python +$ sudo rm -rf ./volumes/python +$ docker-compose up -d python +``` + +> see also [if downing a container doesn't work](../Basic_setup/index.md/#downContainer) + +The container will re-initialise the persistent storage area from its defaults. + +### adding packages { #addingPackages } + +As you develop your project, you may find that you need to add supporting packages. For this example, we will assume you want to add "[Flask](https://pypi.org/project/Flask/)" and "[beautifulsoup4](https://pypi.org/project/beautifulsoup4/)". + +If you were developing a project outside of container-space, you would simply run: + +``` console +$ pip3 install -U Flask beautifulsoup4 +``` + +You *can* do the same thing with the running container: + +``` console +$ docker exec python pip3 install -U Flask beautifulsoup4 +``` + +and that will work — until the container is re-launched, at which point the added packages will disappear. + +To make *Flask* and *beautifulsoup4* a permanent part of your container: + +1. Change your working directory: + + ``` console + $ cd ~/IOTstack/services/python/app + ``` + +2. Use your favourite text editor to create the file `requirements.txt` in that directory. Each package you want to add should be on a line by itself: + + ``` + Flask + beautifulsoup4 + ``` + +3. Tell Docker to rebuild the local Python image: + + ``` console + $ cd ~/IOTstack + $ docker-compose build --force-rm python + $ docker-compose up -d --force-recreate python + $ docker system prune -f + ``` + + Note: + + * You will see a warning about running pip as root - ignore it. + +4. Confirm that the packages have been added: + + ``` console + $ docker exec python pip3 freeze | grep -e "Flask" -e "beautifulsoup4" + beautifulsoup4==4.10.0 + Flask==2.0.1 + ``` + +5. Continue your development work by returning to [getting started](#gettingStarted). + +Note: + +* The first time you following the process described above to create `requirements.txt`, a copy will appear at: + + ``` + ~/IOTstack/volumes/python/app/requirements.txt + ``` + + This copy is the result of the "self-repair" code that runs each time the container starts noticing that `requirements.txt` is missing and making a copy from the defaults stored inside the image. + + If you make more changes to the master version of `requirements.txt` in the *services* directory and rebuild the local image, the copy in the *volumes* directory will **not** be kept in-sync. That's because the "self-repair" code **never** overwrites existing files. + + If you want to bring the copy of `requirements.txt` in the *volumes* directory up-to-date: + + ``` console + $ cd ~/IOTstack + $ rm ./volumes/python/app/requirements.txt + $ docker-compose restart python + ``` + + The `requirements.txt` file will be recreated and it will be a copy of the version in the *services* directory as of the last image rebuild. + +### making your own Python script the default { #scriptBaking } + +Suppose the Python script you have been developing reaches a major milestone and you decide to "freeze dry" your work up to that point so that it becomes the default when you ask for a [clean slate](#cleanSlate). Proceed like this: + +1. 
If you have added any packages by following the steps in [adding packages](#addingPackages), run the following command: + + ``` console + $ docker exec python bash -c 'pip3 freeze >requirements.txt' + ``` + + That generates a `requirements.txt` representing the state of play inside the running container. Because it is running *inside* the container, the `requirements.txt` created by that command appears *outside* the container at: + + ``` + ~/IOTstack/volumes/python/app/requirements.txt + ``` + +2. Make your work the default: + + ``` console + $ cd ~/IOTstack + $ cp -r ./volumes/python/app/* ./services/python/app + ``` + + The `cp` command copies: + + * your Python script; + * the optional `requirements.txt` (from step 1); and + * any other files you may have put into the Python working directory. + + Key point: + + * **everything** copied into `./services/python/app` will become part of the new local image. + +3. Terminate the Python container and erase its persistent storage area: + + ``` console + $ cd ~/IOTstack + $ docker-compose down python + $ sudo rm -rf ./volumes/python + ``` + + Note: + + * If erasing the persistent storage area feels too risky, just move it out of the way: + + ``` console + $ cd ~/IOTstack/volumes + $ sudo mv python python.off + ``` + +4. Rebuild the local image: + + ``` console + $ cd ~/IOTstack + $ docker-compose build --force-rm python + $ docker-compose up -d --force-recreate python + ``` + + On its first launch, the new container will re-populate the persistent storage area but, this time, it will be your Python script and any other supporting files, rather than the original "hello world" script. + +5. Clean up by removing the old local image: + + ``` console + $ docker system prune -f + ``` + +### canning your project { #scriptCanning } + +Suppose your project has reached the stage where you wish to put it into production as a service under its own name. Make two further assumptions: + +1. You have gone through the steps in [making your own Python script the default](#scriptBaking) and you are **certain** that the content of `./services/python/app` correctly captures your project. +2. You want to give your project the name "wishbone". + +Proceed like this: + +1. Stop the development project: + + ``` console + $ cd ~/IOTstack + $ docker-compose down python + ``` + +2. Remove the existing local image: + + ``` console + $ docker rmi iotstack_python + ``` + +3. Rename the `python` services directory to the name of your project: + + ``` console + $ cd ~/IOTstack/services + $ mv python wishbone + ``` + +4. Edit the `python` service definition in `docker-compose.yml` and replace references to `python` with the name of your project. In the following, the original is on the left, the edited version on the right, and the lines that need to change are indicated with a "|": + + ``` yaml + python: | wishbone: + container_name: python | container_name: wishbone + build: ./services/python/. | build: ./services/wishbone/. 
+    restart: unless-stopped                      restart: unless-stopped
+    environment:                                 environment:
+    - TZ=Etc/UTC                                 - TZ=Etc/UTC
+    - IOTSTACK_UID=1000                          - IOTSTACK_UID=1000
+    - IOTSTACK_GID=1000                          - IOTSTACK_GID=1000
+    # ports:                                     # ports:
+    # - "external:internal"                      # - "external:internal"
+    volumes:                                     volumes:
+    - ./volumes/python/app:/usr/src/app        | - ./volumes/wishbone/app:/usr/src/app
+    ```
+
+    Note:
+
+    * if you make a copy of the `python` service definition and then perform the required "wishbone" edits on the copy, the `python` definition will still be active so `docker-compose` may try to bring up both services. You will eliminate the risk of confusing yourself if you follow these instructions "as written" by **not** leaving the `python` service definition in place.
+
+5. Start the renamed service:
+
+    ``` console
+    $ cd ~/IOTstack
+    $ docker-compose up -d wishbone
+    ```
+
+Remember:
+
+* After you have done this, the persistent storage area will be at the path:
+
+    ```
+    ~/IOTstack/volumes/wishbone/app
+    ```
+
+## routine maintenance { #routineMaintenance }
+
+To make sure you are running from the most-recent **base** image of Python from Dockerhub:
+
+``` console
+$ cd ~/IOTstack
+$ docker-compose build --no-cache --pull python
+$ docker-compose up -d python
+$ docker system prune -f
+$ docker system prune -f
+```
+
+In words:
+
+1. Be in the right directory.
+2. Force docker-compose to download the most-recent version of the Python **base** image from Dockerhub, and then run the Dockerfile to build a new **local** image.
+3. Instantiate the newly-built **local** image.
+4. Remove the old **local** image.
+5. Remove the old **base** image.
+
+The old base image can't be removed until the old local image has been removed, which is why the `prune` command needs to be run twice.
+
+Note:
+
+* If you have followed the steps in [canning your project](#scriptCanning) and your service has a name other than `python`, just substitute the new name where you see `python` in the two `docker-compose` commands. diff --git a/docs/Containers/RTL_433-docker.md b/docs/Containers/RTL_433-docker.md new file mode 100644 index 000000000..069f7ab2a --- /dev/null +++ b/docs/Containers/RTL_433-docker.md @@ -0,0 +1,29 @@ +# RTL_433 Docker
+
+Requirements: you will need an SDR dongle to be able to use RTL_433. I've tested this with an RTL2838.
+
+Make sure you can see your receiver by running `lsusb`:
+
+``` console
+$ lsusb
+Bus 003 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub
+Bus 002 Device 001: ID 1d6b:0003 Linux Foundation 3.0 root hub
+Bus 001 Device 004: ID 0bda:2838 Realtek Semiconductor Corp. RTL2838 DVB-T
+Bus 001 Device 002: ID 2109:3431 VIA Labs, Inc. Hub
+Bus 001 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub
+```
+
+Before starting the container, please install RTL_433 from the native installs menu. This will set up your environment with the correct variables and programs. It is also advised to run RTL_433 natively to verify that it is working correctly on your system; a quick check is sketched below.
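+
+For example (a sketch only, assuming the native install has put `rtl_433` on your `PATH`), run it in the foreground, watch for decoded packets from any nearby 433MHz devices, then press control+c to stop it:
+
+``` console
+$ rtl_433
+```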
+
+The container is designed to send all detected messages over MQTT.
+
+Edit the `~/IOTstack/services/rtl_433/rtl_433.env` file with the relevant settings for your MQTT broker:
+
+```
+MQTT_ADDRESS=mosquitto
+MQTT_PORT=1883
+#MQTT_USER=myuser
+#MQTT_PASSWORD=mypassword
+MQTT_TOPIC=RTL_433
+```
+
+The container starts with the command `rtl_433 -F mqtt:....` and currently does not filter any packets, so you will need to do that filtering in Node-RED. diff --git a/docs/Containers/Ring-MQTT.md b/docs/Containers/Ring-MQTT.md new file mode 100644 index 000000000..cf7b5c348 --- /dev/null +++ b/docs/Containers/Ring-MQTT.md @@ -0,0 +1,155 @@ +# Ring-MQTT
+
+## References
+
+- [Ring-MQTT Wiki](https://github.com/tsightler/ring-mqtt/wiki) (documentation)
+- [DockerHub](https://hub.docker.com/r/tsightler/ring-mqtt) (Docker images)
+- [GitHub](https://github.com/tsightler/ring-mqtt) (Source code and issues)
+
+## Getting started
+
+1. Be in the correct directory (assumed throughout):
+
+    ``` console
+    $ cd ~/IOTstack
+    ```
+
+2. Run the IOTstack menu and choose `ring-mqtt`. An alternative to running the menu is to append the service definition template to your compose file like this:
+
+    ``` console
+    $ sed -e "s/^/  /" ./.templates/ring-mqtt/service.yml >>docker-compose.yml
+    ```
+
+    > The `sed` command is required because service definition templates are left-shifted by two spaces.
+
+3. This step is optional. Use a text editor to open your `docker-compose.yml` file:
+
+    - find the `ring-mqtt` service definition;
+    - change the `TZ` environment variable to your time-zone;
+    - save your work.
+
+4. Bring up the container:
+
+    ``` console
+    $ docker-compose up -d ring-mqtt
+    ```
+
+    This pulls the image from DockerHub, instantiates the container, and initialises its persistent storage.
+
+5. Use `sudo` and a text editor to open the configuration file. For example:
+
+    ``` console
+    $ sudo vi ./volumes/ring-mqtt/data/config.json
+    ```
+
+    At the time of writing, the default configuration file looked like this:
+
+    ``` { .json linenums="1" }
+    {
+        "mqtt_url": "mqtt://localhost:1883",
+        "mqtt_options": "",
+        "livestream_user": "",
+        "livestream_pass": "",
+        "disarm_code": "",
+        "enable_cameras": false,
+        "enable_modes": false,
+        "enable_panic": false,
+        "hass_topic": "homeassistant/status",
+        "ring_topic": "ring",
+        "location_ids": [
+            ""
+        ]
+    }
+    ```
+
+    From the perspective of any process running in a Docker container, `localhost` means "this container" rather than "this Raspberry Pi". You need to edit line 2 to point to your MQTT broker:
+
+    * If the `ring-mqtt` container and your `mosquitto` container are running on the **same** Raspberry Pi:
+
+        ``` { .json linenums="2" }
+        "mqtt_url": "mqtt://mosquitto:1883",
+        ```
+
+    * Otherwise, replace `localhost` with the IP address or domain name of the host where your MQTT broker is running. For example:
+
+        ``` { .json linenums="2" }
+        "mqtt_url": "mqtt://192.168.0.100:1883",
+        ```
+
+    * If your MQTT broker is protected by a username and password, refer to the [Ring-MQTT Wiki](https://github.com/tsightler/ring-mqtt/wiki/Configuration-Details#global-configuration-options) for the correct syntax.
+
+    Save your work then restart the container:
+
+    ``` console
+    $ docker-compose restart ring-mqtt
+    ```
+
+6. Launch your browser (eg Chrome, Firefox, Safari) and open the following URL:
+
+    ```
+    http://«ip-or-name»:55123
+    ```
+
+    where `«ip-or-name»` is the IP address or domain name of the Raspberry Pi running your ring-mqtt container.
Examples: + + * `http://192.168.1.100:55123` + * `http://iot-hub.my.domain.com:55123` + * `http://iot-hub.local:55123` + + You should see the following screen: + + ![Ring-MQTT web UI](./images/ring-mqtt-token.png) + + Follow the instructions on the screen to generate your refresh token. + +7. Check the logs: + + ``` console + $ docker logs ring-mqtt + ``` + + Unless you see errors being reported, your `ring-mqtt` container should be ready. + +## Environment variables + +The default service definition includes two environment variables: + +``` yaml +environment: +- TZ=Etc/UTC +- DEBUG=ring-* +``` + +* `TZ=` should be set to your local time zone (explained above). +* `DEBUG=ring-*` ("all debugging options enabled") is the default for `ring-mqtt` when running in a container. It is included as a placeholder if you want to tailor debugging output. Refer to the [Ring-MQTT Wiki](https://github.com/tsightler/ring-mqtt/wiki#debugging). + +Whenever you change an environment variable, run: + +``` console +$ cd ~/IOTstack +$ docker-compose up -d ring-mqtt +``` + +The "up" causes docker-compose to notice the configuration change and re-create the container. + +## Configuration + +Consult the [Ring-MQTT Wiki](https://github.com/tsightler/ring-mqtt/wiki/Configuration-Details). + +## Maintenance + +Periodically: + +``` console +$ cd ~/IOTstack +$ docker-compose pull ring-mqtt +``` + +If a new image comes down from DockerHub: + +``` console +$ docker-compose up -d ring-mqtt +$ docker system prune -f +``` + +The "up" instantiates the newly-downloaded image as the running container. The "prune" cleans up the older image. diff --git a/docs/Containers/Scrypted.md b/docs/Containers/Scrypted.md new file mode 100755 index 000000000..255466c15 --- /dev/null +++ b/docs/Containers/Scrypted.md @@ -0,0 +1,99 @@ +--- +title: Scrypted +--- + +# Scrypted – home video integration platform + +## References + +* [Scrypted home page](https://www.scrypted.app/?) +* [DockerHub](https://hub.docker.com/r/koush/scrypted) +* [GitHub](https://github.com/koush/scrypted#installation) + +## Getting started + +1. Run the IOTstack menu and select "Scrypted" so that the service definition is added to your compose file. +2. Before starting the container for the first time, run the following commands: + + ``` console + $ cd ~/IOTstack + $ echo "SCRYPTED_WEBHOOK_UPDATE_AUTHORIZATION=$(cat /proc/sys/kernel/random/uuid | md5sum | head -c 24)" >>.env + ``` + + This generates a random token and places it in `~/IOTstack/.env`. + + Notes: + + 1. You only need to do this **once**. + 2. It is not clear whether the token is respected on every launch, or only on first launch. + +3. Start Scrypted: + + ``` console + $ cd ~/IOTstack + $ docker-compose up -d scrypted + ``` + + Note: + + * scrypted is a **large** image (2.5GB). It takes time to download and decompress! + +4. Use the following URL as a template: + + ``` + https://«host-or-ip»:10443 + ``` + + Replace `«host-or-ip»` with the domain name or IP address of your Raspberry Pi. Examples: + + * `https://raspberrypi.my.domain.com:10443` + * `https://raspberrypi.local:10443` + * `https://192.168.1.10:10443` + + Note: + + * You can't use the `http` protocol. You must use `https`. + +5. Paste the URL into a browser window. The container uses a self-signed certificate so you will need to accept that using your browser's mechanisms. +6. Enter a username and password to create your administrator account. 
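+
+If starting the container fails with a complaint about a missing token, a quick sanity check (using the variable name from step 2) is to confirm that the token actually landed in `.env`:
+
+``` console
+$ grep SCRYPTED_WEBHOOK_UPDATE_AUTHORIZATION ~/IOTstack/.env
+```
+
+See the Troubleshooting section below for the exact error message.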
+
+
+## Troubleshooting
+
+If you see the message:
+
+```
+required variable SCRYPTED_WEBHOOK_UPDATE_AUTHORIZATION is missing a value: see instructions for generating a token
+```
+
+it means that you did not complete step 2 before starting the container. Go back and perform step 2.
+
+If you need to start over from scratch:
+
+``` console
+$ cd ~/IOTstack
+$ docker-compose down scrypted
+$ sudo rm -rf ./volumes/scrypted
+$ docker-compose up -d scrypted
+```
+
+> see also [if downing a container doesn't work](../Basic_setup/index.md/#downContainer)
+
+## About the service definition
+
+The Scrypted container runs in host mode, which means it binds directly to the Raspberry Pi's ports. The service definition includes:
+
+``` yaml
+x-ports:
+- "10443:10443"
+```
+
+The effect of the `x-` prefix is to comment-out that port mapping. It is included as an aide-memoire to help you remember the port number.
+
+The service definition also includes the following environment variable:
+
+``` yaml
+- SCRYPTED_WEBHOOK_UPDATE=http://localhost:10444/v1/update
+```
+
+The container does not bind to port 10444 so the purpose of this is not clear. The port number should be treated as reserved. diff --git a/docs/Containers/Syncthing.md b/docs/Containers/Syncthing.md new file mode 100644 index 000000000..79a02d9c2 --- /dev/null +++ b/docs/Containers/Syncthing.md @@ -0,0 +1,39 @@ +# Syncthing
+
+Syncthing is a continuous file synchronization program. It synchronizes files between two or more computers in real time, safely protected from prying eyes. Your data is your data alone and you deserve to choose where it is stored, whether it is shared with some third party, and how it's transmitted over the internet.
+
+Forget about using proprietary solutions and take control of your data. Syncthing is an open-source solution for synchronizing your data in a peer-to-peer way.
+
+## References
+
+- [Syncthing home page](https://syncthing.net/)
+- [GitHub repository](https://github.com/syncthing/syncthing)
+- [linuxserver.io docker image](https://docs.linuxserver.io/images/docker-syncthing) - The one used here
+- [Official Syncthing docker image](https://hub.docker.com/r/syncthing/syncthing) - Not the one used here
+
+    - For more information about the official Syncthing image, have a look [here](https://github.com/syncthing/syncthing/blob/main/README-Docker.md)
+
+## Web interface
+
+The web UI can be found at `«your-ip»:8384`.
+
+## Data & volumes
+
+Configuration data lives in the container's `/config` directory, which is mapped to `./volumes/syncthing/config` on the host.
+
+The `/app` directory is inside the container; on the host you will use `./volumes/syncthing/data`. The default share is named `Sync`. Other folders you add will also appear under `data`.
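+
+To see how those mappings appear on the host's file system once the container has been started, a quick check (a sketch only; the `tree` utility may need to be installed first with `sudo apt install tree`) is:
+
+``` console
+$ tree -L 2 ~/IOTstack/volumes/syncthing
+```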
+
+## Ports
+
+Have a look at `~/IOTstack/.templates/syncthing/service.yml` or the linuxserver.io docker documentation for details. The ports used are:
+
+```
+    ports:
+      - 8384:8384 # Web UI
+      - 22000:22000/tcp # TCP file transfers
+      - 22000:22000/udp # QUIC file transfers
+      - 21027:21027/udp # Receive local discovery broadcasts
+```
+
+ diff --git a/docs/Containers/TasmoAdmin.md b/docs/Containers/TasmoAdmin.md new file mode 100644 index 000000000..2d2c3c1e5 --- /dev/null +++ b/docs/Containers/TasmoAdmin.md @@ -0,0 +1,10 @@ +# TasmoAdmin
+## References
+* [Homepage](https://github.com/arendst/Tasmota/wiki/TasmoAdmin)
+* [Docker](https://hub.docker.com/r/raymondmm/tasmoadmin/)
+
+## Web interface
+The web UI can be found at `"your_ip":8088`
+
+## Usage
+(instructions to follow) \ No newline at end of file diff --git a/docs/Containers/Telegraf.md b/docs/Containers/Telegraf.md new file mode 100755 index 000000000..5afd36a69 --- /dev/null +++ b/docs/Containers/Telegraf.md @@ -0,0 +1,372 @@ +# Telegraf
+
+This document discusses an IOTstack-specific version of Telegraf built on top of [influxdata/influxdata-docker/telegraf](https://github.com/influxdata/influxdata-docker/tree/master/telegraf) using a *Dockerfile*.
+
+The purpose of the Dockerfile is to:
+
+* tailor the default configuration to be IOTstack-ready; and
+* enable the container to perform self-repair if essential elements of the persistent storage area disappear.
+
+## References { #references }
+
+- [*influxdata Telegraf* home](https://www.influxdata.com/time-series-platform/telegraf/)
+- [*GitHub*: influxdata/influxdata-docker/telegraf](https://github.com/influxdata/influxdata-docker/tree/master/telegraf)
+- [*DockerHub*: influxdata Telegraf](https://hub.docker.com/_/telegraf)
+
+## Significant directories and files { #significantFiles }
+
+```
+~/IOTstack
+├── .templates
+│   └── telegraf
+│       ├── Dockerfile ❶
+│       ├── entrypoint.sh ❷
+│       ├── iotstack_defaults
+│       │   ├── additions ❸
+│       │   └── auto_include ❹
+│       └── service.yml ❺
+├── services
+│   └── telegraf
+│       └── service.yml ❻
+├── docker-compose.yml
+└── volumes
+    └── telegraf ❼
+        ├── additions ❽
+        ├── telegraf-reference.conf ➒
+        └── telegraf.conf ➓
+```
+
+1. The *Dockerfile* used to customise Telegraf for IOTstack.
+2. A replacement for the `telegraf` container script of the same name, extended to handle container self-repair.
+3. The *additions folder*. See [Applying optional additions](#optionalAdditions).
+4. The *auto_include folder*. Additions automatically applied to `telegraf.conf`. See [Automatic includes to telegraf.conf](#autoInclude).
+5. The *template service definition*.
+6. The *working service definition* (only relevant to old-menu, copied from ❺).
+7. The *persistent storage area* for the `telegraf` container.
+8. A working copy of the *additions folder* (copied from ❸). See [Applying optional additions](#optionalAdditions).
+9. The *reference configuration file*. See [Changing Telegraf's configuration](#editConfiguration).
+10. The *active configuration file*. A subset of ➒ altered to support communication with InfluxDB running in a container in the same IOTstack instance.
+
+Everything in the persistent storage area ❼:
+
+* will be replaced if it is not present when the container starts; but
+* will never be overwritten if altered by you.
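+
+Once the container has been launched at least once, you can get a feel for the difference between the *reference configuration file* ➒ and the *active configuration file* ➓ with a quick line count (just an illustration; the exact numbers vary with the Telegraf version):
+
+``` console
+$ wc -l ~/IOTstack/volumes/telegraf/telegraf-reference.conf ~/IOTstack/volumes/telegraf/telegraf.conf
+```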
+ +## How Telegraf gets built for IOTstack { #howTelegrafIOTstackGetsBuilt } + +### IOTstack menu { #iotstackMenu } + +When you select Telegraf in the IOTstack menu, the *template service definition* is copied into the *Compose* file. + +> Under old menu, it is also copied to the *working service definition* and then not really used. + +### IOTstack first run { #iotstackFirstRun } + +On a first install of IOTstack, you run the menu, choose your containers, and are told to do this: + +``` console +$ cd ~/IOTstack +$ docker-compose up -d +``` + +> See also the [Migration considerations](#migration) (below). + +`docker-compose` reads the *Compose* file. When it arrives at the `telegraf` fragment, it finds: + +``` yaml + telegraf: + container_name: telegraf + build: ./.templates/telegraf/. + … +``` + +The `build` statement tells `docker-compose` to look for: + +``` +~/IOTstack/.templates/telegraf/Dockerfile +``` + +> The *Dockerfile* is in the `.templates` directory because it is intended to be a common build for **all** IOTstack users. This is different to the arrangement for Node-RED where the *Dockerfile* is in the `services` directory because it is how each individual IOTstack user's version of Node-RED is customised. + +The *Dockerfile* begins with: + +``` +FROM telegraf:latest +``` + +> If you need to pin to a particular version of Telegraf, the *Dockerfile* is the place to do it. See [Telegraf version pinning](#versionPinning). + +The `FROM` statement tells the build process to pull down the ***base image*** from [*DockerHub*](https://hub.docker.com/_/telegraf?tab=tags&page=1&ordering=last_updated&name=latest). + +> It is a ***base*** image in the sense that it never actually runs as a container on your Raspberry Pi. + +The remaining instructions in the *Dockerfile* customise the ***base image*** to produce a ***local image***. The customisations are: + +1. Add the `rsync` package. This helps the container perform self-repair. +2. Copy the *default configuration file* that comes with the DockerHub image (so it will be available as a fully-commented reference for the user) and make it read-only. +3. Make a *working version* of the *default configuration file* from which comment lines and blank lines have been removed. +4. Patch the *working version* to support communications with InfluxDB running in another container in the same IOTstack instance. +5. Replace `entrypoint.sh` with a version which: + + * calls `rsync` to perform self-repair if `telegraf.conf` goes missing; and + * enforces root:root ownership in `~/IOTstack/volumes/telegraf`. + +The ***local image*** is instantiated to become your running container. + +When you run the `docker images` command after Telegraf has been built, you *may* see two rows for Telegraf: + +``` console +$ docker images +REPOSITORY TAG IMAGE ID CREATED SIZE +iotstack_telegraf latest 59861b7fe9ed 2 hours ago 292MB +telegraf latest a721ac170fad 3 days ago 273MB +``` + +* `telegraf ` is the ***base image***; and +* `iotstack_telegraf ` is the ***local image***. + +You *may* see the same pattern in *Portainer*, which reports the ***base image*** as "unused". You should not remove the ***base*** image, even though it appears to be unused. + +> Whether you see one or two rows depends on the version of `docker-compose` you are using and how your version of `docker-compose` builds local images. 
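+
+If you are ever unsure which image your running container was created from, you can ask Docker directly (a quick check using a standard `docker inspect` query; given the example above you would expect it to report `iotstack_telegraf`):
+
+``` console
+$ docker inspect telegraf --format '{{.Config.Image}}'
+```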
+ +### Migration considerations { #migration } + +Under the original IOTstack implementation of Telegraf (just "as it comes" from *DockerHub*), the service definition expected `telegraf.conf` to be at: + +``` +~/IOTstack/services/telegraf/telegraf.conf +``` + +Under this implementation of Telegraf, the configuration file has moved to: + +``` +~/IOTstack/volumes/telegraf/telegraf.conf +``` + +> The change of location is one of the things that allows self-repair to work properly. + +With one exception, all prior and current versions of the default configuration file are identical in terms of their semantics. + +> In other words, once you strip away comments and blank lines, and remove any "active" configuration options that simply repeat their default setting, you get the same subset of "active" configuration options. The default configuration file supplied with gcgarner/IOTstack is available [here](https://github.com/gcgarner/IOTstack/blob/master/.templates/telegraf/telegraf.conf) if you wish to refer to it. + +The exception is `[[inputs.mqtt_consumer]]` which is now provided as an optional addition. If your existing Telegraf configuration depends on that input, you will need to apply it. See [applying optional additions](#optionalAdditions). + +## Logging { #logging } + +You can inspect Telegraf's log by: + +``` console +$ docker logs telegraf +``` + +These logs are ephemeral and will disappear when your Telegraf container is rebuilt. + +### log message: *database "telegraf" creation failed* { #logTelegrafDB } + +The following log message can be misleading: + +``` +W! [outputs.influxdb] When writing to [http://influxdb:8086]: database "telegraf" creation failed: Post "http://influxdb:8086/query": dial tcp 172.30.0.9:8086: connect: connection refused +``` + +If InfluxDB is not running when Telegraf starts, the `depends_on:` clause in Telegraf's service definition tells Docker to start InfluxDB (and Mosquitto) before starting Telegraf. Although it can launch the InfluxDB *container* first, Docker has no way of knowing when the `influxd` *process* running inside the InfluxDB container will start listening to port 8086. + +What this error message *usually* means is that Telegraf has tried to communicate with InfluxDB before the latter is ready to accept connections. Telegraf typically retries after a short delay and is then able to communicate with InfluxDB. + +## Changing Telegraf's configuration { #editConfiguration } + +The first time you launch the Telegraf container, the following structure will be created in the persistent storage area: + +``` +~/IOTstack/volumes/telegraf +├── [drwxr-xr-x root ] additions +│   └── [-rw-r--r-- root ] inputs.mqtt_consumer.conf +├── [-rw-r--r-- root ] telegraf.conf +└── [-r--r--r-- root ] telegraf-reference.conf +``` + +The file: + +* `telegraf-reference.conf`: + + - is a *reference* copy of the default configuration file that ships with the ***base image*** for Telegraf when it is downloaded from DockerHub. It is nearly 9000 lines long and is mostly comments. + - is **not** used by Telegraf but will be replaced if you delete it. + - is marked "read-only" (even for root) as a reminder that it is only for your reference. Any changes you make will be ignored. + +* `telegraf.conf`: + + - is created by removing all comment lines and blank lines from `telegraf-reference.conf`, leaving only the "active" configuration options, and then adding options necessary for IOTstack. 
+ - is less than 30 lines and is significantly easier to understand than `telegraf-reference.conf`. + +* `inputs.mqtt_consumer.conf` – see [Applying optional additions](#optionalAdditions) below. + +The intention of this structure is that you: + +1. search `telegraf-reference.conf` to find the configuration option you need; +2. read the comments to understand what the option does and how to use it; and then +3. import the option into the correct section of `telegraf.conf`. + +When you make a change to `telegraf.conf`, you activate it by restarting the container: + +``` console +$ cd ~/IOTstack +$ docker-compose restart telegraf +``` + +### Automatic includes to telegraf.conf { #autoInclude } + +* `inputs.docker.conf` instructs Telegraf to collect metrics from Docker. Requires kernel control + groups to be enabled to collect memory usage data. If not done during initial installation, + enable by running (reboot required): + + ``` console + $ CMDLINE="/boot/firmware/cmdline.txt" && [ -e "$CMDLINE" ] || CMDLINE="/boot/cmdline.txt" + $ echo $(cat "$CMDLINE") cgroup_memory=1 cgroup_enable=memory | sudo tee "$CMDLINE" + ``` + +* `inputs.cpu_temp.conf` collects cpu temperature. + +### Applying optional additions { #optionalAdditions } + +The *additions folder* (see [Significant directories and files](#significantFiles)) is a mechanism for additional *IOTstack-ready* configuration options to be provided for Telegraf. + +Currently there is one addition: + +1. `inputs.mqtt_consumer.conf` which formed part of the [gcgarner/IOTstack telegraf configuration](https://github.com/gcgarner/IOTstack/blob/master/.templates/telegraf/telegraf.conf) and instructs Telegraf to subscribe to a metric feed from the Mosquitto broker. This assumes, of course, that something is publishing those metrics. + +Using `inputs.mqtt_consumer.conf` as the example, applying that addition to +your Telegraf configuration file involves: + +``` console +$ cd ~/IOTstack/volumes/telegraf +$ grep -v "^#" additions/inputs.mqtt_consumer.conf | sudo tee -a telegraf.conf >/dev/null +$ cd ~/IOTstack +$ docker-compose restart telegraf +``` + +The `grep` strips comment lines and the `sudo tee` is a safe way of appending the result to `telegraf.conf`. The `restart` causes Telegraf to notice the change. + +## Getting a clean slate { #cleanSlate } + +### Erasing the persistent storage area { #zapStore } + +Erasing Telegraf's persistent storage area triggers self-healing and restores known defaults: + +``` console +$ cd ~/IOTstack +$ docker-compose down telegraf +$ sudo rm -rf ./volumes/telegraf +$ docker-compose up -d telegraf +``` + +Notes: + +* You can also remove individual files within the persistent storage area and then trigger self-healing. For example, if you decide to edit `telegraf-reference.conf` and make a mess, you can restore the original version like this: + + ``` console + $ cd ~/IOTstack + $ sudo rm ./volumes/telegraf/telegraf-reference.conf + $ docker-compose restart telegraf + ``` + +* See also [if downing a container doesn't work](../Basic_setup/index.md/#downContainer) + +### Resetting the InfluxDB database { #resetDB } + +To reset the InfluxDB database that Telegraf writes into, proceed like this: + +``` console +$ cd ~/IOTstack +$ docker-compose down telegraf +$ docker exec -it influxdb influx -precision=rfc3339 +> drop database telegraf +> exit +$ docker-compose up -d telegraf +``` + +In words: + +* Be in the right directory. +* Stop the Telegraf container (while leaving the InfluxDB container running). 
See also [if downing a container doesn't work](../Basic_setup/index.md/#downContainer). +* Launch the Influx CLI inside the InfluxDB container. +* Delete the `telegraf` database, and then exit the CLI. +* Start the Telegraf container. This re-creates the database automatically. + +## Upgrading Telegraf { #upgradingTelegraf } + +You can update most containers like this: + +``` console +$ cd ~/IOTstack +$ docker-compose pull +$ docker-compose up -d +$ docker system prune +``` + +In words: + +* `docker-compose pull` downloads any newer images; +* `docker-compose up -d` causes any newly-downloaded images to be instantiated as containers (replacing the old containers); and +* the `prune` gets rid of the outdated images. + +This strategy doesn't work when a *Dockerfile* is used to build a ***local image*** on top of a ***base image*** downloaded from [*DockerHub*](https://hub.docker.com). The ***local image*** is what is running so there is no way for the `pull` to sense when a newer version becomes available. + +The only way to know when an update to Telegraf is available is to check the [Telegraf tags page](https://hub.docker.com/_/telegraf?tab=tags&page=1&ordering=last_updated) on *DockerHub*. + +Once a new version appears on *DockerHub*, you can upgrade Telegraf like this: + +``` console +$ cd ~/IOTstack +$ docker-compose build --no-cache --pull telegraf +$ docker-compose up -d telegraf +$ docker system prune +$ docker system prune +``` + +Breaking it down into parts: + +* `build` causes the named container to be rebuilt; +* `--no-cache` tells the *Dockerfile* process that it must not take any shortcuts. It really **must** rebuild the ***local image***; +* `--pull` tells the *Dockerfile* process to actually check with [*DockerHub*](https://hub.docker.com) to see if there is a later version of the ***base image*** and, if so, to download it before starting the build; +* `telegraf` is the named container argument required by the `build` command. + +Your existing Telegraf container continues to run while the rebuild proceeds. Once the freshly-built ***local image*** is ready, the `up` tells `docker-compose` to do a new-for-old swap. There is barely any downtime for your service. + +The `prune` is the simplest way of cleaning up. The first call removes the old ***local image***. The second call cleans up the old ***base image***. Whether an old ***base image*** exists depends on the version of `docker-compose` you are using and how your version of `docker-compose` builds local images. + +### Telegraf version pinning { #versionPinning } + +If you need to pin Telegraf to a particular version: + +1. Use your favourite text editor to open the following file: + + ``` + ~/IOTstack/.templates/telegraf/Dockerfile + ``` + +2. Find the line: + + ``` + FROM telegraf:latest + ``` + +3. Replace `latest` with the version you wish to pin to. For example, to pin to version 1.19.3: + + ``` + FROM telegraf:1.19.3 + ``` + +4. Save the file and tell `docker-compose` to rebuild the ***local image***: + + ``` console + $ cd ~/IOTstack + $ docker-compose up -d --build telegraf + $ docker system prune + ``` + + The new ***local image*** is built, then the new container is instantiated based on that image. The `prune` deletes the old ***local image***. + +Note: + +* As well as preventing Docker from updating the ***base image***, pinning will also block incoming updates to the *Dockerfile* from a `git pull`. Nothing will change until you decide to remove the pin. 
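+
+* Whether or not you pin, you can confirm the Telegraf version that is actually running by asking the binary inside the container (assuming the container name `telegraf`):
+
+    ``` console
+    $ docker exec telegraf telegraf --version
+    ```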
diff --git a/docs/Containers/Timescaledb.md b/docs/Containers/Timescaledb.md new file mode 100644 index 000000000..fafcd8b69 --- /dev/null +++ b/docs/Containers/Timescaledb.md @@ -0,0 +1,8 @@ + +### Default port changed + +In order to avoid port conflict with PostgreSQL, the public database port is +mapped to **5433** using Docker. + +Cross-container access from other containers still works as previously: +`timescaledb:5432`. diff --git a/docs/Containers/WireGuard.md b/docs/Containers/WireGuard.md new file mode 100755 index 000000000..1beedccb1 --- /dev/null +++ b/docs/Containers/WireGuard.md @@ -0,0 +1,696 @@ +# WireGuard + +WireGuard is a fast, modern, secure Virtual Private Network (VPN) tunnel. It can securely connect you to your home network, allowing you to access your home network's local services from anywhere. It can also secure your traffic when using public internet connections. + +Reference: + +* [WireGuard home page](https://www.wireguard.com) +* [IOTstack discussion paper : ZeroTier vs WireGuard](ZeroTier-vs-WireGuard.md) +* [2022-10-01 WireGuard migration](#migrateWireguard) + +Assumptions: + +* These instructions assume that you have privileges to configure your network's gateway (router). If you are not able to make changes to your network's firewall settings, then you will not be able to finish this setup. +* In common with most VPN technologies, WireGuard assumes that the WAN side of your network's gateway has a public IP address which is reachable directly. WireGuard may not work if that assumption does not hold. If you strike this problem, read [ZeroTier vs WireGuard](ZeroTier-vs-WireGuard.md). + +## Installing WireGuard under IOTstack { #installWireguard } + +You increase your chances of a trouble-free installation by performing the installation steps in the following order. + +### Step 1: Update your Raspberry Pi OS { #updateRaspbian } + +To be able to run WireGuard successfully, your Raspberry Pi needs to be **fully** up-to-date. If you want to understand why, see [the read only flag](#readOnlyFlag). + +``` console +$ sudo apt update +$ sudo apt upgrade -y +``` + +### Step 2: Set up a Dynamic DNS name { #obtainDDNS } + +Before you can use WireGuard (or any VPN solution), you need a mechanism for your remote clients to reach your home router. You have two choices: + +1. Obtain a permanent IP address for your home router from your Internet Service Provider (ISP). Approach your ISP if you wish to pursue this option. It generally involves additional charges. +2. Use a Dynamic DNS service. See IOTstack documentation [Accessing your device from the internet](../Basic_setup/Accessing-your-Device-from-the-internet.md). The rest of this documentation assumes you have chosen this option. 
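+
+Once your Dynamic DNS name is registered and your router (or a DDNS client) is keeping it up-to-date, it is worth checking that the name really does track your public IP address. A rough sketch (`dig` comes from the `dnsutils` package and `ifconfig.me` is just one of many "what is my IP" services); the two answers should match:
+
+``` console
+$ dig +short your.dynamic.dns.name
+$ curl -s https://ifconfig.me ; echo
+```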
+ +### Step 3: Understand the Service Definition { #serviceDefinition } + +This is the service definition *template* that IOTstack uses for WireGuard: + +``` { .yaml linenums="1" } +wireguard: + container_name: wireguard + image: ghcr.io/linuxserver/wireguard + restart: unless-stopped + environment: + - PUID=1000 + - PGID=1000 + - TZ=${TZ:-Etc/UTC} + - SERVERURL=your.dynamic.dns.name + - SERVERPORT=51820 + - PEERS=laptop,phone,tablet + - PEERDNS=auto + - ALLOWEDIPS=0.0.0.0/0 + ports: + - "51820:51820/udp" + volumes: + - ./volumes/wireguard/config:/config + - ./volumes/wireguard/custom-cont-init.d:/custom-cont-init.d + - ./volumes/wireguard/custom-services.d:/custom-services.d + cap_add: + - NET_ADMIN + sysctls: + - net.ipv4.conf.all.src_valid_mark=1 +``` + +Unfortunately, that service definition will not work "as is". It needs to be configured. + +Key points: + +* Everything in the `environment:` section from `SERVERURL=` down to `PEERDNS=` (inclusive) affects WireGuard's generated configurations (the QR codes). In other words, any time you change any of those values, any existing QR codes will stop working. + +### Step 4: Decide what to configure { #configureWhat } + +With most containers, you can continue to tweak environment variables and settings without upsetting the container's basic behaviour. WireGuard is a little different. You really need to think, carefully, about how you want to configure the service before you start. If you change your mind later, you generally have to [start from a clean slate](#cleanSlate). + +#### Fields that you should always configure { #configureAlways } + +* `SERVERURL=` should be set to the domain name you have registered with a Dynamic DNS service provider. Example: + + ```yml + - SERVERURL=downunda.duckdns.org + ``` + +* `PEERS=` should be a comma-separated list of your client devices (all the phones, tablets, laptops, desktops you want to use remotely to get back into your home network). Example: + + ```yml + - PEERS=jillMacbook,jackChromebook,alexNokiaG10 + ``` + + Notes: + + - Many examples on the web use "PEERS=n" where "n" is a number. In practice, that approach seems to be a little fragile and is not recommended for IOTstack. + - Each name needs to start with a letter and be followed by one or more letters and/or digits. Letters can be upper- or lower-case. Do not use any other characters. + +#### Optional configuration - DNS resolution for peers { #configurePeerDNS } + +You have several options for how your remote peers resolve DNS requests: + +* `PEERDNS=auto` + + DNS queries made on connected WireGuard clients should work as if they were made on the host. If you configure [PiHole](Pi-hole.md) into the host's `resolveconf.conf`, Wireguard clients will also automatically use it. + + Details: + + * The default value of `auto` instructs the WireGuard *service* running within the WireGuard *container* to use a DNS-service, coredns, also running in the Wireguard container. Coredns by default directs queries to 127.0.0.11, which Docker intercepts and forwards to whichever resolvers are specified in the Raspberry Pi's `/etc/resolv.conf`. + +* `PEERDNS=auto` with `custom-cont-init` + { #customContInit } + + This configuration instructs WireGuard to forward DNS queries from remote peers to any host daemon or **container** which is listening on port 53. 
This is the option you will want to choose if you are running an ad-blocking DNS server (eg *PiHole* or *AdGuardHome*) in a container on the same host as WireGuard, and you want your remote clients to obtain DNS resolution via the ad-blocker, but don't want your Raspberry Pi host to use it. + + > Acknowledgement: thanks to @ukkopahis for developing this option. + + To activate this feature: + + 1. Make sure your WireGuard service definition contains `PEERDNS=auto`. + 2. Start the WireGuard container by executing: + + ``` console + $ cd ~/IOTstack + $ docker-compose up -d wireguard + ``` + + This ensures that the `~/IOTstack/volumes/wireguard` folder structure is created and remote client configurations are (re)generated properly. + + 3. Run the following commands: + + ``` console + $ cd ~/IOTstack + $ sudo cp ./.templates/wireguard/use-container-dns.sh ./volumes/wireguard/custom-cont-init.d/ + $ docker-compose restart wireguard + ``` + + The presence of `use-container-dns.sh` causes WireGuard to redirect incoming DNS queries to the default gateway on the internal bridged network. That, in turn, results in the queries being forwarded to any other container that is listening for DNS traffic on port 53. It does not matter if that other container is PiHole, AdGuardHome, bind9 or any other kind of DNS server. + + Do note, however, that this configuration creates a dependency between WireGuard and the container providing DNS resolution. You may wish to make that explicit in your `docker-compose.yml` by adding these lines to your WireGuard service definition: + + ```yaml + depends_on: + - pihole + ``` + + > Substitute `adguardhome` or `bind9` for `pihole`, as appropriate. + + Once activated, this feature will remain active until you decide to deactivate it. If you ever wish to deactivate it, run the following commands: + + ``` console + $ cd ~/IOTstack + $ sudo rm ./volumes/wireguard/custom-cont-init.d/use-container-dns.sh + $ docker-compose restart wireguard + ``` + +* `PEERDNS=«ip address»` + + A third possibility is if you have a local upstream DNS server. You can specify the IP address of that server so that remote peers receive DNS resolution from that host. For example: + + ```yml + - PEERDNS=192.168.203.65 + ``` + + Do note that changes to `PEERDNS` will not be updated to existing clients, and as such you may want to use `PEERDNS=auto` unless you have a very specific requirement. + +#### Optional configuration - WireGuard ports { #configurePorts } + +The WireGuard service definition template follows the convention of using UDP port "51820" in three places. You can leave it like that and it will just work. There is no reason to change the defaults unless you want to. + +To understand what each port number does, it is better to think of them like this: + +```yml +environment: +- SERVERPORT=«public» +ports: +- "«external»:«internal»/udp" +``` + +These definitions are going to be used throughout this documentation: + +* The *«public»* port is the port number that your remote WireGuard clients (phone, laptop etc) will try to reach. This is the port number that your router needs to expose to the outside world. + +* The *«external»* port is the port number that Docker, running on your Raspberry Pi, will be listening on. Your router needs to forward WireGuard incoming traffic to the *«external»* port on your Raspberry Pi. + +* The *«internal»* port is the port number that WireGuard (the server process) will be listening on inside the WireGuard container. 
Docker handles forwarding between the *«external»* and *«internal»* port. + +Rule #1: + +* You **can** change the *«public»* and *«external»* ports but you **can't** change the *«internal»* port unless you are prepared to do a lot more work. + +Rule #2: + +* The *«public»* port forms part of the QR codes. If you decide to change the *«public»* port after you generate the QR codes, you will have to [start over from a clean slate](#cleanSlate). + +Rule #3: + +* Your router needs to know about both the *«public»* and *«external»* ports so, if you decide to change either of those, you must also reconfigure your router. + +See [Understanding WireGuard's port numbers](#understandingPorts) if you want more information on how the various port numbers are used. + +### Step 5: Configure WireGuard { #configureWireGuard } + +There are two approaches: + +1. Let the menu generate a `docker-compose.yml` with the default WireGuard service definition template, and then edit `docker-compose.yml`. +2. Prepare a `compose-override.yml` file, then run the menu and have it perform the substitutions for you. + +Of the two, the first is generally the simpler and means you don't have to re-run the menu whenever you want to change WireGuard's configuration. + +#### Method 1: Configure WireGuard by editing `docker-compose.yml` { #editCompose } + +1. Run the menu: + + ``` console + $ cd ~/IOTstack + $ ./menu.sh + ``` + +2. Choose the "Build Stack" option. +3. If WireGuard is not already selected, select it. +4. Press enter to begin the build. +5. Choose Exit. +6. Open `docker-compose.yml` in your favourite text editor. +7. Navigate to the WireGuard service definition. +8. Implement the decisions you took in [decide what to configure](#configureWhat). +9. Save your work. + +#### Method 2: Configure WireGuard using `compose-override.yml` { #editOverride } + +The [Custom services and overriding default settings for IOTstack](../Basic_setup/Custom.md) page describes how to use an override file to allow the menu to incorporate your custom configurations into the final `docker-compose.yml` file. + +You will need to create the `compose-override.yml` **before** running the menu to build your stack. If you have already built your stack, you'll have to rebuild it after creating `compose-override.yml`. + +1. Use your favourite text editor to create (or open) the override file. The file is expected to be at the path: + + ``` + ~/IOTstack/compose-override.yml + ``` + +2. Define overrides to implement the decisions you took in [Decide what to configure](#configureWhat). For example: + + ``` { .yaml linenums="1" } + services: + wireguard: + environment: + - PUID=1000 + - PGID=1000 + - TZ=${TZ:-Etc/UTC} + - SERVERURL=downunda.duckdns.org + - SERVERPORT=51820 + - PEERS=laptop,phone,tablet + - PEERDNS=auto + - ALLOWEDIPS=0.0.0.0/0 + ``` + + Key points: + + * The override file works at the **section** level. Therefore, you have to include *all* of the environment variables from the template, not just the ones you want to alter. + * If your override file contains configurations for other containers, make sure the file only has a single `services:` directive at the start. + +3. Save your work. +4. Run the menu: + + ``` console + $ cd ~/IOTstack + $ ./menu.sh + ``` + +5. Choose the "Build Stack" option. +6. If WireGuard is not already selected, select it. +7. Press enter to begin the build. +8. Choose Exit. +9. 
Check your work by running: + + ``` console + $ cat docker-compose.yml + ``` + + and verify that the `wireguard` service definition is as you expect. + +### Step 6: Start WireGuard { #startWireGuard } + +1. To start WireGuard, bring up your stack: + + ``` console + $ cd ~/IOTstack + $ docker-compose up -d + ``` + +2. Confirm that WireGuard has started properly by running: + + ``` console + $ docker ps --format "table {{.Names}}\t{{.RunningFor}}\t{{.Status}}" --filter name=wireguard + ``` + + Repeat the command a few times with a short delay in between. You are looking for signs that the WireGuard container is restarting. If the container seems to be restarting then this command is your friend: + + ``` console + $ docker logs wireguard + ``` + + See also discussion of [the read-only flag](#readOnlyFlag). + +3. Confirm that WireGuard has generated the expected configurations. For example, given the following setting in `docker-compose.yml`: + + ```yml + - PEERS=jillMacbook,jackChromebook,alexNokiaG10 + ``` + + you would expect a result something like this: + + ``` console + $ tree ./volumes/wireguard/config + ./volumes/wireguard/config + ├── coredns + │   └── Corefile + ├── peer_alexNokiaG10 + │   ├── peer_alexNokiaG10.conf + │   ├── peer_alexNokiaG10.png + │   ├── presharedkey-peer_alexNokiaG10 + │   ├── privatekey-peer_alexNokiaG10 + │   └── publickey-peer_alexNokiaG10 + ├── peer_jackChromebook + │   ├── peer_jackChromebook.conf + │   ├── peer_jackChromebook.png + │   ├── presharedkey-peer_jackChromebook + │   ├── privatekey-peer_jackChromebook + │   └── publickey-peer_jackChromebook + ├── peer_jillMacbook + │   ├── peer_jillMacbook.conf + │   ├── peer_jillMacbook.png + │   ├── presharedkey-peer_jillMacbook + │   ├── privatekey-peer_jillMacbook + │   └── publickey-peer_jillMacbook + ├── server + │   ├── privatekey-server + │   └── publickey-server + ├── templates + │   ├── peer.conf + │   └── server.conf + └── wg0.conf + ``` + + Notice how each element in the `PEERS=` list is represented by a sub-directory prefixed with `peer_`. You should expect the same pattern for your peers. + +### Step 7: Save your WireGuard client configuration files (QR codes) { #clientQRcodes } + +The first time you launch WireGuard, it generates cryptographically protected configurations for your remote clients and encapsulates those configurations in QR codes. You can see the QR codes by running: + +``` console +$ docker logs wireguard +``` + +WireGuard's log is ephemeral, which means it resets each time the container is re-created. In other words, you can't rely on going back to the log to obtain your QR codes if you lose them. + +WireGuard also records the QR codes as `.png` files. In fact, the QR codes shown by `docker logs wireguard` are just side-effects of the `.png` files as they are created. + +If your Raspberry Pi has a GUI (such as a screen attached to an HDMI port or a VNC connection), you can always retrieve the QR codes by opening the `.png` files in the GUI. + +If, however, your Raspberry Pi is running headless, you will need to copy the `.png` files to a system that is capable of displaying them, such as a Mac or PC. You can use SCP to do that. + +> See [ssh tutorial](https://github.com/Paraphraser/IOTstackBackup/blob/master/ssh_tutorial.md) if you need help setting up SSH (of which SCP is a part). + +For example, to copy **all** PNG files from your Raspberry Pi to a target system: + +``` console +$ find ~/IOTstack/volumes/wireguard/config -name "*.png" -exec scp {} user@hostorip:. 
\; +``` + +Note: + +* `hostorip` is the host name, fully-qualified domain name, multicast domain name or IP address of the GUI-capable target computer; and +* `user` is a valid username on the target computer. + +If you want to work in the other direction (ie from the GUI-capable system), you can try: + +``` console +$ scp pi@hostorip:IOTstack/volumes/wireguard/peer_jill-macbook/peer_jill-macbook.png . +``` + +In this case: + +* `hostorip` is the host name, fully-qualified domain name, multicast domain name or IP address of the Raspberry Pi that is running WireGuard. + +Keep in mind that each QR code contains everything needed for **any** device to access your home network via WireGuard. Treat your `.png` files as "sensitive documents". + +### Step 8: Configure your router with a NAT rule { #routerNATConfig } + +A typical home network will have a firewall that effectively blocks all incoming attempts from the Internet to open a new connection with a device on your network. + +To use a VPN from outside of your home network (which is precisely the point of running the service!), you need to configure your router to allow incoming WireGuard traffic to reach the Raspberry Pi running WireGuard. These instructions assume you have the privileges to do that. + +If you have not used your router's administrative interface before, the default login credentials may be physically printed on the device or in its instruction manual. + +> If you have never changed the default login credentials, you should take the time to do that. + +Routers have wildly different user interfaces but the concepts will be the same. This section describes the basic technique but if you are unsure how to do this on your particular router model, the best idea would be to search the web for: + +* "[YOUR DEVICE NAME] port forwarding configuration"; or +* "[YOUR DEVICE NAME] NAT configuration" + +A typical configuration process goes something like this: + +1. The router sub-process you need to configure is called Network Address Translation (NAT) but it's not unheard of for this functionality to be grouped with FireWall. +2. The NAT component you are looking for probably has a name like "Port Redirection", "Port Forwarding", "NAT Forwarding" or "NAT Virtual Server". + + * It might also be under "Open Ports" but those are usually one-to-one mappings (ie incomingPort=outgoingPort), apply to port ranges, and are intended to target a single DMZ host. + +3. The configuration screen will contain at least the following fields: + + Field | Value + -------------|------- + Interface | router's WAN interface + Private IP | x.x.x.x + Private Port | «external» + Protocol | UDP + Public Port | «public» + Service Name | WireGuard + + The fields in the above list are in alphabetical order. They will almost certainly be in a different order in your router and may also have different names: + + * *Interface* is typically a popup menu. Generally it will either default to the name of the physical port on your router that connects to the outside world, or be some other sensible default like "All". + * *Private IP* (or *Internal IP*) is the IP address of the Raspberry Pi running WireGuard. Note that this pretty much forces you to give your Raspberry Pi a statically-configured IP address (either a static binding in your DHCP server or a hard-coded address in the Raspberry Pi itself). + * *Private Port* (or *Internal Port*) needs to be the value you chose for «external» in the WireGuard service definition (51820 if you didn't change it). 
+ + > Yes, this does sound counterintuitive but it's a matter of perspective. From the router's perspective, the port is on the *private* or *internal* part of your home network. From Docker's perspective, the port is «external» to container-space. + + * *Protocol* will usually default to "TCP" but you **must** change it to "UDP". + * *Public Port* or *External Port* needs to be the value you chose for «public» in the WireGuard service definition (51820 if you didn't change it). + * *Service Name* (or *Service Type*) is typically a text field, an editable menu (where you can either make a choice or type your own value), or a button approximating an editable menu. If you are given the option of choosing "WireGuard", do that, otherwise just type that name into the field. It has no significance other than reminding you what the rule is for. + +### Step 9: Configure your remote WireGuard clients { #configureClients } + +This is a massive topic and one which is well beyond the scope of this guide. You really will have to work it out for yourself. Start by Googling: + +* "[YOUR DEVICE NAME] install WireGuard client". + +You will find the list of client software at [WireGuard Installation](https://www.wireguard.com/install/). + +For portable devices (eg iOS and Android) it usually boils down to: + +1. Install the app on your portable device. +2. Display the QR code the WireGuard server generated for the device. +3. Launch the app. +4. Point the device's camera at the QR code. +5. Follow your nose. + +## Understanding WireGuard's port numbers { #understandingPorts } + +Here's a concrete example configuration using three different port numbers: + +```yml +environment: +- SERVERURL=downunda.duckdns.org +- SERVERPORT=51620 +ports: +- "51720:51820/udp" +``` + +In other words: + +1. The «public» port is 51620. +2. The «external» port is 51720. +3. The «internal» port is 51820. + +You also need to make a few assumptions: + +1. The host running the remote WireGuard client (eg a mobile phone with the WireGuard app installed) has been allocated the IP address 55.66.77.88 when it connected to the Internet over 3G/4G/5G. +2. When the remote WireGuard client initiated the session, it chose UDP port 44524 as its source port. The actual number chosen is (essentially) random and only significant to the client. +3. Your Internet Service Provider allocated the IP address 12.13.14.15 to the WAN side of your router. +4. You have done all the steps in [Set up a Dynamic DNS name](#obtainDDNS) and your WAN IP address (12.13.14.15) is being propagated to your Dynamic DNS service provider. + +Here's a reference model to help explain what occurs: + +![WireGuard port model](./images/wireguard-portmodel.jpeg) + +The remote WireGuard client: + +1. Obtains the Dynamic DNS domain name ("downunda.duckdns.org") and *«public»* UDP port (51620) from the configuration contained within the QR code. Recall that those values are obtained from the `SERVERURL=` and `SERVERPORT=` environment variables in `docker-compose.yml`. +2. Executes a DNS query for the domain name "downunda.duckdns.org" to obtains the WAN IP address (12.13.14.15) of your home router. +3. Addresses outgoing packets to 12.13.14.15:51620. + +You configure a NAT port-forwarding rule in your router which accepts incoming traffic on the *«public»* UDP port (51620) and uses Network Address Translation to change the destination IP address to the Raspberry Pi and destination port to the *«external»* UDP port (51720). 
In other words, each incoming packet is readdressed to 192.168.203.60:51720.
+
+Docker is listening on the Raspberry Pi's *«external»* UDP port 51720. Docker uses Network Address Translation to change the destination IP address to the WireGuard container and destination port to the *«internal»* UDP port (51820). In other words, each incoming packet is readdressed to 172.18.0.6:51820.
+
+The packet is then routed to the internal bridged network, and delivered to the WireGuard server process running in the container which is listening on the *«internal»* UDP port (51820).
+
+A reciprocal process occurs when the WireGuard server process sends packets back to the remote WireGuard client.
+
+The following table summarises the transformations as the client and server exchange information:
+
+![WireGuard NAT table](./images/wireguard-nattable.png)
+
+Even if you use port 51820 everywhere (the default), all this Network Address Translation still occurs. Keep this in mind if you are trying to debug WireGuard because you may actually find it simpler to understand what is going on if you use different numbers for the *«public»* and *«external»* ports.
+
+This model is a slight simplification because the remote client may also be operating behind a router performing Network Address Translation. It is just easier to understand the basic concepts if you assume the remote client has a publicly-routable IP address.
+
+## Debugging techniques { #debugging }
+
+### Monitor WireGuard traffic between your router and your Raspberry Pi { #tcpdumpExternal }
+
+If `tcpdump` is not installed on your Raspberry Pi, you can install it by running:
+
+``` console
+$ sudo apt install tcpdump
+```
+
+After that, you can capture traffic between your router and your Raspberry Pi by running:
+
+``` console
+$ sudo tcpdump -i eth0 -n udp port «external»
+```
+
+Press control+c to terminate the capture.
+
+### Monitor WireGuard traffic between your Raspberry Pi and the WireGuard container { #tcpdumpInternal }
+
+First, you need to add `tcpdump` to the container. You only need to do this once per debugging session. The package will remain in place until the next time you re-create the container.
+
+``` console
+$ docker exec wireguard bash -c 'apt update ; apt install -y tcpdump'
+```
+
+To monitor traffic:
+
+``` console
+$ docker exec -t wireguard tcpdump -i eth0 -n udp port «internal»
+```
+
+Press control+c to terminate the capture.
+
+### Is Docker listening on the Raspberry Pi's «external» port? { #listenExternal }
+
+``` console
+$ PORT=«external»; sudo nmap -sU -p $PORT 127.0.0.1 | grep "$PORT/udp"
+```
+
+There will be a short delay. The expected answer is either:
+
+* `«external»/udp open|filtered unknown` = Docker is listening
+* `«external»/udp closed unknown` = Docker is not listening
+
+Success implies that the container is also listening.
+
+### Is your router listening on the «public» port? { #listenPublic }
+
+``` console
+$ PORT=«public»; sudo nmap -sU -p $PORT downunda.duckdns.org | grep "$PORT/udp"
+```
+
+There will be a short delay. The expected answer is either:
+
+* `«public»/udp open|filtered unknown` = router is listening
+* `«public»/udp closed unknown` = router is not listening
+
+Note:
+
+* Some routers always return the same answer irrespective of whether the router is or isn't listening on the port being checked. This stops malicious users from working out which ports might be open. This test will not be useful if your router behaves like that. 
You will have to rely on `tcpdump` telling you whether your router is forwarding traffic to your Raspberry Pi. + +## The read-only flag { #readOnlyFlag } + +The `:ro` at the end of the following line in WireGuard's service definition means "read only": + +```yml +- /lib/modules:/lib/modules:ro +``` + +If that flag is omitted then WireGuard **may** try to update the `/lib/modules` path in your operating system. To be clear, `/lib/modules` is both **outside** the WireGuard container and **outside** the normal persistent storage area in the `./volumes` directory. + +The basic idea of containers is that processes are *contained*, include all their own dependencies, can be added and removed cleanly, and don't change the underlying operating system. + +Writing into `/lib/modules` is not needed on a Raspberry Pi, providing that Raspberry Pi OS is up-to-date. That is why the first step in the installation procedure tells you to bring the system up-to-date. + +If WireGuard refuses to install and you have good reason to suspect that WireGuard may be trying to write to `/lib/modules` then you can *consider* removing the `:ro` flag and re-trying. Just be aware that WireGuard will likely be modifying your operating system. + +## Updating WireGuard { #pullWireguard } + +To update the WireGuard container: + +``` console +$ cd ~/IOTstack +$ docker-compose pull wireguard +``` + +If a new image comes down, then: + +``` console +$ docker-compose up -d wireguard +$ docker system prune +``` + +### 2022-10-01 WireGuard migration { #migrateWireguard } + +WireGuard's designers have redefined the structure they expect in the persistent storage area. Before the change, a single volume-mapping got the job done: + +``` yml +volumes: +- ./volumes/wireguard:/config +``` + +After the change, three mappings are required: + +``` yml +volumes: +- ./volumes/wireguard/config:/config +- ./volumes/wireguard/custom-cont-init.d:/custom-cont-init.d +- ./volumes/wireguard/custom-services.d:/custom-services.d +``` + +In essence, inside the container: + +* old: `custom-cont-init.d` and `custom-services.d` directories were subdirectories of `/config`; +* new: `custom-cont-init.d` and `custom-services.d` are top-level directories alongside `/config`. + +The new `custom-cont-init.d` and `custom-services.d` directories also need to be owned by root. Previously, they could be owned by "pi". + +IOTstack users implementing WireGuard for the first time will get the correct structure. Existing users need to migrate. The process is a little messy so IOTstack provides a script to automate the restructure: + +``` console +$ cd ~/IOTstack +$ docker-compose down wireguard +$ ./scripts/2022-10-01-wireguard-restructure.sh +``` + +> see also [if downing a container doesn't work](../Basic_setup/index.md/#downContainer) + +In words: + +* Be in the correct directory +* Stop WireGuard (the script won't run if you don't do this) +* Run the script + +The script: + +1. Renames `./volumes/wireguard` to `./volumes/wireguard.bak`; then +2. Builds the new `./volumes/wireguard` structure using `./volumes/wireguard.bak` for its source material. +3. Finishes by reminding you to update your `docker-compose.yml` to adopt the new service definition. + +Your WireGuard client configurations (QR codes) are not affected by the migration. + +Once the migration is complete **and** you have adopted the new service definition, you can start WireGuard again: + +``` console +$ docker-compose up -d wireguard +``` + +You should test that your remote clients can still connect. 
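+
+One quick server-side check (a sketch, assuming the `wg` utility is available inside the container, which is normally the case for the linuxserver image) is to list the WireGuard interface status and look for a recent handshake from each peer after it reconnects:
+
+``` console
+$ docker exec wireguard wg show
+```
+
+Each peer that has connected successfully since the container started should show a "latest handshake" entry.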
Assuming a successful migration, you can safely delete the backup directory: + +``` console +$ sudo rm -rf ./volumes/wireguard.bak +``` + +> Always be careful when using `sudo` in conjunction with recursive remove. Double-check everything before pressing return. + +## Getting a clean slate { #cleanSlate } + +If WireGuard misbehaves, you can start over from a clean slate. You *may* also need to do this if you change any of the following environment variables: + +```yml +- SERVERURL= +- SERVERPORT= +- PEERS= +- PEERDNS= +``` + +The procedure is: + +1. If WireGuard is running, terminate it: + + ``` console + $ cd ~/IOTstack + $ docker-compose down wireguard + ``` + + > see also [if downing a container doesn't work](../Basic_setup/index.md/#downContainer) + +2. Erase the persistent storage area (essential): + + ``` console + $ sudo rm -rf ./volumes/wireguard + ``` + + > Be very careful with that command and double-check your work **before** you hit return. + + Erasing the persistent storage area: + + * destroys the old client configurations and invalidates any copies of QR codes. Existing clients will stop working until presented with a new QR code. + * deactivates [`PEERDNS=auto` with `custom-cont-init`](#customContInit). + +3. Start WireGuard: + + ``` console + $ docker-compose up -d wireguard + ``` + + This will generate new client configurations and QR codes for your devices. + + Remember to re-activate [`PEERDNS=auto` with `custom-cont-init`](#customContInit) if you need it. diff --git a/docs/Containers/WordPress.md b/docs/Containers/WordPress.md new file mode 100644 index 000000000..5a78188d7 --- /dev/null +++ b/docs/Containers/WordPress.md @@ -0,0 +1,270 @@ +# WordPress + +WordPress is a web content-management system. + +## Resources + +- [WordPress home page](https://wordpress.org) + + - [documentation](https://wordpress.org/documentation/) + +- [DockerHub](https://hub.docker.com/_/wordpress) +- [GitHub](https://github.com/docker-library/wordpress) + +## Overview + +You need to perform two steps before WordPress can be launched: + +1. [Install the service definition](#wpInst). +2. [Configure the environment](#wpConfig). + +Note: + +* Do **not** "up" your stack until you have completed step 2. + + +## Install the service definition + +Be in the correct directory: + +``` console +$ cd ~/IOTstack +``` + +### option 1 - the IOTstack menu + +1. Launch the menu + + ``` console + $ ./menu.sh + ``` + +2. Choose "Build Stack". +3. Place the cursor on "wordpress" and press space to select it. +4. Press enter to build the stack. +5. Place the cursor on "Exit" and press enter. + +### option 2 - manual from IOTstack templates + +When IOTstack is cloned from GitHub, the default for your local copy of the repository is to be on the "master" branch. Master-branch templates are left-shifted by two spaces with respect to how they need to appear in `docker-compose.yml`. The following `sed` command prepends two spaces to the start of each line: + +``` console +$ sed -e "s/^/ /" ./.templates/wordpress/service.yml >>docker-compose.yml +``` + +Templates on the "old-menu" branch already have proper alignment, so `cat` can be used: + +``` console +$ cat ./.templates/wordpress/service.yml >>docker-compose.yml +``` + + +## Configure the environment + +### check dependency + +The password-generation steps in the [next section](#pwgen) assume `uuidgen` is available on your system. 
The following command installs `uuidgen` if it is not present already: + +``` console +$ [ -z "$(which uuidgen)" ] && sudo apt update && sudo apt install -y uuid-runtime +``` + + +### generate passwords + +WordPress relies on MariaDB, and MariaDB requires both a user password and a root password. You can generate the passwords like this: + +``` console +$ echo "WORDPRESS_DB_PASSWORD=$(uuidgen)" >>~/IOTstack/.env +$ echo "WORDPRESS_ROOT_PASSWORD=$(uuidgen)" >>~/IOTstack/.env +``` + +Key points: + +1. You will not need to know either of these passwords in order to use WordPress. + + > These passwords govern access to the WordPress database (the `wordpress_db` container). WordPress (the `wordpress` container) has a separate system of credentials. You set up an administrator account the first time you [login to WordPress](#wordPressGUI). + +2. You will not need to know either password in order to use the `mysql` command line interface to inspect the WordPress database. See [accessing the MariaDB command line interface](#mariaDBcli). +3. The WordPress database container does not expose any ports to the outside world. That means you can't use general-purpose MariaDB/MySQL GUI-based front-ends to reach the WordPress database. +4. Both passwords are applied when the MariaDB container is first initialised. Changing either password value in `.env` will break your installation. + + +### set hostname + +WordPress (running inside the container) needs to know the domain name of the host on which the container is running. You can satisfy the requirement like this: + +``` console +$ echo "WORDPRESS_HOSTNAME=$HOSTNAME.local" >>~/IOTstack/.env +``` + +The above assumes the host is advertising a multicast domain name. This is a safe assumption for Raspberry Pis but may not necessarily be applicable in other situations. If your host is associated with a fully-qualified domain name (A record or CNAME), you can use that instead. For example: + +``` console +$ echo "WORDPRESS_HOSTNAME=iot-hub.my.domain.com" >>~/IOTstack/.env +``` + +### checking your WordPress environment values + +You can confirm that the passwords and hostname have been added to `.env` like this: + +``` +$ grep "^WORDPRESS" ~/IOTstack/.env +WORDPRESS_DB_PASSWORD=41dcbe76-9c39-4c7f-bd65-2f0421bccbeb +WORDPRESS_ROOT_PASSWORD=ee749d72-f1a5-4bc0-b182-21e8284f9fd2 +WORDPRESS_HOSTNAME=raspberrypi.local +``` + +### alternative method + +If you prefer to keep your environment values inline in your `docker-compose.yml` rather than in the `.env` file then you can achieve the same result by editing the service definitions as follows: + +* `wordpress`: + + ``` yaml + environment: + WORDPRESS_DB_PASSWORD: «yourUserPasswordHere» + hostname: «hostname».«domain» + ``` + +* `wordpress_db`: + + ``` yaml + environment: + MYSQL_ROOT_PASSWORD: «yourRootPasswordHere» + MYSQL_PASSWORD: «yourUserPasswordHere» + ``` + +## Starting WordPress + +``` console +$ cd ~/IOTstack +$ docker-compose up -d wordpress +``` + +This starts both WordPress and its database. + + +## Accessing the WordPress GUI + +Use a URL in the following form, where `«host»` should be the value you chose at [set hostname](#setHostname). + +``` +http://«host»:8084 +``` + +Examples: + +* `http://raspberrypi.local:8084` +* `http://iot-hub.my.domain.com:8084` + +You will be prompted to: + +1. Set your language; and +2. Create your administrator account. + +After that, you should refer to the [WordPress documentation](https://wordpress.org/documentation/). 
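+
+If you would like a quick command-line check that the web server is answering before you open a browser, fetching just the response headers with `curl` is enough (the hostname below is only an example; substitute your own):
+
+``` console
+$ curl -I http://raspberrypi.local:8084
+```
+
+Any HTTP response at all (even a redirect to the WordPress installer) indicates that the container is up and reachable on port 8084.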
+
+
+## About MariaDB
+
+The MariaDB instance associated with WordPress is **private** to WordPress. It is included along with the WordPress service definition. You do **not** have to select MariaDB in the IOTstack menu.
+
+> There is nothing stopping you from *also* selecting MariaDB in the IOTstack menu. Multiple instances of MariaDB will coexist quite happily but they are separate and distinct Relational Database Management Systems (RDBMS).
+
+
+### Accessing the MariaDB command line interface
+
+If you need to inspect or manipulate the WordPress database, begin by opening a shell into the WordPress MariaDB container:
+
+```
+$ docker exec -it wordpress_db bash
+```
+
+While you are in the shell, you can use the `MYSQL_ROOT_PASSWORD` environment variable to reference the root password. For example:
+
+``` console
+# mysql -p$MYSQL_ROOT_PASSWORD
+Welcome to the MariaDB monitor. Commands end with ; or \g.
+Your MariaDB connection id is 169
+Server version: 10.11.6-MariaDB-log Alpine Linux
+
+Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.
+
+Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
+
+MariaDB [(none)]>
+```
+
+Note:
+
+* There is no space between the `-p` and `$MYSQL_ROOT_PASSWORD`. If you insert a space, `mysql` will prompt you to enter the password interactively.
+
+Once you have opened a session using `mysql`, you can execute MySQL commands. For example:
+
+```
+MariaDB [(none)]> show databases;
++--------------------+
+| Database           |
++--------------------+
+| information_schema |
+| mysql              |
+| performance_schema |
+| sys                |
+| wordpress          |
++--------------------+
+5 rows in set (0.010 sec)
+```
+
+To exit `mysql`, either press control+d or use the `exit` command:
+
+```
+MariaDB [(none)]> exit
+Bye
+
+#
+```
+
+Similarly, control+d or `exit` will terminate the container's `bash` shell and return you to the host's command line.
+
+## References to `nextcloud`
+
+Both the `wordpress` and `wordpress_db` service definitions connect to the `nextcloud` **network**.
+
+> Please note the emphasis on "**network**".
+
+The `nextcloud` network is an internal *private* network created by `docker-compose` to facilitate data-communications between a user-facing service (like WordPress) and an associated database back-end (like MariaDB).
+
+The NextCloud container was the first to use the private-network strategy so the "nextcloud" name is an accident of history. In an ideal world, the network would be renamed to something which more accurately reflected its purpose, like "databases". Unfortunately, the IOTstack menu lacks the facilities needed to update *existing* deployments so the most likely result of any attempt at renaming would be to break existing stacks.
+
+At runtime, the `nextcloud` network has the name `iotstack_nextcloud`, and exists alongside the `iotstack_default` network which is shared by other IOTstack containers.
+
+The material point is that, even though WordPress has nothing to do with NextCloud, the references to the `nextcloud` network are not mistakes. They are intentional. 
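+
+If you are curious, you can list the networks and see which containers are attached to each one while the stack is running (the names shown assume the default `iotstack` project name):
+
+``` console
+$ docker network ls --filter "name=iotstack"
+$ docker network inspect iotstack_nextcloud
+```
+
+The `inspect` output includes a `Containers` section listing each attached container and the IP address it has been given on that network.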
+
+## Getting a clean slate
+
+If you start the WordPress container and *then* decide that you need to change its [environment variables](#wpConfig), you must first erase the container's persistent store:
+
+``` console
+$ cd ~/IOTstack
+$ docker-compose down wordpress wordpress_db
+$ sudo rm -rf ./volumes/wordpress
+```
+
+Notes:
+
+* Both the `wordpress` and `wordpress_db` containers need to be taken down before the persistent store can be removed safely.
+* Be very careful with the `sudo rm` command. Double-check *before* pressing the return key!
+
+Once the persistent store has been erased, you can change the [environment variables](#wpConfig).
+
+When you are ready, start WordPress again:
+
+``` console
+$ cd ~/IOTstack
+$ docker-compose up -d wordpress
+```
+
+Note:
+
+* The `wordpress_db` container does not need to be brought up explicitly. It is started automatically as a by-product of starting `wordpress`.
diff --git a/docs/Containers/X2go.md b/docs/Containers/X2go.md
new file mode 100644
index 000000000..6983ac05c
--- /dev/null
+++ b/docs/Containers/X2go.md
@@ -0,0 +1,24 @@
+# x2go
+x2go is an "alternative" to using VNC for a remote connection. It uses X11 forwarding over ssh to provide a desktop environment.
+
+Reason for using:
+I have a Pi 4 and I didn't buy a micro-HDMI cable. You can use VNC, but you are limited to an 800x600 window.
+
+## Installation
+
+Install with `sudo apt install x2goserver`
+
+x2go can't connect to the native Raspbian Desktop so you will need to install another desktop environment with `sudo tasksel`
+
+![image](https://user-images.githubusercontent.com/46672225/69007692-b4df0a00-0949-11ea-82d5-09a6833df186.png)
+
+I chose Xfce because it is lightweight.
+
+Install the x2go client from their [website](https://wiki.x2go.org/doku.php/download:start)
+
+Now I have a full-screen client
+
+![image](https://user-images.githubusercontent.com/46672225/69007780-0045e800-094b-11ea-9626-4947595a016e.png)
+
+## YouTube tutorial
+[Laurence systems](https://www.youtube.com/watch?v=oSuy1TS8FGs)
\ No newline at end of file
diff --git a/docs/Containers/ZeroTier-vs-WireGuard.md b/docs/Containers/ZeroTier-vs-WireGuard.md
new file mode 100644
index 000000000..577d04509
--- /dev/null
+++ b/docs/Containers/ZeroTier-vs-WireGuard.md
@@ -0,0 +1,134 @@
+# ZeroTier vs WireGuard
+
+ZeroTier and WireGuard are not mutually exclusive. You can run both if you wish. The purpose of this document is to try to offer some general guidance about the two solutions.
+
+## Installation differences
+
+Assume your goal is to give yourself access to your home network when you are on the road. This is something you can do with both WireGuard and ZeroTier.
+
+### WireGuard
+
+Providing you follow IOTstack's [WireGuard documentation](WireGuard.md) faithfully, WireGuard is a bit easier to get going than ZeroTier.
+
+Although it helps to have some feeling for TCP/IP fundamentals, you definitely don't need to be a comms guru.
+
+Using WireGuard to access your home network when you are on the road involves:
+
+1. A **routable** IP address on the WAN side of your home router.
+
+    > The IP address on the WAN side of your home router is allocated by your ISP. It can be fixed or dynamic. If you have not explicitly signed up for a fixed IP address service then your address is probably dynamic and can change each time you reboot your router, or if your ISP "bounces" your connection.
+
+2. 
If your WAN IP address is dynamic then you need a mechanism for making it discoverable using a Dynamic Domain Name System (DDNS) service such as DuckDNS or NoIP.com. + + > That's a separate registration and setup process. + +3. A WireGuard server running in a Docker container on your Raspberry Pi. Ideally, you give some thought to the clients you will need so that the QR codes can be generated the first time you bring up the container. + +4. A WireGuard client running in each remote device. Each client needs to be configured with a QR code or configuration file created in the previous step. + +5. A port-forwarding rule in your home router so that traffic originated by remote WireGuard clients can be relayed to the WireGuard server running on your Raspberry Pi. + +### ZeroTier + +Implementing ZeroTier is not actually any more difficult to get going than WireGuard. ZeroTier's apparent complexity arises from the way it inherently supports many network topologies. Getting it set up to meet your requirements takes planning. + +You still don't need to be a comms guru but it will help if you've had some experience making TCP/IP do what you want. + +Using ZeroTier to access your home network when you are on the road involves: + +1. Registering for a ZeroTier account (free and paid levels). + +2. Either (or both) of the following: + + - A ZeroTier client running on every device at your home to which you need remote access; + - A ZeroTier-router client running in a Docker container on a Raspberry Pi at your home. This is analogous to the WireGuard server. + +3. A ZeroTier client running in each remote device. + +4. Every ZeroTier client (home and remote) needs to be provided with your ZeroTier network identifier. You also need to authorise each client to join your ZeroTier network. Together, these are the equivalent of WireGuard's QR code. + +5. Depending on what you want to achieve, you may need to configure one or more static routes in the ZeroTier Cloud and in your home router. + +The things you **don't** need to worry about include: + +* Whether the IP address on the WAN side of your home router is routable; +* Any port-forwarding rules in your home router; or +* Setting up a Dynamic Domain Name System (DDNS) service. + +## CGNAT – WireGuard's nemesis + +Now that you have some appreciation for the comparative level of difficulty in setting up each service, let's focus on WireGuard's key problem. + +WireGuard depends on the IP address on the WAN side of your home router being *routable*. What that means is that the IP address has to be known to the routing tables of the core routers that drive the Internet. + +You will probably have seen quite a few of the addresses in the following table: + +| Table 1: Reserved IP Address Ranges | +|:--:| +| ![selected IPv4 Address Ranges](images/zerotier-ipv4-ranges.jpeg) | + +Nothing in that list is routable. That list is also far from complete (see [wikipedia](https://en.wikipedia.org/wiki/Reserved_IP_addresses)). The average IOTstack user has probably encountered at least: + +* 172.16/12 - commonly used by Docker to allocate its internal networks. +* 192.168/16 - used by a lot of consumer equipment such as home routers. + +| Figure 1: Router WAN port using CGNAT range | +|:--:| +| ![Image title](images/zerotier-cgnat-topology-dark.png#only-dark)![Image title](images/zerotier-cgnat-topology-light.png#only-light) | + +Consider Figure 1. On the left is a cloud representing your home network where you probably use a subnet in the 192.168/16 range. 
The 192.168/16 range is not routable so, to exchange packets with the Internet, your home router needs to perform Network Address Translation (NAT). + +Assume a computer on your home network has the IP address 192.168.1.100 and wants to communicate with a service on the Internet. What the NAT service running in your home router does is: + +* in the *outbound* direction, packets leaving your LAN will have a source IP address of 192.168.1.100. NAT replaces the source IP address with the IP address of the WAN side of your home router. Let's say that's 200.1.2.3. +* the system at the other end thinks the packets are coming from 200.1.2.3 so that's what it uses when it sends back reply packets. +* in the *inbound* direction, packets arrive with a destination IP address of 200.1.2.3. NAT replaces the destination address 200.1.2.3 with 192.168.1.100 and sends the packet to the device on your home network that originated the traffic. + +The NAT service running in your router builds tables that keep track of everything needed to make this work but, and this is a critical point, NAT can only build those tables when devices on your home LAN **originate** the traffic. If a packet addressed to your WAN IP arrives unexpectedly and NAT can't figure out what to do from its tables, the packet gets dropped. + +A remote WireGuard client trying to originate a connection with the WireGuard server running in your IOTstack is an example of an "unexpected packet". The reason it doesn't get dropped is because of the port-forwarding rule you set up in your router. That rule essentially fools NAT into believing that the WireGuard server originated the traffic. + +If the IP address your ISP assigns to your router's WAN interface is *routable* then your traffic will follow the green line in [Figure 1](#figure1). It will transit your ISP's network, be forwarded to the Internet, and reply packets will come back the same way. + +However, if the WAN IP address is not routable then your traffic will follow the red line in [Figure 1](#figure1). What happens next is another round of Network Address translation. Using the same address examples above: + +* Your router "A" replaces 192.168.1.100 with the IP address of the WAN side of your home router but, this time, that's a non-routable address like 100.64.44.55; and then +* Your ISP's router "B" replaces 100.64.44.55 with 200.1.2.3. + +The system at the other end sees 200.1.2.3 as the source address so that's what it uses in reply packets. + +Both NAT engines "A" and "B" are building tables to make this work but, again, it is all in response to **outbound** traffic. If your remote WireGuard client tries to originate a connection with your WireGuard server by addressing the packet to "B", it's unexpected and gets dropped. + +Unlike the situation with your home router where you can add a port-forwarding rule to fool NAT into believing your WireGuard server originated the traffic, you don't control your ISP's NAT router so it's a problem you can't fix. + +Your remote WireGuard client can't bypass your ISP's NAT router by addressing the packet to "A" because that address is not routable, so nothing on the Internet has any idea of where to send it, so the packet gets dropped. + +Due to the shortage of IPv4 addresses, it is increasingly common for ISPs to apply their own NAT service after yours. 
Generally, ISPs use the 100.64/10 range so, if you connect to your home router's user interface and see something like the IP address circled in [Figure 2](#figure2), you can be sure that you are the victim of "[CGNAT](https://datatracker.ietf.org/doc/html/rfc6598)". + +| Figure 2: Router WAN port using CGNAT range | +|:--:| +| ![Router CGNAT WAN IP address](images/zerotier-cgnat-wan-interface.jpeg) | + +While seeing a router WAN address that is not routable proves that your ISP is performing an additional Network Address Translation step, seeing an IP address that *should* be routable does not necessarily prove the opposite. The only way to be certain is to compare the IP address your router shows for its WAN interface with the IP address you see in a service like [whatsmyip.com](https://whatsmyip.com). If they are not the same, your ISP is likely applying its own NAT service. + +If WireGuard won't work and you suspect your ISP is applying its own NAT service, you have the following options: + +1. Negotiate with your ISP to be allocated a fixed IP address in a routable range. You may be asked to pay ongoing fees for this. +2. Change your ISP for one that still allocates routable IP addresses. But this may merely postpone the inevitable. To conserve dwindling IPv4 addresses, many ISPs are implementing Carrier Grade Network Address Translation ([CGNAT](https://datatracker.ietf.org/doc/html/rfc6598)). +3. If your ISP offers it, implement IPv6 on your home network. This is a non-trivial task and well beyond the scope of IOTstack's documentation. +4. Use a Virtual Private Server (VPS) to work around the problem. Explaining this is also well beyond the scope of IOTstack. Google "wireguard cgnat", grab a cup of coffee, and settle down for an afternoon's reading. +5. Switch to ZeroTier. You can think of it as being "like WireGuard with its own VPS". + +## Site-to-site tunnelling + +You can use both WireGuard and ZeroTier to set up secure site-to-site routing such as between your home and the homes of your friends and relatives. + +If you want to use WireGuard: + +1. Make sure that all sites running WireGuard obey the CGNAT constraints mentioned above. +2. Conduct your own research into how to set it up because the IOTstack documentation for WireGuard does not cover the topic. + +If you want to use ZeroTier: + +1. ZeroTier is immune to CGNAT constraints. +2. The IOTstack documentation for ZeroTier explains the how-to. diff --git a/docs/Containers/ZeroTier.md b/docs/Containers/ZeroTier.md new file mode 100755 index 000000000..30a682fd4 --- /dev/null +++ b/docs/Containers/ZeroTier.md @@ -0,0 +1,1009 @@ +# ZeroTier + +ZeroTier is a Virtual Private Network (VPN) solution that creates secure data-communications paths between devices at different locations. You can use ZeroTier to: + +* give remote devices secure access to your home network's local services; +* provide secure network-to-network communications between your home network and the home networks of your friends and relations; and +* bypass carrier-grade network address translation (CGNAT) which can befuddle WireGuard. + +## ZeroTier Docker images { #zeroTierImages } + +This documentation covers two DockerHub images and two IOTstack templates: + +* `zyclonite:zerotier` + + This image implements a standard ZeroTier client. It is what you get if you choose "ZeroTier-client" from the IOTstack menu. Its function is identical to the clients you install on Android, iOS, macOS and Windows. 
+ +* `zyclonite:zerotier-router` + + This is an enhanced version of the ZeroTier client. It is what you get if you choose "ZeroTier-router" from the IOTstack menu. In addition to connecting your Raspberry Pi to your ZeroTier network, it can also forward packets between remote clients and devices attached to your home LAN. It is reasonably close to WireGuard in its general behaviour. + +## References { #references } + +* ZeroTier: + + - [Home Page (sign-up)](https://www.zerotier.com) + - [ZeroTier Central (management)](https://my.zerotier.com) + - [Documentation](https://docs.zerotier.com) + - [Knowledge Base - Networking](https://zerotier.atlassian.net/wiki/spaces/SD/pages/7110657/Networking) + - [GitHub (ZeroTier source code)](https://github.com/zerotier/ZeroTierOne) + +* zyclonite/zerotier: + + - [GitHub (image source code)](https://github.com/zyclonite/zerotier-docker) + - [DockerHub (image repository)](https://hub.docker.com/r/zyclonite/zerotier) + +* [IOTstack discussion paper : ZeroTier vs WireGuard](ZeroTier-vs-WireGuard.md) + +## Definition { #definitions } + +* *Catenet* (a con**cate**nation of **net**works) means the collection of networks and clients that can reach each other either across a local network or via a path through a ZeroTier Cloud. + +## Getting started with ZeroTier { #gettingStarted } + +### Create an account { #createAccount } + +ZeroTier offers both free and paid accounts. A free account offers enough for the average home user. + +Go to the [Zerotier downloads](https://www.zerotier.com/download/) page. If you wait a little while, a popup window will appear with a "Start here" link which triggers a wizard to guide you through the registration and setup process. At the end, you will have an account plus an initial ZeroTier Network ID. + +> Tip: Make a note of your ZeroTier network ID - you will need it! + +You should take the time to work through the configuration page for your newly-created ZeroTier network. At the very least: + +1. Give your ZeroTier network a name. At this point you only have a single network but you may decide to create more. Meaningful names are always easier on the brain than 16-hex-digit numbers. +2. Scroll down until you see the "IPv4 Auto-Assign" area. By default, ZeroTier will have done the following: + + - Enabled "Auto-Assign from Range"; + - Selected the "Easy" button; and + - Randomly-selected one of the [RFC1918](https://www.rfc-editor.org/rfc/rfc1918.html) private ranges below the line. + + If the range selected by ZeroTier does not begin with "10.x", consider changing the selection to something in that range. This documentation uses `10.244.*.*` throughout and it may be easier to follow if you do something similar. + + > Tip: avoid `10.13.*.*` if you are also running WireGuard. + + The logic behind this recommendation is that you can use 10.x.x.x for ZeroTier and 192.168.x.x for your home networks, leaving 172.x.x.x for Docker. That should make it easier to understand what is going on when you examine routing tables. + + Nevertheless, nothing about ZeroTier *depends* on you using a 10.x network. If you have good reasons for selecting from a different range, do so. It's *your* network! + +### Install client on "remote" { #singleRemote } + +You should install ZeroTier client software on at least one mobile device (laptop, iDevice) that is going to connect remotely. You don't need to go to a remote location or fake "remoteness" by connecting through a cellular system. 
You can do all this while the device is connected to your home network. + +Connecting a client to your ZeroTier network is a three-step process: + +1. Install the client software on the device. The [Zerotier downloads](https://www.zerotier.com/download/) page has clients for every occasion: Android, iOS, macOS, Unix and Windows. + +2. Launch the client and enter your ZeroTier Network ID: + + - on macOS, launching the app adds a menu to the right hand side of your menu bar. From that menu, choose "Join New Network…", enter your network ID into the dialog box and click "Join". + + - on iOS, launching the app for the first time presents you with a privacy policy which you need to accept, followed by a mostly-blank screen: + + - Tap +, accept the privacy policy (again) and enter your network ID into the field. + - Leave the other settings alone and tap "Add Network". Acknowledge any security prompt (what you see depends on your version of iOS). + - Turn on the slider button. + + - Android and Windows – follow your nose. + +3. In a web browser: + + - connect to ZeroTier Central: [https://my.zerotier.com](https://my.zerotier.com) + - login to your account + - click on your network ID + - scroll down to the "Members" area + - find the newly-added client + - authorise the client by turning on its "Auth?" checkbox + - fill in the "Name" and, optionally, the "Description" fields so that you can keep track of the device associated with the client ID. Again, names are easier on the brain than numbers. + +Each time you authorise a client, ZeroTier assigns an IP address from the range you selected in the "IPv4 Auto-Assign" area. Most of the time this is exactly what you want but, occasionally, you may want to override ZeroTier's choice. The simplest approach is: + +- Type a new IP address into the text field to the right of the + ; + + > your choice needs to be from the range you selected in the "IPv4 Auto-Assign" area + +- Click the + to accept the address; then +- Delete the unwanted address by clicking the trash-can icon to its left. + +ZeroTier IP addresses are like fixed assignments from a DHCP server. They persist. The same client will always get the same IP address each time it connects. + +Key point: + +* Clients can't join your ZeroTier network without your approval. If a new client appears in the list which you don't recognise, click the trash-can icon at the far right of its row. That denies the client access - permanently. The client needs to be reset before it can make another attempt. + +### Other devices { #otherRemotes } + +Do **not** install ZeroTier on your Raspberry Pi by following the Linux instructions on the [Zerotier downloads](https://www.zerotier.com/download/) page. Those instructions lead to a "native" installation. We are about to do all that with a Docker container. + +You *can* install ZeroTier clients on other systems but you should hold off on doing that for now because, ultimately, it may not be needed. Whether you need ZeroTier client software on any device will depend on the decisions you make as you follow these instructions. + +## Topology 1: ZeroTier client-only { #topology1 } + +To help you choose between the ZeroTier-client and ZeroTier-router containers, it is useful to study a network topology that does not include routing. + +| Topology 1: Remote client accesses client on home network | +|:--:| +| ![ZeroTier - topology 1](images/zerotier-topology-1.jpeg) | + +Four devices are shown: + +* A is a Raspberry Pi running "ZeroTier-client" installed by IOTstack. 
+* B is some other device (another Pi, Linux box, Mac, PC). + + > The key thing to note is that B is **not** running ZeroTier client software. + +* C is your local router, likely an off-the-shelf device running a custom OS. + + >Again, assume C is **not** running ZeroTier client software. + +* G is the [remote client](#singleRemote) you set up above. + +Table 1 summarises what you can and can't do from the remote client G: + +| Table 1: Reachability using only ZeroTier clients | +|:--:| +| ![ZeroTier - topology 1 reachability](images/zerotier-topology-1-reachability.jpeg) | + +G can't reach B or C, *directly,* because those devices are not running ZeroTier client software. + +G *can* reach B and C, *indirectly,* by first connecting to A. An example would be G opening an SSH session on A then, within that session, opening another SSH session on B or C. + +It should be apparent that you can also solve this problem by installing ZeroTier client software on B. It would then have its own interface in the 10.244.0.0/16 network that forms the ZeroTier Cloud and be reachable directly from G. The *no* entries would then become *yes*, with the caveat that G would reach B via its interface in the 10.244.0.0/16 network. + +The same would be true for your router C, providing it was capable of running ZeroTier client software. + +Lessons to learn: + +1. All hosts running a ZeroTier client and sharing a common ZeroTier Network ID can reach each other. +2. You can springboard from a host that is reachable to a host that is otherwise unreachable, but your ability to do that in any given situation may depend on the protocol you are trying to use. + +ZeroTier clients are incredibly easy to set up. It's always: + +1. Install the client software. +2. Tell the client the network ID. +3. Authorise the device. + +After that, it's full peer-to-peer interworking. + +The *problem* with this approach is that it does not scale if you are only signed up for a free ZeroTier account. Free accounts are limited to 25 clients. After that you need a paid account. + +### Installing ZeroTier-client { #topology1install } + +Now that you understand what the ZeroTier-client will and won't do, if you want to install the ZeroTier client on your Raspberry Pi via IOTstack, proceed like this: + +1. Run the IOTstack menu and choose "Zerotier-client". +2. Bring up the container: + + ``` console + $ cd ~/IOTstack + $ docker-compose up -d zerotier-client + ``` + +3. Tell the container to join your ZeroTier network by replacing «NetworkID» with your ZeroTier Network ID: + + ``` console + $ docker exec zerotier zerotier-cli join «NetworkID» + ``` + + You only need to do this **once**. The information is kept in the container's persistent storage area. Thereafter, the client will rejoin the same network each time the container comes up. + +4. Go to [ZeroTier Central](https://my.zerotier.com) and authorise the device. + +Job done! There are no environment variables to set. It just works. + +## Topology 2: ZeroTier router { #topology2 } + +This topology is a good starting point for using ZeroTier to replicate a WireGuard service running on your Raspberry Pi. Remember, you don't have to make an either/or choice between ZeroTier and WireGuard. You *can* run both containers side-by-side. 
+ +| Topology 2: Remote client accesses home network | +|:--:| +| ![ZeroTier - topology 2](images/zerotier-topology-2.jpeg) | + +With this structure in place, all hosts in [Topology 2](#topology2) can reach each other *directly.* All the cells in [Table 1](#table1) are *yes.* Full peer-to-peer networking! + +### Installing ZeroTier-router { #topology2install } + +The ZeroTier-router container is just the ZeroTier-client container with some `iptables` rules. However, you can't run both containers at the same time. If ZeroTier-client is installed: + +1. Terminate the container if it is running: + + ``` console + $ cd ~/IOTstack + $ docker-compose down zerotier-client + ``` + + > See also [if downing a container doesn't work](../Basic_setup/index.md/#downContainer) + +2. Remove the existing service definition, either by: + + - running the menu and de-selecting "ZeroTier-client"; or + - editing your `docker-compose.yml` to remove the service definition. + +The ZeroTier-router *can* re-use the ZeroTier-client configuration (and vice-versa) so you should **not** erase the persistent storage area at: + +``` +~/IOTstack/volumes/zerotier-one/ +``` + +Keeping the configuration also means you won't need to authorise the ZeroTier-router client when it first launches. + +To install Zerotier-router: + +1. Run the IOTstack menu and choose "Zerotier-router". + +2. Use a text editor to open your `docker-compose.yml`. Find the ZeroTier service definition and the environment variables it contains: + + ``` yaml linenums="5" + environment: + - TZ=${TZ:-Etc/UTC} + - PUID=1000 + - PGID=1000 + # - ZEROTIER_ONE_NETWORK_IDS=yourNetworkID + - ZEROTIER_ONE_LOCAL_PHYS=eth0 wlan0 + - ZEROTIER_ONE_USE_IPTABLES_NFT=true + - ZEROTIER_ONE_GATEWAY_MODE=both + ``` + + You should: + + 1. Set your timezone. + 2. Uncomment line 9 and replace "yourNetworkID" with your ZeroTier Network ID. This variable only has an effect the first time ZeroTier is launched. It is an alternative to executing the following command after the container has come up the first time: + + ``` console + $ docker exec zerotier zerotier-cli join «NetworkID» + ``` + + The reason for the plural variable name ("IDS") is because it supports joining multiple networks on first launch. Network IDs are space-separated, like this: + + ``` yaml linenums="9" + - ZEROTIER_ONE_NETWORK_IDS=3926d64e8ff148b3 ef7a364a687c45e0 + ``` + + 3. If necessary, change line 10 to represent your active local interfaces. Examples: + + - if your Raspberry Pi only connects to WiFi, you would use: + + ``` yaml linenums="10" + - ZEROTIER_ONE_LOCAL_PHYS=wlan0 + ``` + + - if both Ethernet and WiFi are active, use: + + ``` yaml linenums="10" + - ZEROTIER_ONE_LOCAL_PHYS=eth0 wlan0 + ``` + +3. Launch the container: + + ``` console + $ cd ~/IOTstack + $ docker-compose up -d zerotier-router + ``` + +4. If the Raspberry Pi running the service has not previously been authorised in [ZeroTier Central](https://my.zerotier.com), authorise it. Make a note of the IP address assigned to the device in ZeroTier Central. In [Topology 2](#topology2) it is 10.244.0.1. + +5. You also need to set up some static routes: + + * In ZeroTier Central … + + Please start by reading [Managed Routes](#managedRoutes). + + Once you understand how to construct a valid less-specific route, go to [ZeroTier Central](https://my.zerotier.com) and find the "Managed Routes" area. Under "Add Routes" are text-entry fields. 
Enter the values into the fields: + + ``` + Destination: 192.168.202.0/23 (via) 10.244.0.1 + ``` + + Click Submit. + + With reference to [Topology 2](#topology2): + + - 192.168.202.0/23 is the less-specific route to the home network; and + - 10.244.0.1 is the IP address of A in the ZeroTier Cloud. + + This route teaches ZeroTier clients that the 10.244.0.0/16 network offers a path to the less-specific range (192.168.202.0/23) encompassing the home subnet (192.168.203.0/24). + + Remote clients can then reach devices on your home network. When a packet arrives on A, it is passed through NAT so devices on your home network "think" the packet has come from A. That means they can reply. However, this only works for connections that are initiated by remote clients like G. Devices on your home network like B and C can't initiate connections with remote clients because they don't know where to send the traffic. That's the purpose of the next static route. + + * In your home router C … + + Add a static route to the ZeroTier Cloud pointing to the IP address of your Raspberry Pi on your home network. In [Topology 2](#topology2), this is: + + ``` + 10.244.0.0/16 via 192.168.203.50 + ``` + + > You need to figure out how to add this route in your router's user interface. + + Here's an example of what actually happens once this route is in place. Suppose B wants to communicate with G. B is not a ZeroTier client so it doesn't know that A offers a path to G. The IP stack running on B sends the packet to the default gateway C (your router). Because of the static route, C sends the packet to A. Once the packet arrives on A, it is forwarded via the ZeroTier Cloud to G. + + The process of a packet going into a router and coming back out on the same interface is sometimes referred to as "one-armed routing". It may seem inefficient but C also sends B what is called an "ICMP Redirect" message. This teaches B that it can reach G via A so, in practice, not every B-to-G packet needs to transit C. + +## Topology 3: Full tunnel { #topology3 } + +The ZeroTier Cloud does not offer a path to the Internet. It is not a VPN solution which will allow you to pretend to be in another location. Every ZeroTier client still needs its own viable path to the Internet. + +| Topology 3: Remote client tunnels to Internet via Home Network | +|:--:| +| ![ZeroTier - topology 3](images/zerotier-topology-3.jpeg) | + +In terms of traffic flows, what this means in a practical sense is: + +* Traffic from G to [A, B or C] (and vice versa) flows over the ZeroTier Cloud and is securely end-to-end encrypted in transit; but +* All other traffic goes straight to the ISP or cellular carrier and is not encrypted. + +This is the routing table you would expect to see on G: + +``` data linenums="1" +Destination Gateway Genmask Flags MSS Window irtt Iface +0.0.0.0 172.20.10.1 0.0.0.0 UG 0 0 0 wlan0 +10.244.0.0 0.0.0.0 255.255.0.0 U 0 0 0 ztr2qsmswx +172.20.10.0 0.0.0.0 255.255.255.240 U 0 0 0 wlan0 +192.168.202.0 10.244.0.1 255.255.254.0 UG 0 0 0 ztr2qsmswx +``` + +Executing a `traceroute` to 8.8.8.8 (Google DNS) shows: + +``` data +$ traceroute 8.8.8.8 +traceroute to 8.8.8.8 (8.8.8.8), 30 hops max, 60 byte packets + 1 172.20.10.1 (172.20.10.1) 4.706 ms 4.572 ms 4.398 ms + 2 10.111.9.189 (10.111.9.189) 49.599 ms 49.807 ms 49.626 ms +… +11 dns.google (8.8.8.8) 32.710 ms 32.047 ms +``` + +You can see that the first hop is via 172.20.10.1. This means the traffic is not flowing over the ZeroTier Cloud (10.244.0.0/16).
The traffic is reaching 8.8.8.8 via the default route through the phone's connection to the carrier's network (172.20.10.0/28). + +ZeroTier supports an option for forcing all of a client's traffic to pass over the ZeroTier Cloud. The client's traffic is then end-to-end encrypted, at least until it reaches your home. Traffic destined for the Internet will then pass back out through your home router. From the perspective of the Internet, your remote client will appear to be at your home. + +Enabling this feature is a two-step process: + +1. In ZeroTier Central, find the "Managed Routes" area and add: + + ``` + Destination: 0.0.0.0/0 (via) 10.244.0.1 + ``` + + This is setting up a "default route". 10.244.0.1 is the IP address of A in the ZeroTier network. + +2. Each remote client (and **only** remote clients) needs to be instructed to accept the default route from the ZeroTier Cloud: + + - iOS clients: + + 1. Launch the ZeroTier One app. + 2. If the connection is not already enabled, turn it on and wait for it to start. + 3. Tap on the network ID (brings up a details sheet). + 4. Turn on "Enable Default Route". + 5. Tap outside the details sheet to dismiss it. + 6. Turn the connection off. + 7. Turn the connection on again. + + Once the client has been configured like this, the "Enable Default Route" setting will stick. Subsequent connections will follow the *managed* default route. + + If you wish to turn the setting off again, you need to repeat the same series of steps, turning "Enable Default Route" off at Step 4. + + - Linux clients: execute the command: + + ``` console + $ docker exec zerotier zerotier-cli set «yourNetworkID» allowDefault=1 + ``` + + See [change option](#cliOptionSet) for an explanation of the output and how to turn the option off. + + - macOS clients: open the ZeroTier menu, then the sub-menu for the Network ID, then enable "Allow Default Router [sic] Override". + - Android and Windows clients: follow your nose. + +Once `allowDefault` is enabled on a client, the routing table changes: + +``` data linenums="1" +Destination Gateway Genmask Flags MSS Window irtt Iface +0.0.0.0 10.244.0.1 128.0.0.0 UG 0 0 0 ztr2qsmswx +0.0.0.0 172.20.10.1 0.0.0.0 UG 0 0 0 wlan0 +10.244.0.0 0.0.0.0 255.255.0.0 U 0 0 0 ztr2qsmswx +128.0.0.0 10.244.0.1 128.0.0.0 UG 0 0 0 ztr2qsmswx +172.20.10.0 0.0.0.0 255.255.255.240 U 0 0 0 wlan0 +192.168.202.0 10.244.0.1 255.255.254.0 UG 0 0 0 ztr2qsmswx +``` + +Close inspection will show you that **two** entries have been added to the routing table: + +Line | Route | Gateway | Mask | Address Range +:---:|------------:|:-----------:|:---------:|:------------------------: +2 | 0.0.0.0/1 | 10.244.0.1 | 128.0.0.0 | 0.0.0.0…127.255.255.255 +5 | 128.0.0.0/1 | 10.244.0.1 | 128.0.0.0 | 128.0.0.0…255.255.255.255 + +Taken together, these have the same effect as a standard default route (0.0.0.0/0) but, because they are more-specific than the standard default route being offered by the cellular network, the path via ZeroTier Cloud will be preferred. + +You can test this with a `traceroute`: + +``` data +$ traceroute 8.8.8.8 +traceroute to 8.8.8.8 (8.8.8.8), 30 hops max, 60 byte packets + 1 10.244.0.1 (10.244.0.1) 98.239 ms 98.121 ms 98.042 ms + 2 192.168.203.1 (192.168.203.1) 98.038 ms 97.943 ms 97.603 ms +… + 7 dns.google (8.8.8.8) 104.748 ms 106.669 ms 106.356 ms +``` + +This time, the first hop is via the ZeroTier Cloud to A (10.244.0.1), then out through the local router C (192.168.203.1).
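+
+If you want to confirm what a Linux remote client is actually doing, two quick checks are sketched below. The first asks ZeroTier whether `allowDefault` is set (the Network ID is a placeholder; the output is explained under [check option](#cliOptionCheck)). The second lists the routes attached to the ZeroTier interface, where you should see the pair of /1 routes described above; `route -n` comes from the `net-tools` package and the interface name follows the examples above, so adjust to suit your system (`ip route` shows the same information in CIDR notation):
+
+``` console
+$ docker exec zerotier zerotier-cli get «yourNetworkID» allowDefault
+1
+$ route -n | grep ztr
+```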
+ +## Topology 4: Multi-site routing { #topology4 } + +| Topology 4: Site-to-Site with ZeroTier-router | +|:--:| +| ![ZeroTier - topology 4](images/zerotier-topology-4.jpeg) | + +In this topology, everything can reach everything within your catenet. The installation process for F is the same as it was for A. See [Installing ZeroTier-router](#topology2install). + +In ZeroTier Central you need one "less-specific" [Managed Route](#managedRoutes) pointing to each site where there is a ZeroTier router. + +At each site, the local router needs two static routes, both via the IP address of the local host running the ZeroTier-router container: + +1. A static route pointing to the ZeroTier Cloud (10.244.0.0/16); plus +2. A static route covering all of 192.168.0.0/16. + +If the second route does not make sense, think of it like this: + +* A packet destined for the local network (at any site) will match the more-specific routing table entry for that local network and be sent direct to the destination host. +* Otherwise, the packet will be sent to the local router (default gateway). +* On the router (C or D), the packet will match the less-specific static route for 192.168.0.0/16 and be forwarded to the local host running the ZeroTier-router container (one-armed routing). +* Courtesy of the Managed Routes set in ZeroTier Central, the local host running the ZeroTier-router container (A or F) will either have a more-specific route through the ZeroTier Cloud to the destination network, or it won't. +* If it has a more-specific route, the packet will be forwarded across the ZeroTier Cloud. +* Otherwise the packet will be dropped and the originator will receive an "ICMP destination network unreachable" message. + +In essence, both these static routes are "set and forget". They assume catenet growth is a *possibility,* and that it is preferable to set up schemes that will be robust and not need constant tweaking. + +### Tunnelling remote clients { #topo4tunnel } + +The diagram above for Topology 4 does not include a default route in ZeroTier Central. If you implement Topology 4 according to the diagram: + +* traffic between G and your sites will travel via the ZeroTier Cloud (tunnelled, encrypted); but +* traffic between G and the wider Internet will not be tunnelled, will not be encrypted by ZeroTier, and will reach the Internet via the ISP or cellular carrier. + +If you want remote clients like G to use full tunnelling, you can follow the same approach as for [Topology 3](#topology3). You simply need to decide which site should be used by G to reach the Internet. Having made your decision, define an appropriate default route in ZeroTier Central. For example, if G should reach the Internet via: + +- the left-hand site, the default route should point to the ZeroTier-router running on A: + + ``` + Destination: 0.0.0.0/0 (via) 10.244.0.1 + ``` + +- the right-hand site, the default route should point to the ZeroTier-router running on F: + + ``` + Destination: 0.0.0.0/0 (via) 10.244.0.2 + ``` + +Once you implement the default route, everything else is the same as for [Topology 3](#topology3). + +## Managed Routes { #managedRoutes } + +### TL;DR { #managedRoutesTLDR } + +If your home network is a single subnet with a /24 prefix (a subnet mask of 255.255.255.0), you need to follow two rules when constructing the "destination" field of a Managed Route in ZeroTier Central: + +1. use a /23 prefix. +2. if the third octet of your home network range is an odd number, subtract 1 to make it an even number.
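+
+If Python 3 is available on your Raspberry Pi (it is present by default on Raspberry Pi OS), you can let the standard `ipaddress` module apply those two rules for you. This is just a convenience sketch; substitute your own subnet for 192.168.203.0/24:
+
+``` console
+$ python3 -c "import ipaddress; print(ipaddress.ip_network('192.168.203.0/24').supernet(new_prefix=23))"
+192.168.202.0/23
+```
+
+The answer (192.168.202.0/23) is the value to use in the "Destination" field of the Managed Route. The worked examples in Table 2 below follow the same two rules.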
+ +Examples: + +| Table 2: Constructing Managed Routes for Subnets - examples | +|:--:| +| ![ZeroTier - Managed Route examples](images/zerotier-managed-route-examples.jpeg) | + +If your home network has multiple subnets and/or you do not use /24 prefixes then you should either read through the next section or consult one of the many IP address calculators that are available on the Internet. One example: + +* [www.calculator.net](https://www.calculator.net/ip-subnet-calculator.html) + +### The details { #managedRoutesDetails} + +This is a slightly contrived example but it will help you to understand why you need Managed Routes and how to construct them correctly in ZeroTier Central. + +Assume we are talking about [Topology 1](#topology1) and that this is the routing table for host A: + +``` data linenums="1" +Destination Gateway Genmask Flags MSS Window irtt Iface +0.0.0.0 192.168.203.1 0.0.0.0 UG 0 0 0 eth0 +192.168.203.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0 +``` + +Suppose A wants to send a packet to B. The IP stack starts searching the routing table. For each row: + +1. The destination IP address for B (192.168.203.60) is ANDed with the subnet mask (255.255.255.0). Given the last row in the routing table above: + + ``` + candidate = destinationIP AND Genmask + = 192.168.203.60 AND 255.255.255.0 + = 192.168.203.0 + ``` + +2. The candidate (192.168.203.0) is compared with the value in the Destination column (192.168.203.0). If the two values are the same, the route is considered to be a match: + + ``` + match = compareEqual(candidate,Destination) + = compareEqual(192.168.203.0, 192.168.203.0) + = true + ``` + +3. The result is a match so the packet is handed to Layer 2 for transmission via the `eth0` interface. + +Now suppose A wants to send a packet to 8.8.8.8 (Google DNS). The last row of the routing table will evaluate as follows: + +``` +candidate = destinationIP AND Genmask + = 8.8.8.8 AND 255.255.255.0 + = 8.8.8.0 + match = compareEqual(candidate,Destination) + = compareEqual(8.8.8.0, 192.168.203.0) + = false +``` + +The result is no-match so the routing algorithm continues to search the table. Eventually it will arrive at the 0.0.0.0 entry which is known as the "default route": + +``` +candidate = destinationIP AND Genmask + = 8.8.8.8 AND 0.0.0.0 + = 0.0.0.0 + match = compareEqual(candidate,Destination) + = compareEqual(0.0.0.0, 0.0.0.0) + = true +``` + +The result of comparing anything with the default route is always a match. Because the "Gateway" column is non-zero, the IP address of 192.168.203.1 (C) is used as the "next hop". The IP stack searches the routing table again. This new search for 192.168.203.1 will match on the bottom row so the packet will be handed to Layer 2 for transmission out of the `eth0` interface aimed at C (the local router, otherwise known as the "default gateway"). In turn, the local router forwards the packet to the ISP and, eventually, it winds up at 8.8.8.8. + +Let's bring ZeroTier into the mix. + +The local subnet shown in [Topology 1](#topology1) is 192.168.203.0/24 so it seems to make sense to use that same subnet in a Managed Route. 
Assume you configured that in ZeroTier Central: + +``` +192.168.203.0/24 via 10.244.0.1 +``` + +When the ZeroTier client on A adds that route to its routing table, you get something like this: + +``` data linenums="1" +Destination Gateway Genmask Flags MSS Window irtt Iface +0.0.0.0 192.168.203.1 0.0.0.0 UG 0 0 0 eth0 +10.244.0.0 0.0.0.0 255.255.0.0 U 0 0 0 ztr2qsmswx +192.168.203.0 10.244.0.1 255.255.255.0 UG 0 0 0 ztr2qsmswx +192.168.203.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0 +``` + +> To all network gurus following along: please remember this is a *contrived* example. + +Study the last two lines. You should be able to see that both lines will match when the IP stack searches this table whenever A needs to send a packet to B. This results in a tie. + +What normally happens is a tie-breaker algorithm kicks in. Schemes of route metrics, route weights, hop counts, round-trip times or interface priorities are used to pick a winner. Unfortunately, those schemes are all "implementation defined". Although the algorithms *usually* converge on a good answer, sometimes Murphy's Law kicks in. Routing problems are notoriously difficult to diagnose and can manifest in a variety of ways, ranging from sub-optimal routing, where the only symptom may be sluggishness, to forwarding loops, which can render your network mostly useless. + +Prevention is always better than cure so it is preferable to side-step the entire problem by taking advantage of the fact that IP routing will always match on a *more-specific* route before a *less-specific* route, and employ slightly less-specific Managed Routes in ZeroTier Central. + +What do *"more-"* and *"less-"* mean when we're talking about searching a routing table? The terms refer to the length of the network prefix. In "/X" notation, a larger value of X is *more-specific* than a smaller value of X: + +* a "/25" is *more* specific than a "/24" +* a "/23" is *less* specific than a "/24" + +To ensure that the IP stack will always make the correct decision, the Managed Route you configure in ZeroTier Central should always be slightly *less-specific* than the actual subnet it covers. Given 192.168.203.0/24, your first attempt at constructing a less-specific route might be: + +``` +192.168.203.0/23 via 10.244.0.1 +``` + +Sadly, that won't work. Why? Because the 192.168.203.0/23 **subnet** does not actually exist. That may surprise you but it's true. It has to do with the requirement that subnet masks use **contiguous** one-bits. It's easier to understand if you study the binary: + +| Table 3: Invalid vs Valid Managed Route | +|:--:| +| ![ZeroTier - managed route construction](images/zerotier-managed-route-construction.jpeg) | + +The left hand side of [Table 3](#table3) shows a network prefix of 192.168.203.0/23 along with what that /23 expands to as a subnet mask of 255.255.254.0. The last row is the result of ANDing the first two rows. Notice the right-most 1-bit in the third octet (circled). That bit hasn't made it to the last row and that's a problem. + +What's going on here is that the right-most 1-bit in the third octet is not actually part of the *network* portion of the IP address; it's part of the *host* portion. For a network prefix to be valid, all the bits in the host portion must be zero. To put it another way, the IP address 192.168.203.0/23 is host .1.0 (ordinal 256) in subnet 192.168.202.0/23. + +Read that last sentence again because "in subnet 192.168.202.0/23" is the clue.
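+
+You can see the same thing without doing the binary arithmetic by hand. Python's `ipaddress` module refuses to treat 192.168.203.0/23 as a network precisely because host bits are set, whereas 192.168.202.0/23 is accepted. A small sketch (the error output is abbreviated to the final line):
+
+``` console
+$ python3 -c "import ipaddress; print(ipaddress.ip_network('192.168.203.0/23'))"
+ValueError: 192.168.203.0/23 has host bits set
+$ python3 -c "import ipaddress; print(ipaddress.ip_network('192.168.202.0/23'))"
+192.168.202.0/23
+```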
+ +The right hand side of [Table 3](#table3) starts with network prefix 192.168.202.0/23 and ANDs it with its subnet mask. This time the host portion is all-zero. That means it's a valid subnet and, accordingly, can be the subject of a Managed Route. + +[Table 3](#table3) tells us something else about a /23 prefix. It tells us that whatever value appears in that third octet, the right-most 1-bit must always be zero. That's another way of saying that a /23 subnet is only valid if the third octet is an even number. + +At this point, you should understand the reason for the two rules in [TL;DR](#managedRoutesTLDR) above, and have a better idea of what you are doing if you need to use a subnet calculator. + +## Network Design considerations { #designConsiderations } + +If you intend to set up multiple sites and route between them using ZeroTier, you need to be aware of some of the consequences that flow from how you need to configure Managed Routes. + +First, it should be obvious that you can't have two sites with the same network prefix. You and a friend can't both be using 192.168.1.0/24 at home. + +The second is that the set of less-specific prefixes in Managed Routes can't overlap either. If you are using the 192.168.0.0/24 subnet at home while your friend is using 192.168.1.0/24 at her home, both of your less-specific Managed Routes will be the same: 192.168.0.0/23. If you set up two Managed Routes to 192.168.0.0/23 with different "via" addresses, all the routers will think there's a **single** site that can be reached by multiple routes. That's a recipe for a mess. + +Putting both of the above together, any network plan for multiple sites should assume a gap of two between subnets. For example, if you are using the subnet 192.168.0.0/24 then your friend should be using 192.168.2.0/24. Your Managed Route will be 192.168.0.0/23, and your friend's Managed Route will be 192.168.2.0/23. + +None of this stops either you or your friend from using both of the /24 subnets that aggregate naturally under your respective /23 prefixes. For example, the single Managed Route 192.168.0.0/23 naturally aggregates two subnets: + +* 192.168.0.0/24 - eg your Ethernet +* 192.168.1.0/24 - eg your WiFi + +Similarly, if you are using more than two subnets, such as: + +* 192.168.0.0/24 - your house Ethernet +* 192.168.1.0/24 - your house WiFi +* 192.168.2.0/24 - your workshop WiFi + +then you would slide your ZeroTier Managed Route prefix another bit to the left and use: + +``` +192.168.0.0/22 via 10.244.0.1 +``` + +Notice what happens as you slide the prefix left.
Things change in powers of 2: + +* a /24 prefix Managed Route spans exactly **1** /24 subnet +* a /23 prefix Managed Route spans exactly **2** /24 subnets +* a /22 prefix Managed Route spans exactly **4** /24 subnets +* a /21 prefix Managed Route spans exactly **8** /24 subnets +* … +* a /17 prefix Managed Route spans exactly **128** /24 subnets + +The direct consequence of that for Managed Routes is: + +* a /23 prefix means values in the third octet must be wholly divisible by **2** +* a /22 prefix means values in the third octet must be wholly divisible by **4** +* a /21 prefix means values in the third octet must be wholly divisible by **8** +* … +* a /17 prefix means values in the third octet must be wholly divisible by **128** + +Understanding how adjacent subnets can be aggregated easily by changing the prefix length should also bring with it the realisation that it is unwise to use a scattergun approach when allocating the third octet among your home subnets. Consider this scheme: + +* 192.168.0.0/24 - your Ethernet +* 192.168.100.0/24 - your house WiFi +* 192.168.200.0/24 - your workshop WiFi + +You would need three /23 Managed Routes in ZeroTier Central. In addition, you would prevent anyone else in your private ZeroTier catenet from using 192.168.1.0/24, 192.168.101.0/24 and 192.168.201.0/24. It would be preferable to use a single /22 as shown in the [example](#subnetsThree) above. + +Sure, that third octet can range from 0..255 but it's still a finite resource which is best used wisely, particularly once you start to contemplate using ZeroTier to span multiple sites. + +## Host mode and ports { #hostPorts } + +The default service definition for ZeroTier-router contains the following lines: + +``` yaml linenums="13" + network_mode: host + x-ports: + - "9993:9993" +``` + +Line 13 tells ZeroTier to run in Docker's "host mode". This means the processes running inside the container bind to the Raspberry Pi's network ports. + +> Processes running inside non-host-mode containers bind to the container's ports, and then use Network Address Translation (NAT) to reach the Raspberry Pi's ports. + +The `x-` prefix on line 14 has the effect of commenting-out the entire clause. In other words, the single `x-` has exactly the same meaning as: + +``` yaml linenums="14" +# ports: +# - "9993:9993" +``` + +The `x-ports` clause is included to document the fact that ZeroTier uses the Raspberry Pi's port 9993. + +> Documenting the ports in use for host-mode containers helps IOTstack's maintainers avoid port conflicts when adding new containers. + +You should **not** remove the `x-` prefix. If docker-compose complains about the `x-ports` clause, the message is actually telling you that your copy of docker-compose is obsolete and that you should upgrade. + +## The Domain Name System { #dnsConsiderations } + +### Normal DNS { #normalDNS } + +If you have a DNS server running somewhere in your catenet, you can ask ZeroTier to propagate that information to your ZeroTier clients. It works the same way as a DHCP server can be configured to provide the IP addresses of DNS servers when handing out leases. + +It is a two-step process: + +1. In ZeroTier Central, find the "DNS" area, complete the (optional) "Search Domain" and (required) "Server Address" fields, then click Submit. + + Examples. 
In [Topology 4](#topology4), suppose the DNS server (eg PiHole or BIND9) is host: + + - A, then "Server Address" = 10.244.0.1 (preferred) or 192.168.203.50 (less preferred); + - B, then "Server Address" = 192.168.203.60 + +2. Each client needs to be instructed to accept the DNS configuration: + + - iOS clients: always enabled. + - Linux clients: execute the command: + + ``` console + $ docker exec zerotier zerotier-cli set «yourNetworkID» allowDNS=1 + ``` + + See [change option](#cliOptionSet) for an explanation of the output and how to turn the option off. + + - macOS clients: open the ZeroTier menu, then the sub-menu for the Network ID, then enable "Allow DNS Configuration". + - Android and Windows clients: follow your nose. + +Notes: + +* Notice that clients need to opt-in to receiving DNS via ZeroTier. It is generally more appropriate for remote clients to do this than devices attached to a home network. This is probably why ZeroTier-managed DNS is "always on" for iOS clients. Android clients may be the same. For local clients, is usually better to let DHCP hand out DNS servers with the lease. +* There are reports of `allowDNS` being unreliable on Linux clients. If you have trouble on Linux, try disabling `allowDNS` and add the DNS server(s) to: + + ``` + /etc/resolvconf.conf + ``` + +### Multicast DNS { #mDNS } + +The ZeroTier Cloud relays multicast traffic. That means that multicast DNS (mDNS) names are propagated between ZeroTier clients and you can use those names in connection requests. + +In terms of [Topology 4](#topology4), A, F and G can all reach each other using mDNS names. For example: + +``` console +pi@a:~$ ssh pi@f.local +``` + +However, even if B and C were advertising mDNS names over 192.168.203.0/24, they would be unreachable from D, E, F and G using those mDNS names because B and C are not ZeroTier clients. The same applies to reaching D and E from A, B, C or G using mDNS names. + +## Resolving address-range conflicts { #addressConflicts } + +As your network infrastructure becomes more complex, you may find that you occasionally run into address-range conflicts that force you to consider renumbering. + +ZeroTier Central is where you define the subnet used by the ZeroTier Cloud (eg 10.244.0.0/16), while your home router is generally where you define the subnets used on your home networks. + +Docker typically allocates its internal subnets from 172.16/12 but it can sometimes venture into 192.168/16. Docker tries to stay clear of anything that is in use but it doesn't always have full visibility into every corner of your private catenet. + +The IOTstack menu adds the following to your compose file: + +``` yaml +networks: + + default: + driver: bridge + ipam: + driver: default + + nextcloud: + driver: bridge + internal: true + ipam: + driver: default +``` + +That structure tells docker-compose that it should construct two networks: + +* `iotstack_default` +* `iotstack_nextcloud` + +but leaves it up to docker-compose to work out the details. If you need more control, you can tell docker-compose to use specific subnets by adding two lines to each network definition: + +``` yaml +networks: + + default: + driver: bridge + ipam: + driver: default + config: + - subnet: 172.30.0.0/22 + + nextcloud: + driver: bridge + internal: true + ipam: + driver: default + config: + - subnet: 172.30.4.0/22 +``` + +A /22 is sufficient for 1,021 containers. That may seem like overkill but it doesn't really affect anything. 
Nevertheless, no part of those subnet prefixes is any kind of "magic number". You should feel free to use whatever subnet definitions are appropriate to your needs. + +Note: + +* If you are never going to run NextCloud on your Raspberry Pi, you can omit that network definition entirely. Doing so will silence unnecessary messages from docker-compose. +* The `172.30.0.0/22` and `172.30.4.0/22` subnets (or whatever alternative ranges you choose) are *private* to the host where IOTstack is installed. That means you can re-use these same subnets on multiple hosts (Raspberry Pis or other supported platforms), irrespective of whether those hosts are at the same site (like A and B) or distributed across multiple sites (like A and F). + + > The only time you would need to consider adjusting the subnet ranges is if you happened to be running two or more instances of IOTstack on the same host, simultaneously. + +## Global addressing { #globalAddressing } + +Everything in this documentation assumes you are using RFC1918 private ranges throughout your catenet. ZeroTier Cloud makes the same assumption. + +If some parts of your private catenet are using public addressing (either officially allocated to you or "misappropriated" like the 28/7 network), you *may* need to enable assignment of Global addressing: + +- iOS clients: not mentioned - likely enabled by default. +- Linux clients: execute the command: + + ``` + $ docker exec zerotier zerotier-cli set «yourNetworkID» allowGlobal=1 + ``` + + See [change option](#cliOptionSet) for an explanation of the output and how to turn the option off. + +- macOS clients: open the ZeroTier menu, then the sub-menu for the Network ID, then enable "Allow Assignment of Global IPs". +- Android and Windows clients: follow your nose. + +## Allow Managed Addresses { #aboutAllowManaged } + +The "Allow Managed Addresses" command (aka `allowManaged` option) is enabled by default. It gives ZeroTier permission to propagate IP addresses and route assignments. It is not a good idea to turn it off. If you turn it off accidentally, you can re-enable it either in the GUI or via: + +``` console +$ docker exec zerotier zerotier-cli set «yourNetworkID» allowManaged=1 +``` + +See [change option](#cliOptionSet) for an explanation of the output. + +## Useful Commands { #usefulCommands } + +The commands in this section are given using this syntax: + +``` +$ zerotier-cli command {argument …} +``` + +When ZeroTier client software is running in a container, you can execute commands: + +* directly using `docker exec`: + + ``` console + $ docker exec zerotier zerotier-cli command {argument …} + ``` + +* or by first opening a shell into the container: + + ``` console + $ docker exec -it zerotier /bin/ash + # zerotier-cli command {argument …} + # exit + $ + ``` + +On macOS you can run the commands from a Terminal window with `sudo`: + +``` console +$ sudo zerotier-cli command {argument …} +``` + +Windows, presumably, has similar functionality. 
+ +### Networks { #cliNetworks } + +#### Check networks { #cliNetworksCheck } + +To check the ZeroTier networks the client has joined: + +``` +$ zerotier-cli listnetworks +200 listnetworks +200 listnetworks 900726788b1df8e2 My_Great_Network 33:b0:c6:2e:ad:2d OK PRIVATE feth4026 10.244.0.1/16 +``` + +#### Join network { #cliNetworksJoin } + +To join a new ZeroTier network: + +``` +$ zerotier-cli join «NewNetworkID» +``` + +#### Leave network { #cliNetworksLeave } + +To leave an existing ZeroTier network: + +``` +$ zerotier-cli leave «ExistingNetworkID» +``` + +### Client status { #cliInfo } + +To check the status of a device running ZeroTier client: + +``` console +$ zerotier-cli info +200 info 340afcaa2a 1.10.1 ONLINE +``` + +### Peer status { #cliPeer } + +To check the status of peers in your ZeroTier Networks: + +``` console +$ zerotier-cli peers +200 peers + +7492fd0dc5 1.10.1 LEAF 2 DIRECT 5407 5407 17.203.229.120/47647 +f14094b92a 1.10.1 LEAF 227 DIRECT 1976 1976 34.209.49.222/54643 +C88262CD64 1.10.1 LEAF 2 DIRECT 5411 5408 192.168.1.70/64408 +… +``` + +Tip: + +* In the `` column, `DIRECT` means ZeroTier has been able to arrange for *this* client (where you are running the command) and *that* peer to communicate directly. In other words, the traffic is not being relayed through ZeroTier's servers. Seeing `RELAY` in this field is not necessarily a bad thing but, to quote from the ZeroTier documentation: + + > If you see the peer you're trying to contact in the RELAY state, that means packets are bouncing through our root servers because a direct connection between peers cannot be established. Side effects of RELAYING are increased latency and possible packet loss. See "[Router Configuration Tips](https://docs.zerotier.com/zerotier/troubleshooting#router-configuration-tips)" above for how to resolve this. + +### Options { #cliOptions } + +At the time of writing, these options are defined: + +option | Let ZeroTier … +-------------|------------------------------------------------ +allowDefault | … modify the system's default route +allowDNS | … modify the system's DNS settings +allowGlobal | … manage IP addresses and Route assignments outside the RFC1918 ranges +allowManaged | … manage IP addresses and Route assignments + +#### Check option { #cliOptionCheck } + +To check an option: + +``` console +$ zerotier-cli get «yourNetworkID» «option» +``` + +The result is either "0" (false) or "1" (true). Example: + +``` console +$ zerotier-cli get 900726788b1df8e2 allowDNS +0 +``` + +#### Change option { #cliOptionSet } + +* To enable an option: + + ``` console + $ zerotier-cli set «yourNetworkID» «option»=1 + ``` + +* To disable an option: + + ``` console + $ zerotier-cli set «yourNetworkID» «option»=0 + ``` + +The response to changing an option is a large amount of JSON output. The updated state of the options is near the start. In practice, you can limit the output to just the options with a `grep`: + +``` +$ zerotier-cli set 900726788b1df8e2 allowDNS=0 | grep allow + "allowDNS": false, + "allowDefault": false, + "allowGlobal": false, + "allowManaged": true, +``` + +## About persistent storage + +Both ZeroTier-client and ZeroTier-router use the same persistent storage area. Should you choose to do so, you can freely switch back and forth between the -client and -router containers without worrying about the persistent storage area. + +The contents of ZeroTier's persistent storage uniquely identify the client to the ZeroTier Cloud. 
Unlike WireGuard, it is neither safe nor prudent to copy ZeroTier's persistent storage from one Raspberry Pi to another. + +An exception to this would be where you actually intend to move a ZeroTier client's identity to a different machine. That will work, providing your migration procedure never results in the same ZeroTier identity being in use on two machines at the same time. + +You can erase ZeroTier's persistent storage area like this: + +``` console +$ cd ~/IOTstack +$ docker-compose down {zerotier-client | zerotier-router} +$ sudo rm -rf ./volumes/zerotier-one +``` + +Tips: + +1. always double-check `sudo` commands **before** hitting Enter. +2. see also [if downing a container doesn't work](../Basic_setup/index.md/#downContainer) + +Erasing persistent storage destroys the client's authorisation (cryptographic credentials). If you start the container again, it will construct a new identity and you will need to re-authorise the client in ZeroTier Central. You should also delete the obsolete client authorisation. + +## Container maintenance + +ZeroTier (either -client or -router) can be kept up-to-date with routine "pulls": + +``` +$ cd ~/IOTstack +$ docker-compose pull +$ docker-compose up -d +$ docker system prune -f +``` + +## iOS tip + +On iOS, you must decide whether to select "Custom DNS" when you define the VPN. If you want to change your mind, you need to delete the connection and start over. + +> Providing you don't delete the Zerotier app, the client's identity remains unchanged so you won't need to re-authorise the client in ZeroTier Central. + +An example of when you might want to enable Custom DNS is if you want your remote clients to use PiHole for name services. If PiHole is running on the same Raspberry Pi as your Zerotier instance, you should use the IP address associated with the Raspberry Pi's interface to the ZeroTier Cloud (ie 10.244.0.1 in the example topologies). diff --git a/docs/Containers/Zigbee2MQTT.md b/docs/Containers/Zigbee2MQTT.md new file mode 100644 index 000000000..99c651344 --- /dev/null +++ b/docs/Containers/Zigbee2MQTT.md @@ -0,0 +1,672 @@ +# Zigbee2MQTT + +## Quick links + +* New users: [start here](#basicProcess) +* Existing users: [Service definition change](#update202204) (circa April 2022) + +## References + +* [Web Guide](https://www.zigbee2mqtt.io) +* [Supported adapters](https://www.zigbee2mqtt.io/guide/adapters/#recommended) +* [GitHub](https://github.com/Koenkk/zigbee2mqtt) +* [DockerHub](https://hub.docker.com/r/koenkk/zigbee2mqtt/tags) + +## Definitions + +* *"compose file"* means the file at the path: + + ``` + ~/IOTstack/docker-compose.yml + ``` + +## Basic process for new users { #basicProcess } + +1. Run the IOTstack menu and choose both "Mosquitto" and "Zigbee2MQTT". That adds the service definitions for both of those containers to your *compose file*. + +2. [Prepare your Zigbee adapter](#prepareAdapter) by flashing its firmware. +3. Follow the steps in [Identify your Zigbee adapter](#identifyAdapter) to work out how your adapter: + + * "mounts" on your Raspberry Pi; or + * "connects" over your network, + + and edit your *compose file* to include that information. + +4. The default environment variables assume: + + - You are running Mosquitto and Zigbee2MQTT as IOTstack containers on the same computer; + - Your adapter mounts via USB; and + - You want the Zigbee2MQTT web front end to be available on port 8080. + + This is a good basis for getting started. 
If it sounds like it will meet your needs, you will not need to make any changes. Otherwise, review the [environment variables](#envVars) and make appropriate changes to the service definition in your *compose file*. + +5. Bring up your stack: + + ```console + $ cd ~/IOTstack + $ docker-compose up -d + ``` + +6. Confirm that the Zigbee2MQTT container appears to be working correctly. You should: + + * [Check container status](#checkStatus) to confirm that the container is running and stable, and is not in a restart loop; + * [Check the container's log](#checkLog) for any errors, warnings or other evidence of malfunction; and + * [Check inter-container connectivity](#checkMQTT) by verifying that the Zigbee2MQTT container is publishing MQTT messages to the Mosquitto broker. + +7. [Connect to the web front end](#connectGUI) and start adding your Zigbee devices. + +## Prepare your Zigbee adapter { #prepareAdapter } + +Zigbee adapters usually need to be "flashed" before they can be used by Zigbee2MQTT. To prepare your adatper: + +1. Go to the [supported adapters](https://www.zigbee2mqtt.io/guide/adapters/#recommended) page. +2. Find your adapter in the list. +3. Follow the instructions for flashing your adapter. + +Note: + +* If you can't find your adapter in the list of supported devices, you may not be able to get the Zigbee2MQTT container to connect to it. This kind of problem is outside the scope of IOTstack. You will have to raise the issue with the [Zigbee2MQTT](https://www.zigbee2mqtt.io) project. + +## Identify your Zigbee adapter { #identifyAdapter } + +* [USB adapters](#identifyUSBAdapter) +* [Remote adapters](#identifyRemoteAdapter) + +### USB adapters { #identifyUSBAdapter } + +This section covers adapters that connect to your Raspberry Pi via USB. + +Many USB Zigbee adapters mount as `/dev/ttyACM0` but this is not true for *all* adapters. In addition, if you have multiple devices connected to your Raspberry Pi that contend for a given device name, there are no guarantees that your Zigbee adapter will *always* be assigned the *same* name each time the device list is enumerated. + +For those reasons, it is better to take the time to identify your Zigbee adapter in a manner that will be predictable, unique and reliable: + +1. If your Zigbee adapter is connected to your Raspberry Pi, disconnect it. +2. Run the following command (the option is the digit "1"): + + ```console + $ ls -1 /dev/serial/by-id + ``` + + The possible response patterns are: + + * An error message: + + ``` + ls: cannot access '/dev/serial/by-id': No such file or directory + ``` + + * A list of one or more lines where your Zigbee adapter is **not** present. Example: + + ``` + usb-Silicon_Labs_CP2102N_USB_to_UART_Bridge_Controller_f068b8e7e82d4b119c0ee71fa1143ea0-if00-port0 + ``` + + The actual response (error, or a list of devices) does not matter. You are simply establishing a baseline. + +3. Connect your prepared Zigbee adapter to a USB port on your Raspberry Pi. +4. Repeat the same `ls` command from step 2. The response pattern should be different from step 2. The list should now contain your Zigbee adapter. Example: + + ``` + usb-Silicon_Labs_CP2102N_USB_to_UART_Bridge_Controller_f068b8e7e82d4b119c0ee71fa1143ea0-if00-port0 + usb-Texas_Instruments_TI_CC2531_USB_CDC___0X00125A00183F06C5-if00 + ``` + + The second line indicates a CC2531 adapter is attached to the Raspberry Pi. + + If the response pattern does **not** change, it means the Raspberry Pi is unable to see your adapter. 
The two most common reasons are: + + 1. Your adapter was not flashed correctly. Start over at [prepare your Zigbee adapter](#prepareAdapter). + 2. Your adapter does not mount as a serial device. Try repeating steps 2 through 4 with the command: + + ```console + $ ls -1 /dev + ``` + + to see if you can discover how your adapter attaches to your Raspberry Pi. + + > One example is the Electrolama zig-a-zig-ah which attaches as `/dev/ttyUSB0`. + +5. Use the output from the `ls` command in step 4 to form the absolute path to your Zigbee adapter. Example: + + ``` + /dev/serial/by-id/usb-Texas_Instruments_TI_CC2531_USB_CDC___0X00125A00183F06C5-if00 + ``` + +6. Check your work like this (the option is the lower-case letter "l"): + + ```console + $ ls -l /dev/serial/by-id/usb-Texas_Instruments_TI_CC2531_USB_CDC___0X00125A00183F06C5-if00 + lrwxrwxrwx 1 root root 13 Mar 31 19:49 dev/serial/by-id/usb-Texas_Instruments_TI_CC2531_USB_CDC___0X00125A00183F06C5-if00 -> ../../ttyACM0 + ``` + + What the output is telling you is that the *by-id* path is a symbolic link to `/dev/ttyACM0`. Although this *may* always be true on your Raspberry Pi, the only part that is actually *guaranteed* to be true is the *by-id* path, which is why you should use it. + +7. Once you have identified the path to your adapter, you communicate that information to docker-compose like this: + + ```console + $ echo ZIGBEE2MQTT_DEVICE_PATH=/dev/serial/by-id/usb-Texas_Instruments_TI_CC2531_USB_CDC___0X00125A00183F06C5-if00 >>~/IOTstack/.env + ``` + + Note: + + * if you forget to do this step, docker-compose will display the following error message: + + ``` + parsing ~/IOTstack/docker-compose.yml: error while interpolating services.zigbee2mqtt.devices.[]: required variable ZIGBEE2MQTT_DEVICE_PATH is missing a value: eg echo ZIGBEE2MQTT_DEVICE_PATH=/dev/ttyACM0 >>~/IOTstack/.env + ``` + +8. Continue from [bring up your stack](#upStack). + +### Remote adapters { #identifyRemoteAdapter } + +This section covers adapters that your Raspberry Pi connects to over a network via TCP. + +See also: + +* [connect to a remote adapter](https://www.zigbee2mqtt.io/advanced/remote-adapter/connect_to_a_remote_adapter.html). + +The default service definition provided by IOTstack for Zigbee2MQTT includes this device mapping: + +``` yaml +devices: + - "${ZIGBEE2MQTT_DEVICE_PATH:?eg echo ZIGBEE2MQTT_DEVICE_PATH=/dev/ttyACM0 >>~/IOTstack/.env}:/dev/ttyACM0" +``` + +The above syntax assumes your Zigbee adapter connects via USB. You should either remove or comment-out both of those lines from your compose file. An alternative approach is to make the `devices` clause inactive by prepending `x-`, like this: + +``` yaml +x-devices: + - "${ZIGBEE2MQTT_DEVICE_PATH:?eg echo ZIGBEE2MQTT_DEVICE_PATH=/dev/ttyACM0 >>~/IOTstack/.env}:/dev/ttyACM0" +``` + +You tell the container how to find your Zigbee adapter across the network by using an environment variable: + +``` yaml +- ZIGBEE2MQTT_CONFIG_SERIAL_PORT=tcp://«ipaddr»:«port» +``` + +Where: + +* «ipaddr» is the IP address or domain name where your remote Zigbee adapter is reachable; and +* «port» is the port on which your remote Zigbee adapter is listening. 
+ +Example: + +``` yaml +- ZIGBEE2MQTT_CONFIG_SERIAL_PORT=tcp://192.168.1.5:6638 +``` + +## Configuration { #configTemplate } + +When you select Zigbee2MQTT in the IOTstack menu, the following service definition is added to your compose file: + +``` yaml linenums="1" +zigbee2mqtt: + container_name: zigbee2mqtt + image: koenkk/zigbee2mqtt:latest + environment: + - TZ=${TZ:-Etc/UTC} + - ZIGBEE2MQTT_CONFIG_SERIAL_PORT=/dev/ttyACM0 + - ZIGBEE2MQTT_CONFIG_SERIAL_ADAPTER=zstack + - ZIGBEE2MQTT_CONFIG_MQTT_SERVER=mqtt://mosquitto:1883 + # only enable the next line for Zigbee2MQTT v1 + # - ZIGBEE2MQTT_CONFIG_FRONTEND=true + - ZIGBEE2MQTT_CONFIG_FRONTEND_ENABLED=true + - ZIGBEE2MQTT_CONFIG_ADVANCED_LOG_SYMLINK_CURRENT=true + # - DEBUG=zigbee-herdsman* + ports: + - "8080:8080" + volumes: + - ./volumes/zigbee2mqtt/data:/app/data + devices: + - "${ZIGBEE2MQTT_DEVICE_PATH:?eg echo ZIGBEE2MQTT_DEVICE_PATH=/dev/ttyACM0 >>~/IOTstack/.env}:/dev/ttyACM0" + restart: unless-stopped + depends_on: + - mosquitto +``` + +### Environment variables { #envVars } + +Many first time users of the Zigbee2MQTT container are following guidance which assumes their Zigbee2MQTT service is running *natively* rather than in a *container*. + +When you run Zigbee2MQTT *natively* you provide configuration information by editing Zigbee2MQTT's `configuration.yaml` file. Although you *can* edit `configuration.yaml` when Zigbee2MQTT is running in a container, it is a multi-step process and is also a sub-optimal approach. The correct way to provide configuration information to the Zigbee2MQTT container is via environment variables. + +**Any** value that can be set in a Zigbee2MQTT [configuration file](#confFile) can also be set using an environment variable. + +Please read that last sentence again and notice the emphasis on "Any" because it is really important. When you are running Zigbee2MQTT in a container, you **never** have to resort to editing the `configuration.yaml`. + +The [Zigbee2MQTT documentation](https://www.zigbee2mqtt.io/guide/configuration/#environment-variables) explains the syntax. It boils down to these rules: + +1. All environment variables start with `ZIGBEE2MQTT_CONFIG_`. +2. Append all-upper-case labels for section and variable names, separated by underscores. +3. Append an `=` followed by the value(s). + +For example, if the Zigbee2MQTT `configuration.yaml` example you are following contains these lines: + +``` yaml +serial: + port: /dev/ttyACM0 + adapter: zstack +``` + +then the equivalent environment variables are: + +``` yaml +- ZIGBEE2MQTT_CONFIG_SERIAL_PORT=/dev/ttyACM0 +- ZIGBEE2MQTT_CONFIG_SERIAL_ADAPTER=zstack +``` + +Note: + +* Do **not** use quote marks to enclose the values (right hand sides) of environment variables. + +Whenever you change the value of an environment variable, you also need to tell `docker-compose` to apply the change: + +```console +$ cd ~/IOTstack +$ docker-compose up -d zigbee2mqtt +``` + +The default service definition provided with IOTstack includes the following environment variables: + +#### timezone support { #tzSupport } + +``` yaml +- TZ=${TZ:-Etc/UTC} +``` + +This assumes that your system timezone has been copied to `~/IOTstack/.env`, otherwise defaults to `Etc/UTC`. + +If you want to set your timezone: + +``` console +$ echo "TZ=$(cat /etc/timezone)" >> ~/IOTstack/.env +``` + +Most (but not yet all) IOTstack containers use this syntax. The idea is that a single value set in `.env` will ensure your containers operate in the same timezone. 
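+
+If you want to confirm that the variable is defined and that the running container has picked it up, a quick check is sketched below; both commands should echo the `TZ=` value you added to `.env`:
+
+``` console
+$ grep "^TZ=" ~/IOTstack/.env
+$ docker exec zigbee2mqtt env | grep "^TZ="
+```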
+ +#### serial adapter { #serialAdapter } + +``` yaml +- ZIGBEE2MQTT_CONFIG_SERIAL_PORT=/dev/ttyACM0 +``` + +The default value of `/dev/ttyACM0` works in conjunction with the `devices` clause: + +``` yaml +devices: + - "${ZIGBEE2MQTT_DEVICE_PATH:?eg echo ZIGBEE2MQTT_DEVICE_PATH=/dev/ttyACM0 >>~/IOTstack/.env}:/dev/ttyACM0" +``` + +Taken together, these assume your Zigbee adapter is connected to a local USB port. If you are using a [remote adapter](#identifyRemoteAdapter) then you should: + +1. Change the right hand side of this variable so that it points to your adapter. For example: + + ``` yaml + - ZIGBEE2MQTT_CONFIG_SERIAL_PORT=tcp://«ipaddr»:«port»` + ``` + +2. Remove, comment-out or inactivate the `devices` clause (as explained in [remote adapters](#identifyRemoteAdapter)). + +#### adapter type { #adapterType } + +``` yaml +- ZIGBEE2MQTT_CONFIG_SERIAL_ADAPTER=zstack +``` + +Identify your adapter from the [official list](https://www.zigbee2mqtt.io/guide/configuration/adapter-settings.html#basic-configuration). At the time of writing, the possible values were `zstack`, `ember`, `deconz`, `zigate` or `zboss`. + +#### MQTT server type { #mqttServer } + +``` yaml +- ZIGBEE2MQTT_CONFIG_MQTT_SERVER=mqtt://mosquitto:1883 +``` + +Typical values for this are: + +- `mqtt://mosquitto:1883` + + This is default value supplied with the IOTstack template. It assumes that both Zigbee2MQTT and the Mosquitto broker are running in non-host mode containers on the same Raspberry Pi. + +- `mqtt://localhost:1883` + + This would be appropriate if you were to run Zigbee2MQTT in host mode and the Mosquitto broker was running on the same Raspberry Pi. + +- `mqtt://«host-or-ip»:1883` + + If the Mosquitto broker is running on a *different* computer, replace `«host-or-ip»` with the IP address or domain name of that other computer. You should also remove or comment-out the following lines from the service definition: + + ```yaml + depends_on: + - mosquitto + ``` + + The `depends_on` clause ensures that the Mosquitto container starts alongside the Zigbee2MQTT container. That would not be appropriate if Mosquitto was running on a separate computer. + +#### front end { #frontEndEnable } + +The "front end" is the name given to the Zigbee2MQTT web interface on port 8080. If you want to change the port number where you access the Zigbee2MQTT web interface, see [connecting to the web GUI](#connectGUI). + +Zigbee2MQTT version 2 introduced an incompatibility with this setting. The IOTstack template contains the following lines: + +``` yaml +# only enable the next line for Zigbee2MQTT v1 +# - ZIGBEE2MQTT_CONFIG_FRONTEND=true +- ZIGBEE2MQTT_CONFIG_FRONTEND_ENABLED=true +``` + +If you are running Zigbee2MQTT version 1 then the front end will not be enabled unless you uncomment: + +``` yaml +- ZIGBEE2MQTT_CONFIG_FRONTEND=true +``` + +Zigbee2MQTT version 1 ignores the following environment variable so you do not need to comment it out: + +``` yaml +- ZIGBEE2MQTT_CONFIG_FRONTEND_ENABLED=true +``` + +However, if you have been running Zigbee2MQTT version 1 and you upgrade to version 2 then you **must** either delete or comment-out: + +``` yaml +# - ZIGBEE2MQTT_CONFIG_FRONTEND=true +``` + +If you do not do that then the container will go into a restart loop. If you examine the container's log, you will see this error: + +``` +frontend must be object +``` + +That error is telling you to comment-out that environment variable. 
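+
+If the container does land in a restart loop after an upgrade and you suspect this setting, a quick way to confirm the cause is to search the container's log for that message (a sketch):
+
+``` console
+$ docker logs zigbee2mqtt 2>&1 | grep -i "frontend"
+```
+
+If the error appears, comment out the v1 variable as described above and re-run `docker-compose up -d zigbee2mqtt`.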
+ +#### logging { #logSymlink } + +``` yaml +- ZIGBEE2MQTT_CONFIG_ADVANCED_LOG_SYMLINK_CURRENT=true +``` + +Defining this variable causes Zigbee2MQTT to create a symlink pointing to the current log **folder** at the path: + +``` +~/IOTstack/volumes/zigbee2mqtt/data/log/current +``` + +See [Checking the log](#checkLog) for more information about why this is useful. + +#### debugging { #debugging } + +``` yaml +- DEBUG=zigbee-herdsman* +``` + +Enabling this variable turns on extended debugging inside the container. + +### Configuration file { #confFile } + +Zigbee2MQTT creates a default configuration file at the path: + +``` +~/IOTstack/volumes/zigbee2mqtt/data/configuration.yaml +``` + +Although you *can* edit the configuration file, the approach recommended for IOTstack is to use [environment variables](#envVars). + +If you decide to edit the configuration file: + +1. You will need to use `sudo` to edit the file. +2. After you have finished making changes, you need to inform the running container by: + + ```console + $ cd ~/IOTstack + $ docker-compose restart zigbee2mqtt + ``` + +3. [Check the log](#checkLog) for errors. + +Notes: + +* If you start Zigbee2MQTT from a clean slate (ie where the configuration file does not exist) **and** your *compose file* does not define the [`… MQTT_SERVER`](#mqttServer) environment variable discussed above, the container will go into a restart loop. This happens because the Zigbee2MQTT container defaults to trying to reach the Mosquitto broker at `localhost:1883` instead of `mosquitto:1883`. That usually fails. +* Settings passed via environment variables take precedence over both the defaults and any changes you make subsequently to `configuration.yaml`. The Zigbee2MQTT container does not update `configuration.yaml` to reflect settings passed via environment variables. + +## Verifying basic operation + +### Checking status { #checkStatus } + +```console +$ docker ps | grep -e mosquitto -e zigbee2mqtt +NAMES CREATED STATUS +zigbee2mqtt 33 seconds ago Up 30 seconds +mosquitto 33 seconds ago Up 31 seconds (healthy) +``` + +> The above output is filtered down to the relevant columns + +You are looking for evidence that the container is restarting (ie the "Status" column only ever shows a low number of seconds when compared with the "Created" column). + +### Checking the log { #checkLog } + +You can watch the container's log using this command: + +``` +$ docker logs -f zigbee2mqtt +``` + +Press control+c to terminate the command. An alternative is to observe the following path using commands like `cat` and `tail`: + +``` +~/IOTstack/volumes/zigbee2mqtt/data/log/current/log.log +``` + +Note: + +* this depends on the [`… LOG_SYMLINK_CURRENT`](#logSymlink) environment variable being set to `true`. + +### Checking Mosquitto connectivity { #checkMQTT } + +To perform this check, you will need to have the Mosquitto clients installed: + +```console +$ sudo apt install -y mosquitto-clients +``` + +The Mosquitto clients package includes two command-line tools: + +* `mosquitto_pub` for publishing MQTT messages to the broker; and +* `mosquitto_sub` for subscribing to MQTT messages distributed by the broker. + + > In IOTstack, the "broker" is usually the Mosquitto container. 
+ +Assuming the Mosquitto clients are installed, you can run the following command: + +```console +$ mosquitto_sub -v -h "localhost" -t "zigbee2mqtt/#" -F "%I %t %p" +``` + +One of two things will happen: + +* *silence,* indicating that the Zigbee2MQTT container is **not** able to communicate with the Mosquitto container. If this happens, you should [check the Zigbee2MQTT log](#checkLog). +* *chatter,* proving that the Zigbee2MQTT container **can** communicate with the Mosquitto container. + +Terminate the `mosquitto_sub` command with Control+C. + +## Connecting to the web GUI { #connectGUI } + +Open a browser, and point it to port 8080 on your Raspberry Pi. For example: + +```url +http://raspberrypi.local:8080 +``` + +You should see the Zigbee2MQTT interface. + +Notes: + +1. The availability of the Zigbee2MQTT UI is governed by an environment variable. If you do not see the UI, check that [`… FRONTEND`](#frontEndEnable) is defined. + +2. In the URL above, port 8080 is an **external** port which is exposed via the following port mapping in the Zigbee2MQTT service definition: + + ```yaml + ports: + - "8080:8080" + ``` + + If you want to reach the Zigbee2MQTT UI via a different port, you should edit the left hand side of that mapping. For example, if you wanted to use port 10080 you would write: + + ```yaml + ports: + - "10080:8080" + ``` + + Do not change the *internal* port number on the right hand side of the mapping. To apply changes to the port mapping: + + ```console + $ cd ~/IOTstack + $ docker-compose up -d zigbee2mqtt + ``` + +## Shell access to the container + +To open a shell inside the Zigbee2MQTT container, run: + +```console +$ docker exec -it zigbee2mqtt ash +``` + +> `ash` is **not** a typo! + +To close the shell and leave the container, either type "exit" and press return, or press Control+D. + +## Container maintenance + +When you become aware of a new version of Zigbee2MQTT on [DockerHub](https://hub.docker.com/r/koenkk/zigbee2mqtt/tags), do the following: + +```console +$ cd ~/IOTstack +$ docker-compose pull zigbee2mqtt +$ docker-compose up -d zigbee2mqtt +$ docker system prune +``` + +In words: + +1. Be in the correct directory. +2. The `pull` compares the version on your Raspberry Pi with the latest version on [DockerHub](https://hub.docker.com/r/koenkk/zigbee2mqtt/tags), and downloads any later version. +3. If a newer version is downloaded, the `up` instantiates a new container based on the new image and performs a new-for-old swap. There is barely any downtime. +4. The `prune` cleans up the older image. + +You can omit the `zigbee2mqtt` arguments from the `pull` and `up` commands, in which case `docker-compose` makes an attempt to pull any available updates for all non-Dockerfile-based images, and then instantiates any new images it has downloaded. + +## 2025 v1 to v2 upgrade { #update202501 } + +If you have been running Zigbee2MQTT version 1 and you do a "pull" from DockerHub, you will be upgraded to version 2. The first time you do this, you may encounter the following error: + +``` +frontend must be object +``` + +This is caused by a configuration incompatibility between v1 and v2.
Although it is not *difficult* to update your service definition to work with v2, if you are in a hurry to get your Zigbee service running again you can revert to v1 by making a temporary alteration to your service definition, like this: + +``` yaml +# image: koenkk/zigbee2mqtt:latest +image: koenkk/zigbee2mqtt:1.42.0 +``` + +Then, "up" the container: + +``` console +$ cd ~/IOTstack +$ docker-compose up -d zigbee2mqtt +``` + +When you are ready to upgrade to v2, you will need to undo the above change, and you will also need to update your Zigbee2MQTT service definition based on the [template](#configTemplate). In general terms, you will need to do the following: + +1. If you have a locally-connected USB adapter then you will need to add: + + ``` yaml + - ZIGBEE2MQTT_CONFIG_SERIAL_PORT=/dev/ttyACM0 + ``` + + If you have a network adapter, you will have that variable defined already so you should not change it. + +2. Add: + + ``` yaml + - ZIGBEE2MQTT_CONFIG_SERIAL_ADAPTER=zstack + ``` + + Then read the explanation about [adapter types](#adapterType) and make a decision on whether `zstack` is the correct choice. + +3. The value of `ZIGBEE2MQTT_CONFIG_MQTT_SERVER` will probably be correct so you should not change it. + +4. Replace the existing `ZIGBEE2MQTT_CONFIG_FRONTEND` with the following: + + ``` yaml + # only enable the next line for Zigbee2MQTT v1 + # - ZIGBEE2MQTT_CONFIG_FRONTEND=true + - ZIGBEE2MQTT_CONFIG_FRONTEND_ENABLED=true + ``` + +5. Any other variables you have set will likely be correct so leave those alone. + +## 2022 Service definition change { #update202204 } + +This information is for existing users of the Zigbee2MQTT container. + +The default IOTstack service definition for Zigbee2MQTT has changed: + +* The container no longer needs to be built using a Dockerfile. +* The Zigbee2MQTT images on [DockerHub](https://hub.docker.com/r/koenkk/zigbee2mqtt/tags) can be used "as is". +* Environment variables supplied with the updated service definition exactly replicate the purpose of the old Dockerfile. +* The Dockerfile supplied with the IOTstack template is deprecated but continues to be provided to maintain backwards compatibility and to avoid introducing a breaking change. + +If you were using the Zigbee2MQTT container in IOTstack before April 2022, you should use your favourite text editor to update your *compose file* to conform with the new service definition. + +> You *could* run the menu, then de-select and re-select Zigbee2MQTT. That *will* have the effect of applying the updated service definition but it also risks overwriting any other customisations you may have in place. That is why editing your *compose file* is the recommended approach. + +The updated service definition is included [here](#configTemplate) for ease of reference. + +The changes you should make to your existing Zigbee2MQTT service definition are: + +1. Replace the `build` directive: + + ```yaml + build: ./.templates/zigbee2mqtt/. + ``` + + with this `image` directive: + + ```yaml + image: koenkk/zigbee2mqtt:latest + ``` + + This causes IOTstack to use Zigbee2MQTT images "as is" from [DockerHub](https://hub.docker.com/r/koenkk/zigbee2mqtt/tags). + +2. Use the [template](#configTemplate) as a guide to adjusting your environment variables. See also [environment variables](#envVars) for more detail. + +3. Add the dependency clause: + + ```yaml + depends_on: + - mosquitto + ``` + + This ensures the Mosquitto container is brought up alongside Zigbee2MQTT. 
The Zigbee2MQTT container goes into a restart loop if Mosquitto is not reachable so this change enforces that business rule. See [`… MQTT_SERVER`](#mqttServer) for the situation where this might not be appropriate.
+
+### pre-existing configuration file
+
+Environment variables in your *compose file* override corresponding values set in the *configuration file* at:
+
+```
+~/IOTstack/volumes/zigbee2mqtt/data/configuration.yaml
+```
+
+If you have customised your existing Zigbee2MQTT [configuration file](#confFile), you should review your settings for potential conflicts with the environment variables introduced by the changes to the IOTstack service definition. You can resolve any conflicts either by:
+
+* removing or commenting-out conflicting environment variables; or
+* altering the environment variable values to match your configuration file.
+
+The second approach is recommended because it minimises the risk that Zigbee2MQTT will go into a restart loop if the configuration file is not present when the container starts.
+
+As the [Zigbee2MQTT documentation](https://www.zigbee2mqtt.io/guide/configuration/#environment-variables) explains, any option that can be set in a configuration file can also be set using an environment variable, so you may want to take the opportunity to implement all your settings as environment variables.
diff --git a/docs/Containers/Zigbee2mqttassistant.md b/docs/Containers/Zigbee2mqttassistant.md
new file mode 100644
index 000000000..346e75249
--- /dev/null
+++ b/docs/Containers/Zigbee2mqttassistant.md
@@ -0,0 +1,24 @@
+# Zigbee2Mqtt Assistant
+
+## References
+
+- [Docker](https://hub.docker.com/r/carldebilly/zigbee2mqttassistant)
+- [Website](https://github.com/yllibed/Zigbee2MqttAssistant/blob/master/README.md)
+
+## About
+
+This service is a web frontend which displays Zigbee2Mqtt service messages and is able to control it over MQTT. The service requires a working MQTT server, which must be configured.
+
+## Environment Parameters
+
+* `Z2MA_SETTINGS__MQTTSERVER=mosquitto` - The MQTT service instance which is used by the Zigbee2Mqtt instance. Here, "mosquitto" is the name of the container.
+* `Z2MA_SETTINGS__MQTTUSERNAME=name` - Used if your MQTT service has authentication enabled. Optional.
+* `Z2MA_SETTINGS__MQTTPASSWORD=password` - Used if your MQTT service has authentication enabled. Optional.
+* `TZ=Etc/UTC` - Set to your timezone. Optional but recommended.
+
+## Accessing the UI
+
+The Zigbee2Mqtt Assistant UI is available on port 8880.
For example: + +* `http://your.local.ip.address:8880/` diff --git a/docs/Containers/images/esphome-010-login.png b/docs/Containers/images/esphome-010-login.png new file mode 100644 index 000000000..940ba41d3 Binary files /dev/null and b/docs/Containers/images/esphome-010-login.png differ diff --git a/docs/Containers/images/esphome-020-new-device.png b/docs/Containers/images/esphome-020-new-device.png new file mode 100644 index 000000000..add6ed469 Binary files /dev/null and b/docs/Containers/images/esphome-020-new-device.png differ diff --git a/docs/Containers/images/esphome-030-new-device-continue.png b/docs/Containers/images/esphome-030-new-device-continue.png new file mode 100644 index 000000000..8f3a7d9f4 Binary files /dev/null and b/docs/Containers/images/esphome-030-new-device-continue.png differ diff --git a/docs/Containers/images/esphome-040-create-config.png b/docs/Containers/images/esphome-040-create-config.png new file mode 100644 index 000000000..eebf9a243 Binary files /dev/null and b/docs/Containers/images/esphome-040-create-config.png differ diff --git a/docs/Containers/images/esphome-050-device-type.png b/docs/Containers/images/esphome-050-device-type.png new file mode 100644 index 000000000..e216f1822 Binary files /dev/null and b/docs/Containers/images/esphome-050-device-type.png differ diff --git a/docs/Containers/images/esphome-060-encryption-key.png b/docs/Containers/images/esphome-060-encryption-key.png new file mode 100644 index 000000000..ced103514 Binary files /dev/null and b/docs/Containers/images/esphome-060-encryption-key.png differ diff --git a/docs/Containers/images/esphome-070-install-method.png b/docs/Containers/images/esphome-070-install-method.png new file mode 100644 index 000000000..9b04cecea Binary files /dev/null and b/docs/Containers/images/esphome-070-install-method.png differ diff --git a/docs/Containers/images/esphome-080-server-port.png b/docs/Containers/images/esphome-080-server-port.png new file mode 100644 index 000000000..9b8bb7a4d Binary files /dev/null and b/docs/Containers/images/esphome-080-server-port.png differ diff --git a/docs/Containers/images/esphome-085-no-server-port.png b/docs/Containers/images/esphome-085-no-server-port.png new file mode 100644 index 000000000..487524980 Binary files /dev/null and b/docs/Containers/images/esphome-085-no-server-port.png differ diff --git a/docs/Containers/images/esphome-090-build-sequence.png b/docs/Containers/images/esphome-090-build-sequence.png new file mode 100644 index 000000000..dbe0f780b Binary files /dev/null and b/docs/Containers/images/esphome-090-build-sequence.png differ diff --git a/docs/Containers/images/esphome-100-device-online.png b/docs/Containers/images/esphome-100-device-online.png new file mode 100644 index 000000000..6ae2938e4 Binary files /dev/null and b/docs/Containers/images/esphome-100-device-online.png differ diff --git a/docs/Containers/images/influxdb2-chart-vs-grafana.jpeg b/docs/Containers/images/influxdb2-chart-vs-grafana.jpeg new file mode 100644 index 000000000..17930479c Binary files /dev/null and b/docs/Containers/images/influxdb2-chart-vs-grafana.jpeg differ diff --git a/docs/Containers/images/influxdb2-explorer.jpeg b/docs/Containers/images/influxdb2-explorer.jpeg new file mode 100644 index 000000000..0e7607a6c Binary files /dev/null and b/docs/Containers/images/influxdb2-explorer.jpeg differ diff --git a/docs/Containers/images/influxdb2-grafana-db-source.jpeg b/docs/Containers/images/influxdb2-grafana-db-source.jpeg new file mode 100644 index 
000000000..fd0729503 Binary files /dev/null and b/docs/Containers/images/influxdb2-grafana-db-source.jpeg differ diff --git a/docs/Containers/images/influxdb2-nodered-db-out-node.jpeg b/docs/Containers/images/influxdb2-nodered-db-out-node.jpeg new file mode 100644 index 000000000..428f87883 Binary files /dev/null and b/docs/Containers/images/influxdb2-nodered-db-out-node.jpeg differ diff --git a/docs/Containers/images/influxdb2-nodered-flow-models.jpeg b/docs/Containers/images/influxdb2-nodered-flow-models.jpeg new file mode 100644 index 000000000..59c066431 Binary files /dev/null and b/docs/Containers/images/influxdb2-nodered-flow-models.jpeg differ diff --git a/docs/Containers/images/influxdb2-table1.png b/docs/Containers/images/influxdb2-table1.png new file mode 100644 index 000000000..f3283121e Binary files /dev/null and b/docs/Containers/images/influxdb2-table1.png differ diff --git a/docs/Containers/images/nextcloud-createadminaccount.png b/docs/Containers/images/nextcloud-createadminaccount.png new file mode 100644 index 000000000..ac333bc3c Binary files /dev/null and b/docs/Containers/images/nextcloud-createadminaccount.png differ diff --git a/docs/Containers/images/nextcloud-dashboard.png b/docs/Containers/images/nextcloud-dashboard.png new file mode 100644 index 000000000..255ae4dd8 Binary files /dev/null and b/docs/Containers/images/nextcloud-dashboard.png differ diff --git a/docs/Containers/images/nextcloud-network-model.jpeg b/docs/Containers/images/nextcloud-network-model.jpeg new file mode 100644 index 000000000..2dcc77b49 Binary files /dev/null and b/docs/Containers/images/nextcloud-network-model.jpeg differ diff --git a/docs/Containers/images/nextcloud-networks-clause.jpeg b/docs/Containers/images/nextcloud-networks-clause.jpeg new file mode 100644 index 000000000..4c38e2852 Binary files /dev/null and b/docs/Containers/images/nextcloud-networks-clause.jpeg differ diff --git a/docs/Containers/images/nextcloud-postinitialisation.png b/docs/Containers/images/nextcloud-postinitialisation.png new file mode 100644 index 000000000..6c53a0006 Binary files /dev/null and b/docs/Containers/images/nextcloud-postinitialisation.png differ diff --git a/docs/Containers/images/nextcloud-recommendedapps.png b/docs/Containers/images/nextcloud-recommendedapps.png new file mode 100644 index 000000000..f2ab3c864 Binary files /dev/null and b/docs/Containers/images/nextcloud-recommendedapps.png differ diff --git a/docs/Containers/images/nodered-exec-node-ssh-test.jpeg b/docs/Containers/images/nodered-exec-node-ssh-test.jpeg new file mode 100644 index 000000000..16ba5570f Binary files /dev/null and b/docs/Containers/images/nodered-exec-node-ssh-test.jpeg differ diff --git a/docs/Containers/images/nodered-flow-write-persistent-file.png b/docs/Containers/images/nodered-flow-write-persistent-file.png new file mode 100644 index 000000000..7a5e0a7be Binary files /dev/null and b/docs/Containers/images/nodered-flow-write-persistent-file.png differ diff --git a/docs/Containers/images/nodered-portainer-unused-image.png b/docs/Containers/images/nodered-portainer-unused-image.png new file mode 100644 index 000000000..bacce62cb Binary files /dev/null and b/docs/Containers/images/nodered-portainer-unused-image.png differ diff --git a/docs/Containers/images/pgadmin4-connection.jpeg b/docs/Containers/images/pgadmin4-connection.jpeg new file mode 100644 index 000000000..9c6700e68 Binary files /dev/null and b/docs/Containers/images/pgadmin4-connection.jpeg differ diff --git 
a/docs/Containers/images/pgadmin4-general.jpeg b/docs/Containers/images/pgadmin4-general.jpeg new file mode 100644 index 000000000..49d81fa57 Binary files /dev/null and b/docs/Containers/images/pgadmin4-general.jpeg differ diff --git a/docs/Containers/images/pgadmin4-initial.jpeg b/docs/Containers/images/pgadmin4-initial.jpeg new file mode 100644 index 000000000..c4d0fb1ec Binary files /dev/null and b/docs/Containers/images/pgadmin4-initial.jpeg differ diff --git a/docs/Containers/images/pihole-server-ip-discovery.png b/docs/Containers/images/pihole-server-ip-discovery.png new file mode 100644 index 000000000..12ec58192 Binary files /dev/null and b/docs/Containers/images/pihole-server-ip-discovery.png differ diff --git a/docs/Containers/images/portainer-ce-set-public-ip.png b/docs/Containers/images/portainer-ce-set-public-ip.png new file mode 100644 index 000000000..a16670c4a Binary files /dev/null and b/docs/Containers/images/portainer-ce-set-public-ip.png differ diff --git a/docs/Containers/images/ring-mqtt-token.png b/docs/Containers/images/ring-mqtt-token.png new file mode 100644 index 000000000..ac14f16f5 Binary files /dev/null and b/docs/Containers/images/ring-mqtt-token.png differ diff --git a/docs/Containers/images/wireguard-nattable.png b/docs/Containers/images/wireguard-nattable.png new file mode 100644 index 000000000..7d9f4acd9 Binary files /dev/null and b/docs/Containers/images/wireguard-nattable.png differ diff --git a/docs/Containers/images/wireguard-portmodel.jpeg b/docs/Containers/images/wireguard-portmodel.jpeg new file mode 100644 index 000000000..6a0ab08d5 Binary files /dev/null and b/docs/Containers/images/wireguard-portmodel.jpeg differ diff --git a/docs/Containers/images/zerotier-cgnat-topology-dark.png b/docs/Containers/images/zerotier-cgnat-topology-dark.png new file mode 100644 index 000000000..61708f813 Binary files /dev/null and b/docs/Containers/images/zerotier-cgnat-topology-dark.png differ diff --git a/docs/Containers/images/zerotier-cgnat-topology-light.png b/docs/Containers/images/zerotier-cgnat-topology-light.png new file mode 100644 index 000000000..4a0c8cab0 Binary files /dev/null and b/docs/Containers/images/zerotier-cgnat-topology-light.png differ diff --git a/docs/Containers/images/zerotier-cgnat-wan-interface.jpeg b/docs/Containers/images/zerotier-cgnat-wan-interface.jpeg new file mode 100644 index 000000000..440c80de8 Binary files /dev/null and b/docs/Containers/images/zerotier-cgnat-wan-interface.jpeg differ diff --git a/docs/Containers/images/zerotier-ipv4-ranges.jpeg b/docs/Containers/images/zerotier-ipv4-ranges.jpeg new file mode 100644 index 000000000..13b0320d0 Binary files /dev/null and b/docs/Containers/images/zerotier-ipv4-ranges.jpeg differ diff --git a/docs/Containers/images/zerotier-managed-route-construction.jpeg b/docs/Containers/images/zerotier-managed-route-construction.jpeg new file mode 100644 index 000000000..de2554e83 Binary files /dev/null and b/docs/Containers/images/zerotier-managed-route-construction.jpeg differ diff --git a/docs/Containers/images/zerotier-managed-route-examples.jpeg b/docs/Containers/images/zerotier-managed-route-examples.jpeg new file mode 100644 index 000000000..4b7c8d71c Binary files /dev/null and b/docs/Containers/images/zerotier-managed-route-examples.jpeg differ diff --git a/docs/Containers/images/zerotier-topology-1-reachability.jpeg b/docs/Containers/images/zerotier-topology-1-reachability.jpeg new file mode 100644 index 000000000..d14e1b575 Binary files /dev/null and 
b/docs/Containers/images/zerotier-topology-1-reachability.jpeg differ
diff --git a/docs/Containers/images/zerotier-topology-1.jpeg b/docs/Containers/images/zerotier-topology-1.jpeg
new file mode 100644
index 000000000..68c80f432
Binary files /dev/null and b/docs/Containers/images/zerotier-topology-1.jpeg differ
diff --git a/docs/Containers/images/zerotier-topology-2.jpeg b/docs/Containers/images/zerotier-topology-2.jpeg
new file mode 100644
index 000000000..894550ccc
Binary files /dev/null and b/docs/Containers/images/zerotier-topology-2.jpeg differ
diff --git a/docs/Containers/images/zerotier-topology-3.jpeg b/docs/Containers/images/zerotier-topology-3.jpeg
new file mode 100644
index 000000000..9171a2924
Binary files /dev/null and b/docs/Containers/images/zerotier-topology-3.jpeg differ
diff --git a/docs/Containers/images/zerotier-topology-4.jpeg b/docs/Containers/images/zerotier-topology-4.jpeg
new file mode 100644
index 000000000..3fbe8c11f
Binary files /dev/null and b/docs/Containers/images/zerotier-topology-4.jpeg differ
diff --git a/docs/Developers/BuildStack-RandomPassword.md b/docs/Developers/BuildStack-RandomPassword.md
new file mode 100644
index 000000000..e25fd91af
--- /dev/null
+++ b/docs/Developers/BuildStack-RandomPassword.md
@@ -0,0 +1,489 @@
+# Build Stack Random Services Password
+
+This page explains how to have a service generate a random password during build time. This will require that your service have a working options menu.
+
+Keep in mind that updating strings in a service's yaml config isn't limited to passwords.
+
+## A word of caution
+Many services set a password on their initial spin-up and store it internally. That means that if the password is changed by the menu afterwards, it may not be reflected in the service. By default the password specified in the documentation should be used, unless the user specifically selected to use a randomly generated one. In the future, the feature to specify a password manually may be added, much like how ports can be customised.
+
+## A basic example
+Inside the service's `service.yml` file, a special string can be added in for the build script to find and replace. Commonly the string is `%randomPassword%`, but technically any string can be used. The same string can be used multiple times for the same password to be used multiple times, and/or multiple different strings can be used for multiple passwords.
+``` yaml
+  mariadb:
+    image: linuxserver/mariadb
+    container_name: mariadb
+    environment:
+      - MYSQL_ROOT_PASSWORD=%randomAdminPassword%
+      - MYSQL_DATABASE=default
+      - MYSQL_USER=mariadbuser
+      - MYSQL_PASSWORD=%randomPassword%
+```
+
+These strings will be updated during the Prebuild Hook stage when building. The code to make this happen is shown below.
+
+## Code commonly used to update passwords
+This code can basically be copy-pasted into your service's `build.py` file. You are welcome to expand upon it if required. It will probably be refactored into a utils function in the future to adhere to DRY (Don't Repeat Yourself) practices.
+```
+def preBuild():
+  # Multi-service load. Most services only include a single service. The exception being NextCloud where the database information needs to match between NextCloud and MariaDB (as defined in NextCloud's 'service.yml' file, not IOTstack's MariaDB).
+  with open((r'%s/' % serviceTemplate) + servicesFileName) as objServiceFile:
+    serviceYamlTemplate = yaml.load(objServiceFile)
+
+  oldBuildCache = {}
+  try:
+    with open(r'%s' % buildCache) as objBuildCache: # Load previous build, if it exists
+      oldBuildCache = yaml.load(objBuildCache)
+  except:
+    pass
+
+  buildCacheServices = {}
+  if "services" in oldBuildCache: # If a previous build does exist, load it so that we can reuse the password from it if required.
+    buildCacheServices = oldBuildCache["services"]
+
+  if not os.path.exists(serviceService): # Create the service directory for the service
+    os.makedirs(serviceService, exist_ok=True)
+
+  # Check if buildSettings file exists (from previous build), or create one if it doesn't (in the else block).
+  if os.path.exists(buildSettings):
+    # Password randomisation
+    with open(r'%s' % buildSettings) as objBuildSettingsFile:
+      piHoleYamlBuildOptions = yaml.load(objBuildSettingsFile)
+      if (
+        piHoleYamlBuildOptions["databasePasswordOption"] == "Randomise database password for this build"
+        or piHoleYamlBuildOptions["databasePasswordOption"] == "Randomise database password every build"
+        or piHoleYamlBuildOptions["databasePasswordOption"] == "Use default password for this build"
+      ):
+
+        if piHoleYamlBuildOptions["databasePasswordOption"] == "Use default password for this build":
+          newAdminPassword = "######" # Update to what's specified in your documentation
+          newPassword = "######" # Update to what's specified in your documentation
+        else:
+          # Generate our passwords
+          newAdminPassword = generateRandomString()
+          newPassword = generateRandomString()
+
+        # Here we loop through each service included in the current service's `service.yml` file and update the password strings.
+        for (index, serviceName) in enumerate(serviceYamlTemplate):
+          dockerComposeServicesYaml[serviceName] = serviceYamlTemplate[serviceName]
+          if "environment" in serviceYamlTemplate[serviceName]:
+            for (envIndex, envName) in enumerate(serviceYamlTemplate[serviceName]["environment"]):
+              envName = envName.replace("%randomPassword%", newPassword)
+              envName = envName.replace("%randomAdminPassword%", newAdminPassword)
+              dockerComposeServicesYaml[serviceName]["environment"][envIndex] = envName
+
+        # If the user had selected to only update the password once, ensure the build options file is updated.
+        if (piHoleYamlBuildOptions["databasePasswordOption"] == "Randomise database password for this build"):
+          piHoleYamlBuildOptions["databasePasswordOption"] = "Do nothing"
+          with open(buildSettings, 'w') as outputFile:
+            yaml.dump(piHoleYamlBuildOptions, outputFile)
+      else: # Do nothing - don't change password
+        for (index, serviceName) in enumerate(buildCacheServices):
+          if serviceName in buildCacheServices: # Load service from cache if exists (to maintain password)
+            dockerComposeServicesYaml[serviceName] = buildCacheServices[serviceName]
+          else:
+            dockerComposeServicesYaml[serviceName] = serviceYamlTemplate[serviceName]
+
+  # Build options file didn't exist, so create one, and also use default password (default action).
+  else:
+    print("PiHole Warning: Build settings file not found, using default password")
+    time.sleep(1)
+    newAdminPassword = "######" # Update to what's specified in your documentation
+    newPassword = "######" # Update to what's specified in your documentation
+    for (index, serviceName) in enumerate(serviceYamlTemplate):
+      dockerComposeServicesYaml[serviceName] = serviceYamlTemplate[serviceName]
+      if "environment" in serviceYamlTemplate[serviceName]:
+        for (envIndex, envName) in enumerate(serviceYamlTemplate[serviceName]["environment"]):
+          envName = envName.replace("%randomPassword%", newPassword)
+          envName = envName.replace("%randomAdminPassword%", newAdminPassword)
+          dockerComposeServicesYaml[serviceName]["environment"][envIndex] = envName
+    piHoleYamlBuildOptions = {
+      "version": "1",
+      "application": "IOTstack",
+      "service": "PiHole",
+      "comment": "PiHole Build Options"
+    }
+
+    piHoleYamlBuildOptions["databasePasswordOption"] = "Do nothing"
+    with open(buildSettings, 'w') as outputFile:
+      yaml.dump(piHoleYamlBuildOptions, outputFile)
+
+  return True
+```
+
+## Code for your service's menu
+While not needed, since the default action is to create a random password, it is a good idea to allow the user to choose what to do. This can be achieved by giving them access to a password menu. This code can be placed in your service's `build.py` file; it will show a new menu option, allowing users to select it and be taken to a password settings screen.
+
+Remember that you need to have an already working menu, and to place this code into it.
+
+```
+import signal
+
+...
+
+def setPasswordOptions():
+  global needsRender
+  global hasRebuiltAddons
+  passwordOptionsMenuFilePath = "./.templates/{currentService}/passwords.py".format(currentService=currentServiceName)
+  with open(passwordOptionsMenuFilePath, "rb") as pythonDynamicImportFile:
+    code = compile(pythonDynamicImportFile.read(), passwordOptionsMenuFilePath, "exec")
+  execGlobals = {
+    "currentServiceName": currentServiceName,
+    "renderMode": renderMode
+  }
+  execLocals = {}
+  screenActive = False
+  exec(code, execGlobals, execLocals)
+  signal.signal(signal.SIGWINCH, onResize)
+  screenActive = True
+  needsRender = 1
+
+...
+
+def createMenu():
+  global yourServicesBuildOptions
+  global serviceService
+
+  yourServicesBuildOptions = []
+  yourServicesBuildOptions.append([
+    "Your Service Password Options",
+    setPasswordOptions
+  ])
+
+  yourServicesBuildOptions.append(["Go back", goBack])
+
+```
+
+## Password settings screen
+The code for the Password settings screen is lengthy, but it's pasted here for convenience.
+```
+#!/usr/bin/env python3
+
+import signal
+
+def main():
+  from blessed import Terminal
+  from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine
+  from deps.consts import servicesDirectory, templatesDirectory, buildSettingsFileName
+  import time
+  import subprocess
+  import ruamel.yaml
+  import os
+
+  global signal
+  global currentServiceName
+  global menuSelectionInProgress
+  global mainMenuList
+  global currentMenuItemIndex
+  global renderMode
+  global paginationSize
+  global paginationStartIndex
+  global hideHelpText
+
+  yaml = ruamel.yaml.YAML()
+  yaml.preserve_quotes = True
+
+  try: # If not already set, then set it.
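+    # Referencing hideHelpText raises a NameError if the parent menu has not injected it
+    # as a global; the except clause below then defaults it to False (help text shown).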
+ hideHelpText = hideHelpText + except: + hideHelpText = False + + term = Terminal() + hotzoneLocation = [((term.height // 16) + 6), 0] + paginationToggle = [10, term.height - 25] + paginationStartIndex = 0 + paginationSize = paginationToggle[0] + + serviceService = servicesDirectory + currentServiceName + serviceTemplate = templatesDirectory + currentServiceName + buildSettings = serviceService + buildSettingsFileName + + def goBack(): + global menuSelectionInProgress + global needsRender + menuSelectionInProgress = False + needsRender = 1 + return True + + mainMenuList = [] + + hotzoneLocation = [((term.height // 16) + 6), 0] + + menuSelectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + + # Render Modes: + # 0 = No render needed + # 1 = Full render + # 2 = Hotzone only + needsRender = 1 + + def onResize(sig, action): + global mainMenuList + global currentMenuItemIndex + mainRender(1, mainMenuList, currentMenuItemIndex) + + def generateLineText(text, textLength=None, paddingBefore=0, lineLength=64): + result = "" + for i in range(paddingBefore): + result += " " + + textPrintableCharactersLength = textLength + + if (textPrintableCharactersLength) == None: + textPrintableCharactersLength = len(text) + + result += text + remainingSpace = lineLength - textPrintableCharactersLength + + for i in range(remainingSpace): + result += " " + + return result + + def renderHotZone(term, renderType, menu, selection, hotzoneLocation, paddingBefore = 4): + global paginationSize + selectedTextLength = len("-> ") + + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + + if paginationStartIndex >= 1: + print(term.center("{b} {uaf} {uaf}{uaf}{uaf} {ual} {b}".format( + b=specialChars[renderMode]["borderVertical"], + uaf=specialChars[renderMode]["upArrowFull"], + ual=specialChars[renderMode]["upArrowLine"] + ))) + else: + print(term.center(commonEmptyLine(renderMode))) + + for (index, menuItem) in enumerate(menu): # Menu loop + if index >= paginationStartIndex and index < paginationStartIndex + paginationSize: + lineText = generateLineText(menuItem[0], paddingBefore=paddingBefore) + + # Menu highlight logic + if index == selection: + formattedLineText = '-> {t.blue_on_green}{title}{t.normal} <-'.format(t=term, title=menuItem[0]) + paddedLineText = generateLineText(formattedLineText, textLength=len(menuItem[0]) + selectedTextLength, paddingBefore=paddingBefore - selectedTextLength) + toPrint = paddedLineText + else: + toPrint = '{title}{t.normal}'.format(t=term, title=lineText) + # ##### + + # Menu check render logic + if menuItem[1]["checked"]: + toPrint = " (X) " + toPrint + else: + toPrint = " ( ) " + toPrint + + toPrint = "{bv} {toPrint} {bv}".format(bv=specialChars[renderMode]["borderVertical"], toPrint=toPrint) # Generate border + toPrint = term.center(toPrint) # Center Text (All lines should have the same amount of printable characters) + # ##### + print(toPrint) + + if paginationStartIndex + paginationSize < len(menu): + print(term.center("{b} {daf} {daf}{daf}{daf} {dal} {b}".format( + b=specialChars[renderMode]["borderVertical"], + daf=specialChars[renderMode]["downArrowFull"], + dal=specialChars[renderMode]["downArrowLine"] + ))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + + + def mainRender(needsRender, menu, selection): + global paginationStartIndex + global paginationSize + term = Terminal() + + if selection >= paginationStartIndex + paginationSize: 
+ paginationStartIndex = selection - (paginationSize - 1) + 1 + needsRender = 1 + + if selection <= paginationStartIndex - 1: + paginationStartIndex = selection + needsRender = 1 + + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack YourServices Password Options'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Password Option {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, needsRender, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + if term.height < 32: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Not enough vertical room to render controls help text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Space] to select option {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to build and save option {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to cancel changes {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + if len(mainMenuList[selection]) > 1 and isinstance(mainMenuList[selection][1], types.FunctionType): + mainMenuList[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(mainMenuList[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 1: + if "skip" in menu[index][1] and menu[index][1]["skip"] == True: + return False + return True + + def loadOptionsMenu(): + global mainMenuList + mainMenuList.append(["Use default password for this build", { "checked": True }]) + mainMenuList.append(["Randomise database password for this build", { "checked": False }]) + mainMenuList.append(["Randomise database password every build", { "checked": False }]) + mainMenuList.append(["Do nothing", { "checked": False }]) + + def checkMenuItem(selection): + global mainMenuList + for (index, menuItem) in enumerate(mainMenuList): + mainMenuList[index][1]["checked"] = False + + mainMenuList[selection][1]["checked"] = True + + def saveOptions(): + try: + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + + if os.path.exists(buildSettings): + with open(r'%s' % buildSettings) as objBuildSettingsFile: + yourServicesYamlBuildOptions = yaml.load(objBuildSettingsFile) + else: + yourServices = { + "version": "1", + "application": "IOTstack", + "service": "Your Service", + "comment": "Your Service Build Options" + } + + yourServices["databasePasswordOption"] = "" + + for (index, 
menuOption) in enumerate(mainMenuList): + if menuOption[1]["checked"]: + yourServices["databasePasswordOption"] = menuOption[0] + break + + with open(buildSettings, 'w') as outputFile: + yaml.dump(yourServices, outputFile) + + except Exception as err: + print("Error saving Your Services Password options", currentServiceName) + print(err) + return False + global hasRebuiltHardwareSelection + hasRebuiltHardwareSelection = True + return True + + def loadOptions(): + try: + if not os.path.exists(serviceService): + os.makedirs(serviceService, exist_ok=True) + + if os.path.exists(buildSettings): + with open(r'%s' % buildSettings) as objBuildSettingsFile: + yourServicesYamlBuildOptions = yaml.load(objBuildSettingsFile) + + for (index, menuOption) in enumerate(mainMenuList): + if menuOption[0] == yourServicesYamlBuildOptions["databasePasswordOption"]: + checkMenuItem(index) + break + + except Exception as err: + print("Error loading Your Services Password options", currentServiceName) + print(err) + return False + return True + + + if __name__ == 'builtins': + global signal + term = Terminal() + signal.signal(signal.SIGWINCH, onResize) + loadOptionsMenu() + loadOptions() + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + menuSelectionInProgress = True + with term.cbreak(): + while menuSelectionInProgress: + menuNavigateDirection = 0 + + if not needsRender == 0: # Only rerender when changed to prevent flickering + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + if paginationSize == paginationToggle[0]: + paginationSize = paginationToggle[1] + else: + paginationSize = paginationToggle[0] + mainRender(1, mainMenuList, currentMenuItemIndex) + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_ENTER': + if saveOptions(): + return True + else: + print("Something went wrong. Try saving the list again.") + if key.name == 'KEY_ESCAPE': + menuSelectionInProgress = False + return True + elif key: + if key == ' ': # Space pressed + checkMenuItem(currentMenuItemIndex) # Update checked list + needsRender = 2 + elif key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, mainMenuList, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + needsRender = 2 + + while not isMenuItemSelectable(mainMenuList, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + return True + + return True + +originalSignalHandler = signal.getsignal(signal.SIGINT) +main() +signal.signal(signal.SIGWINCH, originalSignalHandler) + +``` \ No newline at end of file diff --git a/docs/Developers/BuildStack-Services.md b/docs/Developers/BuildStack-Services.md new file mode 100644 index 000000000..28a02017d --- /dev/null +++ b/docs/Developers/BuildStack-Services.md @@ -0,0 +1,193 @@ +# Build Stack Services system + +This page explains how the build stack system works for developers. + +## How to define a new service +A service only requires 2 files: +* `service.yml` - Contains data for docker-compose +* `build.py` - Contains logic that the menu system uses. 
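+
+For example (layout sketch), the `adminer` service used as the example below lives in a matching folder under `./.templates`:
+
+```
+./.templates/adminer/
+├── service.yml    # data merged into docker-compose.yml at build time
+└── build.py       # hooks and checks used by the menu system
+```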
+ +### A basic service +Inside the `service.yml` is where the service data for docker-compose is housed, for example: +``` yaml +adminer: + container_name: adminer + image: adminer + restart: unless-stopped + ports: + - "9080:8080" +``` +It is important that the service name match the directory that it's in - that means that the `adminer` service must be placed into a folder called `adminer` inside the `./.templates` directory. + + +### Basic build code for service +At the very least, the `build.py` requires the following code: +``` +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. Required to make local vars work correctly +def main(): + global currentServiceName # Name of the current service + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # Entrypoint for execution + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'adminer': # Make sure you update this. + main() +else: + print("Error. '{}' Tried to run 'adminer' config".format(currentServiceName)) +``` +This code doesn't have any port conflicting checking or menu code in it, and just allows the service to be built as is. The best way to learn on extending the functionality of the service's build script is to look at the other services' build scripts. You can also check out the advanced sections on adding menus and checking for issues for services though for a deeper explanation of specific situations. + +### Basic code for a service that uses bash +If Python isn't your thing, here's a code blob you can copy and paste. Just be sure to update the lines where the comments start with `---` +``` +#!/usr/bin/env python3 + +issues = {} # Returned issues dict +buildHooks = {} # Options, and others hooks +haltOnErrors = True + +# Main wrapper function. 
Required to make local vars work correctly +def main(): + import subprocess + global dockerComposeServicesYaml # The loaded memory YAML of all checked services + global toRun # Switch for which function to run when executed + global buildHooks # Where to place the options menu result + global currentServiceName # Name of the current service + global issues # Returned issues dict + global haltOnErrors # Turn on to allow erroring + + from deps.consts import servicesDirectory, templatesDirectory, volumesDirectory, servicesFileName + + # runtime vars + serviceVolume = volumesDirectory + currentServiceName # Unused in example + serviceService = servicesDirectory + currentServiceName # Unused in example + serviceTemplate = templatesDirectory + currentServiceName + + # This lets the menu know whether to put " >> Options " or not + # This function is REQUIRED. + def checkForOptionsHook(): + try: + buildHooks["options"] = callable(runOptionsMenu) + except: + buildHooks["options"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPreBuildHook(): + try: + buildHooks["preBuildHook"] = callable(preBuild) + except: + buildHooks["preBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForPostBuildHook(): + try: + buildHooks["postBuildHook"] = callable(postBuild) + except: + buildHooks["postBuildHook"] = False + return buildHooks + return buildHooks + + # This function is REQUIRED. + def checkForRunChecksHook(): + try: + buildHooks["runChecksHook"] = callable(runChecks) + except: + buildHooks["runChecksHook"] = False + return buildHooks + return buildHooks + + # This service will not check anything unless this is set + # This function is optional, and will run each time the menu is rendered + def runChecks(): + checkForIssues() + return [] + + # This function is optional, and will run after the docker-compose.yml file is written to disk. + def postBuild(): + return True + + # This function is optional, and will run just before the build docker-compose.yml code. + def preBuild(): + execComm = "bash {currentServiceTemplate}/build.sh".format(currentServiceTemplate=serviceTemplate) # --- You may want to change this + print("[Wireguard]: ", execComm) # --- Ensure to update the service name with yours + subprocess.call(execComm, shell=True) # This is where the magic happens + return True + + # ##################################### + # Supporting functions below + # ##################################### + + def checkForIssues(): + return True + + if haltOnErrors: + eval(toRun)() + else: + try: + eval(toRun)() + except: + pass + +# This check isn't required, but placed here for debugging purposes +global currentServiceName # Name of the current service +if currentServiceName == 'wireguard': # --- Ensure to update the service name with yours + main() +else: + print("Error. '{}' Tried to run 'wireguard' config".format(currentServiceName)) # --- Ensure to update the service name with yours + +``` \ No newline at end of file diff --git a/docs/Developers/Git-Setup.md b/docs/Developers/Git-Setup.md new file mode 100644 index 000000000..014d9628f --- /dev/null +++ b/docs/Developers/Git-Setup.md @@ -0,0 +1,178 @@ +How to setup and use git for IOTstack development. + +1. First, create a + [fork](https://docs.github.com/en/get-started/quickstart/fork-a-repo) of + SensorsIot/IOTstack on github. 
And [set up](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/adding-a-new-ssh-key-to-your-github-account) your ssh-keys.
+1. Clone your fork and set up your github username and email
+    ``` console
+    $ git clone git@github.com:<your-github-username>/IOTstack.git
+    $ cd IOTstack
+    $ git config user.name <your-name>
+    $ git config user.email <1234>+<your-github-username>@users.noreply.github.com
+    ```
+1. Add the SensorsIot/IOTstack upstream
+    ``` console
+    $ git remote add upstream https://github.com/SensorsIot/IOTstack.git
+    ```
+1. Configure for ease of operation
+    ``` console
+    $ git config fetch.prune true
+    $ git config remote.pushDefault origin
+    $ git config --add remote.origin.fetch "^refs/heads/gh-pages"
+    $ git config --add remote.upstream.fetch "^refs/heads/gh-pages"
+    $ git config branch.master.mergeoptions "--no-ff"
+    $ git config fetch.parallel 0
+    $ git fetch --all
+    ```
+
+## Make a pull-request
+
+``` mermaid
+flowchart LR
+  upstream["upstream (SensorsIOT)"] -- "1. git fetch + git checkout -b" --> local[local branch]
+  local -- "2. git commit" --> local
+  local -- "3. git push" --> origin["origin (your fork)"]
+  origin -- "3. create github pull-request" --> upstream
+```
+
+Please see [Contributing](index.md) for instructions on how to write commit messages.
+
+``` console
+$ git fetch upstream
+$ git checkout -b <your-branch> upstream/master
+...coding and testing...
+$ git add <changed-files>
+Check everything has been added:
+$ git status
+$ git commit
+$ git push
+```
+When you execute git push, its output should have a link for creating the pull-request to github.
+
+## Common operations
+
+### Show compact history with "git lg"
+
+``` console
+$ git config alias.lg "log --color --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset' --abbrev-commit"
+```
+
+### Remove branches of merged pull-requests.
+
+When your pull-requests have been merged, their branches aren't needed anymore. Remove them to reduce clutter and distractions. The master branch is never deleted.
+
+``` console
+$ git fetch --all
+$ git checkout master
+$ git branch -r --merged upstream/master | \
+    grep -v origin/master$ | grep origin | sed 's/origin\///' | \
+    xargs -I 'B' git push --delete origin B
+$ git branch --merged upstream/master | grep -v " master$" | \
+    xargs -I'B' git branch -d B
+```
+
+## Advanced topics
+
+### Fetch all pull-requests as branches
+
+This is handy for easily testing out other persons' suggested changes. The branches are of course fetch-only, and you can't push your own commits to them.
+
+``` console
+$ git config --add remote.upstream.fetch +refs/pull/*/head:refs/remotes/upstream/pr-*
+$ git fetch upstream
+```
+
+*Note:* Everything below requires this.
+
+### Show up-to-date branches not merged
+
+Branches that include the latest upstream/master, but are not merged to your current branch, are potentially mergeable pull-requests. This is useful for identifying which pull-requests you should be able to merge without conflict.
+
+``` console
+$ git fetch upstream
+$ git branch -r --contains upstream/master --no-merged upstream/master
+```
+
+### Check pull-requests on Github can be merged without conflicts
+
+In git, the only way to know if a branch can be merged without a conflict is by actually doing the merge. An alias to (re-)create a branch named `merge-test` and do merges into it:
+
+``` console
+$ git config alias.test-pull-request-merge $'!f() { : git merge && \
+    OPENPULLS=$(curl -s \'https://api.github.com/repos/SensorsIot/IOTstack/pulls?base=master&per_page=100\' | \
+      grep "^.....number" | sed -E \'s/.* ([0-9]+),/ upstream\\/pr-\\1/\') && \
+    git fetch upstream && git checkout -B merge-test upstream/master && \
+    git branch -r --contains upstream/master --no-merged upstream/master | \
+      grep upstream/pr- | sort - <(echo "$OPENPULLS") | \
+      { uniq -d; [[ "$1" ]] && echo "$1"; } | \
+      xargs -I B sh -c "echo Merging B && \
+        git merge --no-rerere-autoupdate --no-ff --quiet B || \
+        { echo ***FAILED TO MERGE B && exit 255; };" ;}; f'
+```
+
+Then use this alias combined with `git checkout -`, returning your working copy back to the original branch if all merges succeeded:
+
+``` console
+$ git test-pull-request-merge && git checkout -
+```
+
+This merges all branches that are: a) currently open pull requests and b) up-to-date, i.e. contain upstream/master and c) not merged already and d) the optional provided argument. Note: it won't ignore draft pull-requests. If it encounters a failure, it stops immediately to let you inspect the conflict.
+
+!!! help "Failed merge?"
+
+    *If* there was a merge-conflict, inspect it e.g. using `git diff`, but don't do any real work or conflict resolution in the merge-test branch. When you have understood the merge-conflict and want to leave the merge-test branch, abort the failed merge and switch to your actual branch:
+
+    ``` console
+    $ git diff
+    $ git merge --abort
+    $ git checkout <your-branch>
+    ```
+
+### Check your branch doesn't conflict with any existing pull-request
+
+When you intend to submit a pull-request you might want to check that it won't conflict with any of the existing pull-requests.
+
+1. Commit all your changes into your pull request branch.
+2. Use the alias from the previous "Test all current pull-requests..."-topic to test merging your branch in addition to all current pull requests:
+
+    ``` console
+    $ git test-pull-request-merge && git checkout -
+    ```
+
+    If there is a merge-conflict, see "Failed merge?" above.
diff --git a/docs/Developers/Menu-System.md b/docs/Developers/Menu-System.md
new file mode 100644
index 000000000..3048ed70a
--- /dev/null
+++ b/docs/Developers/Menu-System.md
@@ -0,0 +1,90 @@
+# Menu system
+
+This page explains how the menu system works for developers.
+
+## Background
+Originally this script was written in bash. After a while it became obvious that bash wasn't well suited to dealing with all the different types of configuration files, and logic that goes with configuring everything. IOTstack needs to be accessible to all levels of programmers and tinkerers, not just ones experienced with Linux and bash. For this reason, it was rewritten in Python since the language syntax is easier to understand, and is more commonly used for scripting and programming than bash. Bash is still used in IOTstack where it makes sense to use it, but the menu system itself uses Python. The code itself, while not the most well-structured or efficient, was intentionally written that way so that both beginners and experienced programmers can contribute to the project. We are always open to improvements if you have suggestions.
+
+## Menu Structure
+
+Each screen of the menu is its own Python script. You can find most of these in the `./scripts` directory.
When you select an item from the menu, and it changes screens, it actually dynamically loads and executes that Python script. It passes data as required by placing it into the global variable space so that both the child and the parent script can access it.
+
+### Injecting and getting globals in a child script
+```
+with open(childPythonScriptPath, "rb") as pythonDynamicImportFile:
+  code = compile(pythonDynamicImportFile.read(), childPythonScriptPath, "exec")
+execGlobals = {
+  "globalKeyName": "globalKeyValue"
+}
+execLocals = {}
+print(execGlobals["globalKeyName"]) # Will print out 'globalKeyValue'
+exec(code, execGlobals, execLocals)
+print(execGlobals["globalKeyName"]) # Will print out 'newValue' if the child script changed it
+```
+
+### Reading and writing global variables in a child script
+```
+def someFunction():
+  global globalKeyName
+  print(globalKeyName) # Will print out 'globalKeyValue'
+  globalKeyName = "newValue"
+```
+
+Each menu is its own python executable. The entry point is at the bottom of the file, wrapped in a `main()` function to prevent variable scope creep.
+
+The code at the bottom of the `main()` function:
+```
+if __name__ == 'builtins':
+```
+
+is actually where the execution path runs; all the code above it is just declared so that it can be called without ordering or scope issues.
+
+### Optimisations
+
+It was obvious early on that the menu system would be slow on lower end devices, such as the Raspberry Pi, especially if it were rendering a 4k terminal screen from a desktop via SSH. To mitigate this issue, not all of the screen is redrawn when there is a change. A "Hotzone", as it's called in the code, is usually rerendered when there's a change (such as pressing up or down to change an item selection, but not when scrolling). Full screen redraws are expensive and are only used when required, for example, when scrolling the pagination, selecting or deselecting a service, expanding or collapsing the menu and so on.
+
+### Environments and encoding
+At the very beginning of the main menu screen (`./scripts/main_menu.py`) the function `checkRenderOptions()` is run to determine what characters can be displayed on the screen. It will try various character sets, and eventually default to ASCII if none of the fancier stuff can be rendered. This setting is passed into each of the sub menus through the submenu's global variables so that they don't have to recheck when they load.
+
+### Sub-Menus
+
+From the main screen, you will see several sections leading to various submenus. Most of these menus work in the same way as the main menu. The only exception to this rule is the Build Stack menu, which is probably the most complex part of IOTstack.
+
+## Build Stack Menu
+
+Path: `./scripts/buildstack_menu.py`
+
+### Loading
+
+1. Upon loading, the Build Stack menu will get a list of folders inside the `./.templates` directory and check for a `build.py` file inside each of them. This can be seen in the `generateTemplateList()` function, which is executed before the first rendering happens.
+2. The menu will then check if the file `./services/docker-compose.save.yml` exists. This file is used to save the configuration of the last build. This happens in the `loadCurrentConfigs()` function. It is important that the service name in the compose file matches the folder name; any service that doesn't will either cause an error or won't be loaded into the menu.
+3. If a previous build did exist, the menu will then run the `prepareMenuState()` function, which basically checks which items should be ticked, and checks for any issues with the ticked items by running `checkForIssues()`.
+
+### Selection and deselection
+When an item is selected, 3 things happen:
+1. Update the UI variable (`menu`) with the function `checkMenuItem(selectionIndex)` to let the user know the current state.
+2. Update the array holding every checked item with `setCheckedMenuItems()`. It uses the UI variable (`menu`) to know which items are set.
+3. Check for any issues with the new list of selected items by running `checkForIssues()`.
+
+### Check for options (submenus of services)
+During a full render sequence (this is not a hotzone render), the build stack menu checks to see if each of the services has an options menu. It does this by executing the `build.py` script of each of the services and passing `checkForOptionsHook` into the `toRun` global variable property to see if the script has a `runOptionsMenu` function. If the service's function result is true, without error, then the options text will appear for that menu item.
+
+### Check for issues
+When a service is selected or deselected on the menu, the `checkForIssues()` function is run. This function iterates through each of the selected menu items' folders, executing the `build.py` script and passing `checkForRunChecksHook` into the `toRun` global variable property to see if the script has a `runChecks` function. The `runChecks` function is different depending on the service, since each service has its own requirements. Generally though, the `runChecks` function should check for port conflicts against any of the other services that are enabled. The menu will still allow you to build the stack, even if issues are present, assuming there are no errors raised during the build process.
+
+### Prebuild hook
+Pressing enter on the Build Stack menu kicks off the build process. The Build Stack menu will execute the `runPrebuildHook()` function. This function iterates through each of the selected menu items' folders, executing the `build.py` script and passing `checkForPreBuildHook` into the `toRun` global variable property to see if the script has a `preBuild` function. The `preBuild` function is different depending on the service, since each service has its own requirements. Some services may not even use the prebuild hook. The prebuild hook is, however, very useful for setting up a service's configuration. For example, it can be used to autogenerate a password for a particular service, or to copy and modify a configuration file from the `./.templates` directory into the `./services` or `./volumes` directory.
+
+### Postbuild hook
+The Build Stack menu will execute the `runPostBuildHook()` function in the final step of the build process, after the `docker-compose.yml` file has been written to disk. This function iterates through each of the selected menu items' folders, executing the `build.py` script and passing `checkForPostBuildHook` into the `toRun` global variable property to see if the script has a `postBuild` function. The `postBuild` function is different depending on the service, since each service has its own requirements. Most services won't require this function, but it can be useful for cleaning up temporary files and so on.
+
+### The build process
+The selected services' yaml configuration is already loaded into memory before the build stack process is started.
+
+1. Run prebuildHooks.
+2. Read the `./.templates/docker-compose-base.yml` file into an in-memory yaml structure.
+3. Add selected services into the in-memory structure.
+4. If it exists, merge the `./compose-override.yml` file into memory.
+5. Write the in-memory yaml structure to disk as `./docker-compose.yml`.
+6. Run postbuildHooks.
+7. Run `postbuild.sh` if it exists, with the list of services built.
diff --git a/docs/Developers/PostBuild-Script.md b/docs/Developers/PostBuild-Script.md
new file mode 100644
index 000000000..cd3142b12
--- /dev/null
+++ b/docs/Developers/PostBuild-Script.md
@@ -0,0 +1,30 @@
+# Postbuild BASH Script
+The postbuild bash script allows arbitrary bash commands to be executed after the stack has been built.
+
+## How to use
+Place a file in the main directory called `postbuild.sh`. When the buildstack [build logic](../Developers/Menu-System.md) finishes, it'll execute the `postbuild.sh` script, passing in each service selected from the buildstack menu as a parameter. This script is run each time the buildstack logic runs.
+
+## Updates
+The `postbuild.sh` file has been added to gitignore, so it won't be updated by IOTstack when IOTstack is updated. It has also been added to the backup script so that it will be backed up with your personal IOTstack backups.
+
+## Example `postbuild.sh` script
+The following script will print out each of the services built, and a custom message for nodered. If it was the first time the script was executed, it'll also output "Fresh Install" at the end, using a `.install_tainted` file to keep track.
+```
+#!/bin/bash
+
+for iotstackService in "$@"
+do
+  echo "$iotstackService"
+  if [ "$iotstackService" == "nodered" ]; then
+    echo "NodeRed Installed!"
+  fi
+done
+
+if [ ! -f .install_tainted ]; then
+  echo "Fresh Install!"
+  touch .install_tainted
+fi
+```
+
+## What is my purpose?
+The postbuild script can be used to run custom bash commands, such as moving files, or issuing commands that your services expect to be completed before running.
diff --git a/docs/Developers/index.md b/docs/Developers/index.md
new file mode 100644
index 000000000..ecf61d957
--- /dev/null
+++ b/docs/Developers/index.md
@@ -0,0 +1,101 @@
+# Contributing
+
+We welcome pull-requests.
+
+For larger contributions, please open an issue describing your idea. It may provide valuable discussion and feedback. It also prevents the unfortunate case of two persons working on the same thing. There's no need to wait for any approval.
+
+!!! check "Development guidelines"
+    * It-just-works - use good defaults that will work well for a first time user
+    * Keep-it-simple - try to keep stuff beginner-friendly and don't go too deep into advanced topics
+
+## Writing documentation
+
+!!! tip inline end
+    For simple changes you can straight-up just use the edit link available on every documentation page. It's the pen-icon to the right of the top heading. Write your changes, check in the preview-tab that everything looks as expected and submit as proposed changes.
+
+Documentation is written as markdown, processed using mkdocs ([docs](https://www.mkdocs.org/user-guide/writing-your-docs/#writing-your-docs)) and the Material theme ([docs](https://squidfunk.github.io/mkdocs-material/reference/)). The Material theme is not just styling, but provides additional syntax extensions.
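+For example, the call-out boxes used on this page are one such extension (a minimal sketch; see the Material reference linked above for the full syntax):
+
+``` markdown
+!!! tip "A short title"
+    Content indented under a `!!!` marker is rendered by the Material theme
+    as a call-out box rather than as plain text.
+```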
+
+To test your local changes while writing them and before making a pull-request, start a local mkdocs server:
+``` console
+$ ~/IOTstack/scripts/development/mkdocs-serve.sh
+```
+And then open [http://127.0.0.1:8000/](http://127.0.0.1:8000/) in a browser.
+
+## Creating a new service
+
+In this section you can find information on how to contribute a service to IOTstack. We are generally very accepting of new services where they are useful. Keep in mind that if it is not IOTstack, selfhosted, or automation related, we may not approve the PR.
+
+As the number of services grows over time, we may split the buildstack menu into subsections or add filters to make the services we provide easier to find.
+
+## Checks
+* `service.yml` file is correct
+* `build.py` file is correct
+* Service allows for changing the external WUI port from Build Stack's options menu if the service uses an HTTP/S port
+* Use a default password, or allow the user to generate a random password for the service for initial installation. If the service asks to set up an account, this can be ignored.
+* Ensure [Default Configs](../Basic_setup/Default-Configs.md) is updated as required. A helper script (default_ports_md_generator.sh) exists to simplify this.
+* Must detect port conflicts with other services on the [BuildStack](Menu-System.md) Menu.
+* `Pre` and `Post` hooks work with no errors.
+* Does not require the user to edit config files in order to get the service running.
+* Ensure that your service can be backed up and restored without errors or data loss.
+* Any configs that are required before getting the service running should be configured in the service's options menu (and a BuildStack menu Issue should be displayed if not).
+* Fork the repo and push the changes to your fork. Create a cross repo PR for the mods to review. We may request additional changes from you.
+
+## Commit message
+
+```
+service_name: Add/Fix/Change feature or bug summary
+
+Optional longer description of the commit. What is changed and why it
+is changed. Wrap at 72 characters.
+
+* You can use markdown formatting as this will automatically be the
+  description of your pull-request.
+* End by adding any issues this commit fixes, one per line:
+
+Fixes #1234
+Fixes #4567
+```
+
+1. The first line is a short description. Keep it short, aim for 50 characters. This is like the subject of an email. It shouldn't try to fully or uniquely describe what the commit does. More importantly, it should aim to inform *why* this commit was made.
+
+    `service_name` - the service or project-part being changed, e.g. influxdb, grafana, docs. Documentation changes should use the name of the service. Use `docs` if it's a change to general documentation. If all else fails, use the folder-name of the file you are changing. Use lowercase.
+
+    `Add/Fix/Change` - what type of change this commit is. Capitalized.
+
+    `feature or bug summary` - a very short free-text summary giving an idea of why/what.
+
+2. Empty line.
+
+3. A longer description of what and why. Wrapped to 72 characters.
+
+    Use [github issue linking](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue) to automatically close issues when the pull-request of this commit is merged.
+
+For tips on how to use git, see [Git Setup](Git-Setup.md).
+
+## Follow up
+If your new service is approved and merged, then congratulations!
Please watch the Issues page on GitHub over the next few days and weeks to see if any users have questions or issues with your new service. + +Links: + +* [Default configs](../Basic_setup/Default-Configs.md) +* [Password configuration for Services](BuildStack-RandomPassword.md) +* [Build Stack Menu System](Menu-System.md) +* [Coding a new service](BuildStack-Services.md) +* [IOTstack issues](https://github.com/SensorsIot/IOTstack/issues) diff --git a/docs/Updates/Changelog.md b/docs/Updates/Changelog.md new file mode 100644 index 000000000..e6382d8af --- /dev/null +++ b/docs/Updates/Changelog.md @@ -0,0 +1,37 @@ +## Latest +(may include items not yet merged) + + +- Fixes to [bash aliases](../Basic_setup/Docker.md#aliases). +- Timescaledb template fixed and public port now mapped to 5433. + + +## 2022-06-12 + +- Dockerfile-based Zigbee2MQTT **deprecated**, requiring [migration]( + ../Containers/Zigbee2MQTT.md#update202204). +- New service: [Duckdns](../Containers/Duckdns.md), deprecates the + `duck/duck.sh` script. +- New service: [Influxdb 2](../Containers/InfluxDB2.md), supported only on + fully 64-bit systems. +- Docker health checks added to Grafana and InfluxDB. + +## 2022-04-26 + +- New service: [Syncthing](../Containers/Syncthing.md) +- Zigbee2MQTT: [Service definition change]( + ../Containers/Zigbee2MQTT.md#update202204) +- Dropping support for Home Assistant Supervised. Home Assistant **Container** + still available. +- [Homebridge](../Containers/Homebridge.md) is now on port 8581 +- Documentation: Added: [Git Setup](../Developers/Git-Setup.md). Large changes + to: [Updates](../Updates/index.md), [InfluxDB](../Containers/InfluxDB.md), + [Grafana](../Containers/Grafana.md), [Pi-hole](../Containers/Pi-hole.md), + [Docker Logging](../Basic_setup/Docker.md#logging). + +## 2022-01-18 + +- Networking change **requiring** [migration]( + ../Updates/migration-network-change.md). diff --git a/docs/Updates/New-Menu-Release-Notes.md b/docs/Updates/New-Menu-Release-Notes.md new file mode 100644 index 000000000..ab324a63d --- /dev/null +++ b/docs/Updates/New-Menu-Release-Notes.md @@ -0,0 +1,30 @@ +# New IOTstack Menu + +## Background +Originally this script was written in bash. After a while it became obvious that bash wasn't well suited to dealing with all the different types of configuration files and the logic that goes with configuring everything. IOTstack needs to be accessible to all levels of programmers and tinkerers, not just ones experienced with Linux and bash. For this reason, it was rewritten in Python, since the language syntax is easier to understand and it is more commonly used for scripting and programming than bash. Bash is still used in IOTstack where it makes sense to use it, but the menu system itself uses Python. The code is intentionally written so that both beginners and experienced programmers can contribute to the project. We are always open to improvements if you have suggestions. + +## On-going improvements +There are many features that still need to be introduced into the new menu system, from meta tags on services for filtering, to optional nginx autoconfiguration and authentication. For this reason you may initially experience bugs (it is very hard to test every type of configuration!). The new menu system has been worked on and tested for 6 months and we think it's stable enough to merge into the master branch for mainstream usage. The code still needs some work to make it easier to add new services and to not require copy-pasting the same code for each new service.
It also needs work so that the menu system is not required at all (so that stack builds can be automated with bash scripts). + +## Breaking changes +There are a few changes that you need to be aware of: + +* Docker environment (`*.env`) files are no longer used by default. Everything needed is specified in the service.yml file. You can still optionally use them, either with [Custom Overrides](../Basic_setup/Custom.md) or with the [PostBuild](../Developers/PostBuild-Script.md) script. Specific config files for certain services still work as they once did. +* Python 3, pip3, PyYAML and Blessed are all required to be installed. +* Not backwards compatible with the old menu system. You will be able to switch back to the old menu system for a period of time by changing to the `old-menu` branch. It will be unmaintained except for critical updates. It will eventually be removed - but not before everyone is ready to leave it. + +**Test that your backups are working before you switch.** The `old-menu` branch will become available just before the new menu is merged into master, to ensure it has the latest commits applied. + +## Full change list +* Menu and everything that goes with it rewritten in Python and Blessed +* Easy installation script +* All services rewritten to be compatible with PyYAML +* Optional port selection for services +* Issue checking for services before building +* Options for services now in menu (no more editing `service.yml` files) +* Automatic password generation for each service +* Pre and post scripts for customising services +* Removed env files +* Backup and restore are more streamlined +* Documentation updated for all services +* No longer needs to be installed in the home directory `~`. diff --git a/docs/Updates/gcgarner-migration.md b/docs/Updates/gcgarner-migration.md new file mode 100644 index 000000000..5223c349c --- /dev/null +++ b/docs/Updates/gcgarner-migration.md @@ -0,0 +1,368 @@ +# Migrating from gcgarner to SensorsIot + +These instructions explain how to migrate from [gcgarner/IOTstack](https://github.com/gcgarner/IOTstack) to [SensorsIot/IOTstack](https://github.com/SensorsIot/IOTstack). + +Migrating to SensorsIot/IOTstack was fairly easy when this repository was first forked from gcgarner/IOTstack. Unfortunately, what was a fairly simple switching procedure no longer works properly because conflicts have emerged. + +The probability of conflicts developing increases as a function of time since the fork. Conflicts were and are pretty much inevitable, so a more involved procedure is needed. + +## Migration Steps + +### Step 1 – Check your assumptions + +Make sure that you are, *actually*, on gcgarner. Don't assume! + +``` console +$ git remote -v +origin https://github.com/gcgarner/IOTstack.git (fetch) +origin https://github.com/gcgarner/IOTstack.git (push) +``` + +Do not proceed if you don't see those URLs! + +### Step 2 – Take IOTstack down + +Take your stack down. This is not *strictly* necessary but we'll be moving the goalposts a bit so it's better to be on the safe side. + +``` console +$ cd ~/IOTstack +$ docker-compose down +``` + +### Step 3 – Choose your migration method + +There are two basic approaches to switching from gcgarner/IOTstack to SensorsIot/IOTstack: + +- [Migration by changing upstream repository](#migration-option-1-change-upstream-repository) +- [Migration by clone and merge](#migration-option-2-clone-and-merge) + +You can think of the first as "working *with* git" while the second is "using brute force".
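+ +If you are unsure which option suits you, one quick way to gauge how far your copy has drifted is to ask Git to list locally modified files (the same check is described in more detail under Option 1 below) — a minimal, read-only sketch, assuming you are working in `~/IOTstack`: +``` console +$ cd ~/IOTstack +$ git status --porcelain +``` +An empty or very short list suggests Option 1 should go smoothly; a long list of modified files is a hint that Option 2 may be the easier path.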
+ +The first approach will work if you haven't tried any other migration steps and/or have not made too many changes to items in your gcgarner/IOTstack that are under git control. + +If you are already stuck or you try the first approach and get a mess, or it all looks far too hard to sort out, then try the [Migration by clone and merge](#migration-option-2-clone-and-merge) approach. + +#### Migration Option 1 – change upstream repository + +##### Check for local changes + +Make sure you are on the master branch (you probably are so this is just a precaution), and then see if Git thinks you have made any local changes: + +``` console +$ cd ~/IOTstack +$ git checkout master +$ git status +``` + +If Git reports any "modified" files, those will probably get in the way of a successful migration so it's a good idea to get those out of the way. + +For example, suppose you edited `menu.sh` at some point. Git would report that as: + +``` + modified: menu.sh +``` + +The simplest way to deal with modified files is to rename them to move them out of the way, and then restore the original: + +1. Rename your customised version by adding your initials to the end of the filename. Later, you can come back and compare your customised version with the version from GitHub and see if you want to preserve any changes. + + Here I'm assuming your initials are "jqh": + + ``` console + $ mv menu.sh menu.sh.jqh + ``` + +2. Tell git to restore the unmodified version: + + ``` console + $ git checkout -- menu.sh + ``` + +3. Now, repeat the Git command that complained about the file: + + ``` console + $ git status + ``` + + The modified file will show up as "untracked" which is OK (ignore it) + + ``` + Untracked files: + (use "git add ..." to include in what will be committed) + + menu.sh.jqh + ``` + +##### Synchronise with gcgarner on GitHub + +Make sure your local copy of gcgarner is in sync with GitHub. + +``` console +$ git pull +``` + +##### Get rid of any upstream reference + +There may or may not be any "upstream" set. The most likely reason for this to happen is if you used your local copy as the basis of a Pull Request. + +The next command will probably return an error, which you should ignore. It's just a precaution. + +``` console +$ git remote remove upstream +``` + +##### Point to SensorsIot + +Change your local repository to point to SensorsIot. + +``` console +$ git remote set-url origin https://github.com/SensorsIot/IOTstack.git +``` + +##### Synchronise with SensorsIot on GitHub + +This is where things can get a bit tricky so please read these instructions carefully **before** you proceed. + +When you run the next command, it will probably give you a small fright by opening a text-editor window. Don't panic - just keep reading. Now, run this command: + +``` console +$ git pull -X theirs origin master +``` + +The text editor window will look something like this: + +``` +Merge branch 'master' of https://github.com/SensorsIot/IOTstack + +# Please enter a commit message to explain why this merge is necessary, +# especially if it merges an updated upstream into a topic branch. +# +# Lines starting with '#' will be ignored, and an empty message aborts +# the commit. +``` + +The first line is a pre-prepared commit message, the remainder is boilerplate instructions which you can ignore. + +Exactly which text editor opens is a function of your `EDITOR` environment variable and the `core.editor` set in your global Git configuration. 
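+ +If you are unsure which editor Git will open, you can ask it in advance — a read-only check using a standard Git command: +``` console +$ git var GIT_EDITOR +``` +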
If you: + +* remember changing `EDITOR` and/or `core.editor` then, presumably, you will know how to interact with your chosen text editor. You don't need to make any changes to this file. All you need to do is save the file and exit; + +* **don't** remember changing either `EDITOR` or `core.editor` then the editor will probably be the default `vi` (aka `vim`). You need to type ":wq" (without the quotes) and then press return. The ":" puts `vi` into command mode, the "w" says "save the file" and "q" means "quit `vi`". Pressing return runs the commands. + +Git will display a long list of stuff. It's very tempting to ignore it but it's a good idea to take a closer look, particularly for signs of error or any lines beginning with: + +``` +Auto-merging +``` + +At the time of writing, you can expect Git to mention these two files: + +``` +Auto-merging menu.sh +Auto-merging .templates/zigbee2mqtt/service.yml +``` + +Those are known issues and the merge strategy `-X theirs` on the `git pull` command you have just executed deals with both, correctly, by preferring the SensorsIot version. + +Similar conflicts may emerge in future and those will **probably** be dealt with, correctly, by the same merge strategy. Nevertheless, you should still check the output very carefully for other signs of merge conflict so that you can at least be alive to the possibility that the affected files may warrant closer inspection. + +For example, suppose you saw: + +``` +Auto-merging .templates/someRandomService/service.yml +``` + +If you don't use `someRandomService` then you could safely ignore this on the basis that it was "probably right". However, if you did use that service and it started to misbehave after migration, you would know that the `service.yml` file was a good place to start looking for explanations. + +##### Finish with a pull + +At this point, only the migrated master branch is present on your local copy of the repository. The next command brings you fully in-sync with GitHub: + +``` console +$ git pull +``` + +#### Migration Option 2 – clone and merge + +If you have been following the process correctly, your IOTstack will already be down. + +##### Rename your existing IOTstack folder + +Move your old IOTstack folder out of the way, like this: + +``` console +$ cd ~ +$ mv IOTstack IOTstack.old +``` + +Note: + +* You should not need `sudo` for the `mv` command but it is OK to use it if necessary. + +##### Fetch a clean clone of SensorsIot/IOTstack + +``` console +$ git clone https://github.com/SensorsIot/IOTstack.git ~/IOTstack +``` + +Explore the result: + +``` console +$ tree -aFL 1 --noreport ~/IOTstack +/home/pi/IOTstack +├── .bash_aliases +├── .git/ +├── .github/ +├── .gitignore +├── .native/ +├── .templates/ +├── .tmp/ +├── LICENSE +├── README.md +├── docs/ +├── duck/ +├── install.sh* +├── menu.sh* +├── mkdocs.yml +└── scripts/ +``` + +Note: + +* If the `tree` command is not installed for some reason, use `ls -A1F ~/IOTstack`. + +Observe what is **not** there: + +* There is no `docker-compose.yml` +* There is no `backups` directory +* There is no `services` directory +* There is no `volumes` directory + +From this, it should be self-evident that a clean checkout from GitHub is the factory for *all* IOTstack installations, while the contents of `backups`, `services`, `volumes` and `docker-compose.yml` represent each user's individual choices, configuration options and data. 
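+ +Before merging, it can be worth confirming which of those user-specific items actually exist in your old folder — an optional check, assuming the `IOTstack.old` rename performed earlier: +``` console +$ ls -ld ~/IOTstack.old/docker-compose.yml ~/IOTstack.old/services ~/IOTstack.old/volumes +$ du -sh ~/IOTstack.old/volumes +```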
+ +##### Merge old into new + +Execute the following commands: + +``` console +$ mv ~/IOTstack.old/docker-compose.yml ~/IOTstack +$ mv ~/IOTstack.old/services ~/IOTstack +$ sudo mv ~/IOTstack.old/volumes ~/IOTstack +``` + +You should not need to use `sudo` for the first two commands. However, if you get a permissions conflict on either, you should proceed like this: + +* docker-compose.yml + + ``` console + $ sudo mv ~/IOTstack.old/docker-compose.yml ~/IOTstack + $ sudo chown pi:pi ~/IOTstack/docker-compose.yml + ``` + +* services + + ``` console + $ sudo mv ~/IOTstack.old/services ~/IOTstack + $ sudo chown -R pi:pi ~/IOTstack/services + ``` + +There is no need to migrate the `backups` directory. You are better off creating it by hand: + +``` console +$ mkdir ~/IOTstack/backups +``` + +### Step 4 – Choose your menu + +If you have reached this point, you have migrated to SensorsIot/IOTstack where you are on the "master" branch. This implies "new menu". + +The choice of menu is entirely up to you. Differences include: + +1. New menu takes a **lot** more screen real-estate than old menu. If you do a fair bit of work on small screens (eg iPad) you might find it hard to work with new menu. +2. New menu creates a large number of internal Docker networks whereas old menu has *one internal network to rule them all*. The practical consequence is that most users see error messages for networks being defined but not used, and occasionally run into problems where two containers can't talk to each other without tinkering with the networks. Neither of those happen under old menu. See [Issue 245](https://github.com/SensorsIot/IOTstack/issues/245) if you want more information on this. +3. New menu has moved the definition of environment variables into `docker-compose.yml`. Old menu keeps environment variables in "environment files" in `~/IOTstack/services`. There is no "right" or "better" about either approach. It's just something to be aware of. +4. Under new menu, the `service.yml` files in `~/IOTstack/.templates` have all been left-shifted by two spaces. That means you can no longer use copy and paste to test containers - you're stuck with the extra work of re-adding the spaces. Again, this doesn't *matter* but you do need to be aware of it. + +What you give up when you choose old menu is summarised in the following. If a container appears on the right hand side but not the left then it is only available in new menu. 
+ +``` +old-menu master (new menu) +├── adminer ├── adminer +├── blynk_server ├── blynk_server +├── dashmachine ├── dashmachine +├── deconz ├── deconz +├── diyhue ├── diyhue +├── domoticz ├── domoticz +├── dozzle ├── dozzle +├── espruinohub ├── espruinohub + > ├── example_template +├── gitea ├── gitea +├── grafana ├── grafana +├── heimdall ├── heimdall + > ├── home_assistant +├── homebridge ├── homebridge +├── homer ├── homer +├── influxdb ├── influxdb +├── mariadb ├── mariadb +├── mosquitto ├── mosquitto +├── motioneye ├── motioneye +├── nextcloud ├── nextcloud +├── nodered ├── nodered +├── openhab ├── openhab +├── pihole ├── pihole +├── plex ├── plex +├── portainer ├── portainer +├── portainer_agent ├── portainer_agent +├── portainer-ce ├── portainer-ce +├── postgres ├── postgres +├── prometheus ├── prometheus +├── python ├── python +├── qbittorrent ├── qbittorrent +├── rtl_433 ├── rtl_433 +├── tasmoadmin ├── tasmoadmin +├── telegraf ├── telegraf +├── timescaledb ├── timescaledb +├── transmission ├── transmission +├── webthings_gateway ├── webthings_gateway +├── wireguard ├── wireguard +└── zigbee2mqtt ├── zigbee2mqtt + > └── zigbee2mqtt_assistant +``` + +You also give up the `compose-override.yml` functionality. On the other hand, Docker has its own `docker-compose.override.yml` which works with both menus. + +If you want to switch to the old menu: + +``` console +$ git checkout old-menu +``` + +Any time you want to switch back to the new menu: + +``` console +$ git checkout master +``` + +You can switch back and forth as much as you like and as often as you like. It's no harm, no foul. The branch you are on just governs what you see when you run: + +``` console +$ ./menu.sh +``` + +Although you can freely change branches, it's probably not a good idea to try to mix-and-match your menus. Pick one menu and stick to it. + +Even so, nothing will change **until** you run your chosen menu to completion and allow it to generate a new `docker-compose.yml`. + +### Step 5 – Bring up your stack + +Unless you have gotten ahead of yourself and have already run the menu (old or new) then nothing will have changed in the parts of your `~/IOTstack` folder that define your IOTstack implementation. You can safely: + +``` console +$ docker-compose up -d +``` + +## See also + +There is another gist [Installing Docker for IOTstack](https://gist.github.com/Paraphraser/d119ae81f9e60a94e1209986d8c9e42f) which explains how to overcome problems with outdated Docker and Docker-Compose installations. + +Depending on the age of your gcgarner installation, you **may** run into problems which will be cured by working through that gist. diff --git a/docs/Updates/index.md b/docs/Updates/index.md new file mode 100644 index 000000000..1d55cd682 --- /dev/null +++ b/docs/Updates/index.md @@ -0,0 +1,208 @@ +# Updating the project + +There are two different update sources: the IOTstack project (github.com) and +Docker image registries (e.g. hub.docker.com). Both the initial stack creation +and updates use both of these. Initial creation is a bit simpler, as the +intermediate steps are done automatically. For a full update they need to be +performed explicitly. 
To illustrate the steps and artifacts of the *update* +process: + +``` mermaid +flowchart TD + GIT[github.com/sensorsiot/IOTstack.git] + GIT --- GITPULL([$ git pull -r]) + GITPULL --> TEMPLATES["~/IOTstack/.templates"] + TEMPLATES --- MENU([$ ./menu.sh -> Build stack]) + MENU --> COMPOSE["~/IOTstack/docker-compose.yml + ~/IOTstack/.templates/*/Dockerfile + ~/IOTstack/services/*/Dockerfile"] + COMPOSE --- UP(["$ docker-compose up --build -d"]) + + HUB[hub.docker.com images and tags] + HUB --- PULL([$ docker-compose pull\n$ docker-compose build --pull --no-cache]) + COMPOSE --- PULL + PULL --> CACHE[local Docker image cache] + CACHE --- UP + + UP --> CONTAINER[recreated Docker containers based on the latest cached images] + + classDef command fill:#9996,stroke-width:0px + class GITPULL,MENU,UP,PULL command +``` + +??? note "Minor details fudged in the graph" + + In order to keep the graph simple, some minor details were left unprecise: + + - `$ docker-compose pull` will read `docker-compose.yml`, in order to know + what image tags to check for updates. + - `$ docker-compose build --pull --no-cache` will use `docker-compose.yml` + to find which of the "build:" sources are in use: + + * `~/IOTstack/.templates/*/Dockerfile` + * `~/IOTstack/services/*/Dockerfile` + * remote repositories with Dockerfiles + + and pull Docker images referenced in these while building. + - `$ docker-compose up --build -d` may not require the "--build"-flag, + but having it won't hurt (and may help keep some corner-case problems + away, docker may be a bit finicky). + +## Backup and rollback + +The usual way of backing up just your `~/IOTstack` contents isn't sufficient +for a 100% identical restore. Some containers may have local ephemeral +modifications that will be lost when they're recreated. Currently running +containers may be based on now outdated images. Recreating a container using an +old image is tricky. The local Docker image cache can't easily be restored to +the same state with old images and old tag references. The `docker pull` will +fetch the latest images, but it's not unheard of that the latest image may +break [something]( +https://github.com/node-red/node-red/issues/3461#issuecomment-1076348639). + +Thus to *guarantee* a successful rollback to the pre-update state, you have to +shutdown your RPi and save a complete disk image backup of its storage using +another machine. + +For a hobby project, not having a perfect rollback may be a risk you're willing +to take. Usually image problems will have fixes/workarounds within a day. + +## Update Raspberry Pi OS + +You should keep your Raspberry Pi up-to-date. Despite the word "container" +suggesting that containers are fully self-contained, they sometimes depend on +operating system components (WireGuard is an example). + +``` console +$ sudo apt update +$ sudo apt upgrade -y +``` + +## Recommended: Update only Docker images + +When you built the stack using the menu, it created the Docker Compose file +`docker-compose.yml`. This file and any used build instructions +(`Dockerfile`s), use image name and tag references to images on hub.docker.com +or other registries. An undefined tag defaults to `:latest`. When Docker is +told to pull updated images, it will download the images into the local +cache, based upon what is currently stored at the registry for the used names +and tags. + +Updating the IOTstack project templates and recreating your +`docker-compose.yml` isn't usually necessary. 
Doing so isn't likely to provide +much benefit, and may actually break something. A full update is only +recommended when there is a new feature or change you need. + +!!! tip "Recommended update procedure" + + 1. Shut down your RPi, remove the storage medium and make a full backup + [image](https://www.howtogeek.com/341944/how-to-clone-your-raspberry-pi-sd-card-for-foolproof-backup/) + of the storage to another machine. Reattach the storage and power + up your RPi. + NOTE: Skipping this step may cause days of downtime as you debug a + problem or wait for fixes. + 2. Get the latest images from the web: + ``` console + $ docker-compose pull + ``` + 3. Rebuild locally created images based on new parent images: + ``` console + $ docker-compose build --pull --no-cache + ``` + Note: this may not do anything, depending on your selected services. + 4. Update (recreate) containers that have new images: + ``` console + $ docker-compose up --build -d + ``` + +If a service fails to start after it's updated, especially if you are updating +frequently, wait for a few hours and repeat the update procedure. Sometimes bad +releases are published to hub.docker.com, but they are usually fixed in under +half a day. Of course you are always welcome to report the problem to our +[Discord](https://discord.gg/ZpKHnks) server. Usually someone else has +encountered the same problem and reported the fix. + +## Full update + +Periodically, updates are made to the project which include new or updated container +templates, changes to backups, or additional features. To evaluate if this is +really needed, see the [changelog](Changelog.md) or [merged pull requests]( +https://github.com/SensorsIot/IOTstack/pulls?q=is%3Amerged). To apply all these +changes, all service definitions are recreated. As a drawback, this will wipe +any custom changes to docker-compose.yml, may change semantics or even require +manual migration steps. + +!!! danger "Breaking update" + A change made on 2022-01-18 will require [manual steps]( + ../Updates/migration-network-change.md) + or you may get an error like: + `ERROR: Service "influxdb" uses an undefined network "iotstack_nw"` + +Full update steps: + +1. Shut down your RPi, remove the storage medium and make a [full backup + image](https://www.howtogeek.com/341944/how-to-clone-your-raspberry-pi-sd-card-for-foolproof-backup/) + of the storage to another machine. Reattach the storage and power up + your RPi. + NOTE: Skipping this step may cause days of downtime as you debug a problem or + wait for fixes. +2. Check `git status --untracked-files=no` for any local changes you may have + made to project files. For any listed changes, either: + + 1. Save and preserve your change by doing a local commit: `git commit -m + "local customization" -- path/to/changed_file`, or + 2. Revert it using: `git checkout -- path/to/changed_file` + +3. Update project files from GitHub: `git pull -r origin master` +4. Save your current compose file: `cp docker-compose.yml + docker-compose.yml.bak`. NOTE: this is really useful, as the next step will + overwrite all your previous manual changes to docker-compose.yml. +5. Recreate the compose file and Dockerfiles: `./menu.sh`, select Build Stack, + for each of your selected services: de- and re-select it, press enter to + build, and then exit. +6. Check the changes for obvious errors (e.g. passwords): `diff + docker-compose.yml docker-compose.yml.bak` +7. Perform the Docker image update procedure: + ``` console + $ docker-compose pull + $ docker-compose build --pull --no-cache + $ docker-compose up --build -d + ``` + +### Troubleshooting: if a container fails to start after update + +* Try restarting the whole stack: `docker-compose restart` +* Check the log output of the failing service: `docker-compose logs *service-name*` + * Try googling and fixing problems in docker-compose.yml manually. +* Check how the container definitions have changed: `diff docker-compose.yml + docker-compose.yml.bak` +* Try rebuilding your complete stack from scratch: + 1. Check that you have a backup. + 2. Stop and remove Docker containers: `docker-compose down` + 3. Remove all menu-generated files: `rm -r docker-compose.yml services` + 4. Recreate the stack: `./menu.sh`, select Build Stack, select all your + services, press enter to build, and then exit. + 5. Try starting: `docker-compose up -d` +* Go to the [IOTstack Discord](https://discord.gg/ZpKHnks) and describe your + problem. We're happy to help. + +## Old-menu + +!!! warning + If you ran `git checkout -- 'git ls-files -m'` as suggested in the old wiki entry, then please check your duck.sh, because that command removed your domain and token. + +Git offers built-in functionality to fetch the latest changes. + +`git pull origin master` will fetch the latest changes from GitHub without overwriting files that you have modified yourself. If you have done a local commit then you may need to handle a merge conflict. + +This can be verified by running `git status`. You can ignore it if it reports duck.sh as being modified. + +![image](https://user-images.githubusercontent.com/46672225/68645804-d42d0000-0521-11ea-842f-fd0b2d22cd0e.png) + +Should you have any modified scripts or templates, they can be reset to the latest version with `git checkout -- scripts/ .templates/` + +With the latest version of the project you can now use the menu to build your stack. If there is a particular container whose template you would like to update, you can select that at the overwrite option for your container. You have the choice not to overwrite, to preserve env files, or to completely overwrite any changes (passwords). + +![image](https://user-images.githubusercontent.com/46672225/68646024-8fee2f80-0522-11ea-8b6e-f1d439a5be7f.png) + +After your stack has been rebuilt, you can run `docker-compose up -d` to pull in the latest changes.
If you have not update your images in a while consider running the `./scripts/update.sh` to get the latest version of the image from Docker hub as well diff --git a/docs/Updates/migration-network-change.md b/docs/Updates/migration-network-change.md new file mode 100644 index 000000000..303878182 --- /dev/null +++ b/docs/Updates/migration-network-change.md @@ -0,0 +1,64 @@ +# Migration: network change + +Networking under both *new menu* (master branch) and *old menu* (old-menu branch) has undergone a significant change. This will not affect new users of IOTstack (who will adopt it automatically). Neither will it affect existing users who do not use the menu to maintain their stacks (see [adopting networking changes by hand](#networkHandEdit) below). + +Users who *do* use the menu to maintain their stacks will also be unaffected *until the next menu run*, at which point it will be prudent to down your stack entirely and re-select all your containers. Downing the stack causes Docker to remove all associated networks as well as the containers. + +These changes mean that networking is **identical** under both *old* and *new* menus. To summarise the changes: + +1. Only two internal networks are defined – as follows: + + * "default" which adopts the name `iotstack_default` at runtime. + * "nextcloud" which adopts the name `iotstack_nextcloud` at runtime. + + If you are using docker-compose v2.0.0 or later then the `iotstack_nextcloud` network will only be instantiated if you select NextCloud as one of your services. Earlier versions of docker-compose instantiate all networks even if no service uses them (which is why you get those warnings at "up" time). + +2. The only service definitions which now have `networks:` directives are: + + * NextCloud: joins the "default" and "nextcloud" networks; and + * NextCloud_DB: joins the "nextcloud" network. + + All other containers will join the "default" network, automatically, without needing any `networks:` directives. + +#### adopting networking changes by hand + +If you maintain your `docker-compose.yml` by hand, you can adopt the networking changes by doing the following: + +1. Take your stack down. This causes Docker to remove any existing networks. +2. Remove **all** `networks:` directives wherever they appear in your `docker-compose.yml`. That includes: + + * the `networks:` directives in all service definitions; and + * the `networks:` specifications at the end of the file. + +3. Append the contents of the following file to your `docker-compose.yml`: + + ``` + ~/IOTstack/.templates/docker-compose-base.yml + ``` + + For example: + + ``` + $ cat ~/IOTstack/.templates/docker-compose-base.yml >>~/IOTstack/docker-compose.yml + ``` + + The `docker-compose-base.yml` file is named `env.yml` in the *old-menu* branch. + +4. If you run the NextCloud service then: + + * Add these lines to the NextCloud service definition: + + ``` + networks: + - default + - nextcloud + ``` + + * Add these lines to the NextCloud_DB service definition: + + ``` + networks: + - nextcloud + ``` + +5. Bring up your stack. diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 000000000..a7708362c --- /dev/null +++ b/docs/index.md @@ -0,0 +1,31 @@ +--- +title: Home +hide: + - navigation +--- +# IOTStack Wiki + +!!! abstract inline end "What is IOTstack" + IOTstack is a builder for docker-compose to easily make and maintain IoT + stacks on the Raspberry Pi + +Welcome to IOTstack: + +* + Use the top tabs and then the left list to explore this Wiki. 
+ + + +* If you are just getting started with IOTstack, see [Getting Started](Basic_setup/index.md). + * For latest changes, see [Changelog](Updates/Changelog.md). + * If you're running gcgarner/IOTstack see [Migrating to SensorsIot](Updates/gcgarner-migration.md). + +* You're always welcome to ask questions on the [IOTStack Discord](https://discord.gg/ZpKHnks). + +* Fixes and improvements welcome, see [Contributing](./Developers/index.md) + +!!! cite inline end "Full site page listing" + + {nav} diff --git a/docs/javascript/fix-codeblock-console-copy-button.js b/docs/javascript/fix-codeblock-console-copy-button.js new file mode 100644 index 000000000..2aa4b8114 --- /dev/null +++ b/docs/javascript/fix-codeblock-console-copy-button.js @@ -0,0 +1,34 @@ +document.addEventListener("DOMContentLoaded", function() { + fixCopyOnlyUserSelectable(); +}); + +function fixCopyOnlyUserSelectable() { + buttonsToFix = document.querySelectorAll( + '.language-console button.md-clipboard'); + if (buttonsToFix.length) + console.log('Fixing copy-to-clipboard text of console code-blocks.'); + buttonsToFix.forEach((btn) => { + var content = extractUserSelectable(btn.dataset.clipboardTarget); + btn.dataset.clipboardText = content; + }); +} + +function extractUserSelectable(selector) { + var result = ''; + var element = document.querySelector(selector); + element.childNodes.forEach((child) => { + if (child instanceof Element) { + var s=window.getComputedStyle(child); + if (s.getPropertyValue('user-select') == 'none' || + s.getPropertyValue('-webkit-user-select') == 'none' || + s.getPropertyValue('-ms-user-select') == 'none') + { + return; + } + } + result += child.textContent; + }); + // remove empty lines + result = result.replace(/^\s*\n/gm, '') + return result; +} diff --git a/docs/stack-24.svg b/docs/stack-24.svg new file mode 100644 index 000000000..05794aee6 --- /dev/null +++ b/docs/stack-24.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/style.css b/docs/style.css new file mode 100644 index 000000000..df18e0199 --- /dev/null +++ b/docs/style.css @@ -0,0 +1,32 @@ +/* vim: set sw=2: */ + +/* hide "Made with Material" footer */ +.md-footer-meta { + display: none; +} + +/* prevent selection of prefix and output for console syntax */ +.language-console .gp, .language-console .go { + user-select: none; + -webkit-user-select: none; /* Chrome/Safari */ + -ms-user-select: none; /* IE10+ */ +} + +@media screen and (max-width:76.25em) { + .show-when-wide-layout { + display:none + } +} +@media screen and (min-width:76.25em) { + .show-when-narrow-layout { + display:none + } +} + +/* Make dark-mode links just a tiny bit lighter, for better contrast + on low brightness screens. */ +@media screen { + [data-md-color-scheme="slate"] { + --md-typeset-a-color: var(--md-primary-fg-color--light) !important; + } +} diff --git a/duck/duck.sh b/duck/duck.sh index bfec7a339..37852a956 100755 --- a/duck/duck.sh +++ b/duck/duck.sh @@ -1,6 +1,27 @@ -#!/bin/bash -# Your comma-separated domains list -DOMAINS="YOUR_DOMAINS" +#!/usr/bin/env bash + +# Your DuckDNS domain (or comma-separated list of DuckDNS domains if you +# have multiple domains associated with the same IP address). +DOMAINS="YOURS.duckdns.org" + # Your DuckDNS Token DUCKDNS_TOKEN="YOUR_DUCKDNS_TOKEN" -curl -k -o /var/log/duck.log "https://www.duckdns.org/update?domains=${DOMAINS}&token=${DUCKDNS_TOKEN}&ip=" + +# is this script running in the foreground or background? +if [ "$(tty)" = "not a tty" ] ; then + + # background! Assume launched by cron. 
Add a random delay to avoid + # every client contacting DuckDNS at exactly the same moment. + sleep $((RANDOM % 60)) + +fi + +# mark the event in case this is being logged. +echo "$(date "+%a, %d %b %Y %H:%M:%S %z") - updating DuckDNS" + +# Request duckdns to update your domain name with your public IP address +curl --max-time 10 \ + "https://www.duckdns.org/update?domains=${DOMAINS}&token=${DUCKDNS_TOKEN}&ip=" + +# curl does not append newline so fix that +echo "" diff --git a/install.sh b/install.sh new file mode 100755 index 000000000..d6f22932b --- /dev/null +++ b/install.sh @@ -0,0 +1,430 @@ +#!/usr/bin/env bash + +# version - MUST be exactly 7 characters! +UPDATE="2024v03" + +echo " " +echo " _____ ____ _______ _ installer _ " +echo " |_ _/ __ \\__ __|| | $UPDATE | | " +echo " | || | | | | |___| |_ __ _ ___| | __" +echo " | || | | | | / __| __/ _\` |/ __| |/ /" +echo " _| || |__| | | \\__ \\ || (_| | (__| < " +echo " |_____\\____/ |_|___/\\__\\__,_|\\___|_|\\_\\" +echo " " +echo " " + +#---------------------------------------------------------------------- +# The intention of this script is that it should be able to be run +# multiple times WITHOUT doing any harm. If you propose changes, please +# make sure you test the script in both a "green fields" system AND on +# a working system where docker, docker-compose and IOTstack are already +# installed. +#---------------------------------------------------------------------- + +# overuse of sudo is a very common problem among new IOTstack users +[ "$EUID" -eq 0 ] && echo "This script should NOT be run using sudo" && exit 1 + +# this script should be run without arguments +[ $# -ne 0 ] && echo "command line argument(s) $@ ignored" + +# assumption(s) which can be overridden +IOTSTACK=${IOTSTACK:-"$HOME/IOTstack"} + +# derived path(s) - note that the menu knows about most of these so +# they can't just be changed without a lot of care. +IOTSTACK_ENV="$IOTSTACK/.env" +IOTSTACK_MENU_REQUIREMENTS="$IOTSTACK/requirements-menu.txt" +IOTSTACK_MENU_VENV_DIR="$IOTSTACK/.virtualenv-menu" +IOTSTACK_INSTALLER_HINT="$IOTSTACK/.new_install" + +# git cloning options which can be overridden +# (needs special handling for the null case) +if [[ ! -v GIT_CLONE_OPTIONS ]] ; then + GIT_CLONE_OPTIONS="--filter=tree:0" +fi + +# the expected installation location of docker-compose-plugin is +COMPOSE_PLUGIN_PATH="/usr/libexec/docker/cli-plugins/docker-compose" + +# the default location of a symlink in the PATH pointing to the above is +COMPOSE_SYMLINK_PATH="/usr/local/bin/docker-compose" + +# add these to /boot/cmdline.txt (if it exists) +CMDLINE_OPTIONS="cgroup_memory=1 cgroup_enable=memory" + +# dependencies installed via apt +APT_DEPENDENCIES="curl git jq python3-pip python3-dev python3-virtualenv uuid-runtime whiptail" + +# minimum version requirements +DOCKER_VERSION_MINIMUM="24" +COMPOSE_VERSION_MINIMUM="2.20" +PYTHON_VERSION_MINIMUM="3.9" + +# best-practice for group membership +DESIRED_GROUPS="docker bluetooth" + +# what to do at script completion (reboot takes precedence) +REBOOT_REQUIRED=false +LOGOUT_REQUIRED=false + +#---------------------------------------------------------------------- +# Check script dependencies +#---------------------------------------------------------------------- + +echo -e -n "\nChecking operating-system environment - " +# This script assumes apt and dpkg are available. That's more-or-less +# the same as saying Debian oe Debian-derived. If apt and/or dpkg are +# missing then there's not much that can be done. 
+if [ -z $(which apt) -o -z $(which dpkg) ] ; then + echo "fail" + unset ID + [ -f "/etc/os-release" ] && eval $(grep "^ID=" /etc/os-release) + if [ "$ID" = "debian" ] ; then + echo "This system looks like it is based on Debian but seems to be missing" + echo "some key utilities (apt and/or dpkg). That suggests something is wrong." + echo "This script can't proceed until those issues are resolved." + else + echo "Some key utilities that are needed by this script seem to be missing" + echo "from this system. Both the Advanced Package Tool (apt) and the Debian" + echo "Package Manager (dpkg) are core components of Debian and Debian-derived" + echo "distributions like Raspberry Pi OS (aka Raspbian). It looks like you" + echo "might be trying to install IOTstack on a system which isn't based on" + echo "Debian. IOTstack has only ever been tested on Debian-based distributions" + echo "and is not qualified for other Linux or Unix distributions. This script" + echo "can't proceed." + fi + # direct exit - not via handle_exit() + exit 1 +else + echo "pass" +fi + + +#---------------------------------------------------------------------- +# script memory (exit conditions) +#---------------------------------------------------------------------- + +function handle_exit() { + + # record the exit condition (if possible) + [ -d "$IOTSTACK" ] && echo "$1" >"$IOTSTACK_INSTALLER_HINT" + + # inform the user + echo -n "install.sh completed" + + # advise if should be re-run + [ $1 -ne 0 ] && echo -n " - but should be re-run" + + # reboot takes precedence over logout + if [ "$REBOOT_REQUIRED" = "true" ] ; then + echo " - a reboot is required." + sleep 2 + sudo reboot + elif [ "$LOGOUT_REQUIRED" = "true" ] ; then + echo " - a logout is required." + sleep 2 + # iterate ancestor processes + for ANCESTOR in $(ps -o ppid=) ; do + # find first process belonging to current user + if [ "$(ps -p $ANCESTOR -o user=)" = "$USER" ] ; then + # kill it + kill -HUP $ANCESTOR + fi + done + # should not reach this + sleep 2 + fi + + # exit as instructed + echo "" + exit $1 + +} + + +#---------------------------------------------------------------------- +# IOTstack dependencies installed via apt +#---------------------------------------------------------------------- + +echo -e "\nUpdating Advanced Package Tool (apt) caches" +sudo apt update + +echo -e "\nInstalling/updating IOTstack dependencies" +sudo apt install -y $APT_DEPENDENCIES + + +#---------------------------------------------------------------------- +# docker + compose installation +#---------------------------------------------------------------------- + +# is docker installed? +if [ -z $(which docker) ] ; then + # no! use the convenience script + echo -e "\nInstalling docker and docker-compose-plugin using the 'convenience script'" + echo "from https://get.docker.com ..." + curl -fsSL https://get.docker.com | sudo sh + if [ $? -eq 0 ] ; then + echo -e "\nInstallation of docker and docker-compose-plugin completed normally." + REBOOT_REQUIRED=true + else + echo -e "\nThe 'convenience script' returned an error. Unable to proceed." 
+ handle_exit 1 + fi +else + echo -e -n "\nDocker is already installed - checking your version - " + DOCKER_VERSION_INSTALLED="$(docker version -f "{{.Server.Version}}")" + if dpkg --compare-versions "$DOCKER_VERSION_MINIMUM" "gt" "$DOCKER_VERSION_INSTALLED" ; then + echo "fail" + echo "You have an obsolete version of Docker installed:" + echo " Minimum version required: $DOCKER_VERSION_MINIMUM" + echo " Version currently installed: $DOCKER_VERSION_INSTALLED" + echo "Try updating your system by running:" + echo " \$ sudo apt update && sudo apt upgrade -y" + echo " \$ docker version -f {{.Server.Version}}" + echo "If the version number changes, try re-running this script. If the" + echo "version number does not change, you may need to uninstall both" + echo "docker and docker-compose. If any containers are running, stop" + echo "them, then run:" + echo " \$ sudo systemctl stop docker.service" + echo " \$ sudo systemctl disable docker.service" + echo " \$ sudo apt -y purge docker-ce docker-ce-cli containerd.io docker-compose" + echo " \$ sudo apt -y autoremove" + echo " \$ sudo reboot" + echo "and then re-run this script after the reboot." + handle_exit 1 + else + echo "pass" + fi +fi + + +#---------------------------------------------------------------------- +# group memberships +#---------------------------------------------------------------------- + +function should_add_user_to_group() +{ + # sense group does not exist + grep -q "^$1:" /etc/group || return 1 + # sense group exists and user is already a member + groups | grep -q "\b$1\b" && return 1 + # group exists, user should be added + return 0 +} + +# check group membership +echo -e -n "\nChecking group memberships" +for GROUP in $DESIRED_GROUPS ; do + echo -n " - $GROUP " + if should_add_user_to_group $GROUP ; then + echo -n "adding $USER" + sudo /usr/sbin/usermod -G $GROUP -a $USER + LOGOUT_REQUIRED=true + else + echo -n "pass" + fi +done +echo "" + +#---------------------------------------------------------------------- +# docker-compose setup/verification +#---------------------------------------------------------------------- + +# Correct installation of docker-compose is defined as the result of +# `which docker-compose` (typically $COMPOSE_SYMLINK_PATH) being a +# symlink pointing to the expected location of docker-compose-plugin as +# it is installed by the convenience script ($COMPOSE_PLUGIN_PATH). +# Alternatively, if `which docker-compose` returns null but the plugin +# is in the expected location, the necessary symlink can be created by +# this script and then docker-compose will be installed "correctly". + +function is_python_script() { + [ $(file -b "$1" | grep -c "^Python script") -gt 0 ] && return 0 + return 1 +} + +# presume docker-compose not installed correctly +COMPOSE_INSTALLED_CORRECTLY=false + +# search for docker-compose in the PATH +COMPOSE_CMD_PATH=$(which docker-compose) + +# is docker-compose in the PATH? +echo -e -n "\nChecking whether docker-compose is installed correctly - " +if [ -n "$COMPOSE_CMD_PATH" ] ; then + # yes! is it a symlink and does the symlink point to a file? + if [ -L "$COMPOSE_CMD_PATH" -a -f "$COMPOSE_CMD_PATH" ] ; then + # yes! fetch the inode of what the link points to + COMPOSE_CMD_INODE=$(stat -c "%i" -L "$COMPOSE_CMD_PATH") + # does the plugin exist at the expected path? + if [ -f "$COMPOSE_PLUGIN_PATH" ] ; then + # yes! fetch the plugin's inode + COMPOSE_PLUGIN_INODE=$(stat -c "%i" "$COMPOSE_PLUGIN_PATH") + # are the inodes the same? 
+ if [ $COMPOSE_CMD_INODE -eq $COMPOSE_PLUGIN_INODE ] ; then + # yes! thus docker-compose is installed correctly + COMPOSE_INSTALLED_CORRECTLY=true + fi + fi + fi +else + # no! does the plugin exist at the expected location? + if [ -f "$COMPOSE_PLUGIN_PATH" ] ; then + # yes! so, no command, but plugin present. Fix with symlink + sudo ln -s "$COMPOSE_PLUGIN_PATH" "$COMPOSE_SYMLINK_PATH" + # and now compose is installed correctly + COMPOSE_INSTALLED_CORRECTLY=true + else + echo "fail" + echo "Your system has docker installed but doesn't seem to have either" + echo "docker-compose or docker-compose-plugin. Try running:" + echo " \$ sudo apt install -y docker-compose-plugin" + echo "and then try re-running this script." + handle_exit 1 + fi +fi + +# is docker-compose installed correctly? +if [ "$COMPOSE_INSTALLED_CORRECTLY" = "true" ] ; then + echo "pass" + echo -e -n "\nChecking your version of docker-compose - " + COMPOSE_VERSION_INSTALLED="$(docker-compose version --short)" + if dpkg --compare-versions "$COMPOSE_VERSION_MINIMUM" "gt" "$COMPOSE_VERSION_INSTALLED" ; then + echo "fail" + echo "You have an obsolete version of docker-compose installed:" + echo " Minimum version required: $COMPOSE_VERSION_MINIMUM" + echo " Version currently installed: $COMPOSE_VERSION_INSTALLED" + echo "Try updating your system by running:" + echo " \$ sudo apt update && sudo apt upgrade -y" + echo "and then try re-running this script." + handle_exit 1 + else + echo "pass" + fi +else + echo "fail" + echo "docker-compose is not installed correctly. The most common reason is" + echo "having installed docker and docker-compose without using the official" + echo "'convenience script'. You may be able to solve this problem by running" + if is_python_script "$COMPOSE_CMD_PATH" ; then + echo " \$ export PIP_BREAK_SYSTEM_PACKAGES=1" + echo " \$ pip3 uninstall -y docker-compose" + echo " \$ sudo pip3 uninstall -y docker-compose" + echo " (ignore any errors from those commands)" + else + echo " \$ sudo apt purge -y docker-compose" + fi + echo "and then try re-running this script." + handle_exit 1 +fi + + +#---------------------------------------------------------------------- +# Clone IOTstack repo +#---------------------------------------------------------------------- + +# does the IOTstack folder already exist? +if [ ! -d "$IOTSTACK" ] ; then + # no! clone from GitHub + if [ -n "$GIT_CLONE_OPTIONS" ] ; then + echo -e "\nCloning the IOTstack repository from GitHub using options $GIT_CLONE_OPTIONS" + git clone "$GIT_CLONE_OPTIONS" https://github.com/SensorsIot/IOTstack.git "$IOTSTACK" + else + echo -e "\nCloning the full IOTstack repository from GitHub" + git clone https://github.com/SensorsIot/IOTstack.git "$IOTSTACK" + fi + if [ $? -eq 0 -a -d "$IOTSTACK" ] ; then + echo "IOTstack cloned successfully into $IOTSTACK" + else + echo "Unable to clone IOTstack (likely a git or network error)" + handle_exit 1 + fi +else + echo -e "\n$IOTSTACK already exists - no need to clone from GitHub" +fi + +# ensure backups and services directories exist and are owned by $USER +# https://github.com/SensorsIot/IOTstack/issues/651#issuecomment-2525347511 +mkdir -p "$IOTSTACK/backups" "$IOTSTACK/services" +sudo chown -R "$USER:$USER" "$IOTSTACK/backups" "$IOTSTACK/services" +# but, if the influxdb backup dir already exists, put it back to root +[ -d "$IOTSTACK/backups/influxdb" ] && sudo chown -R "root:root" "$IOTSTACK/backups/influxdb" + +# initialise docker-compose global environment file with system timezone +if [ ! 
-f "$IOTSTACK_ENV" ] || [ $(grep -c "^TZ=" "$IOTSTACK_ENV") -eq 0 ] ; then + echo "TZ=$(cat /etc/timezone)" >>"$IOTSTACK_ENV" +fi + +#---------------------------------------------------------------------- +# Python support +#---------------------------------------------------------------------- + +# make sure "python" invokes "python3" +PYTHON_INVOKES=$(update-alternatives --list python 2>/dev/null) +PYTHON3_PATH=$(which python3) +if [ "$PYTHON_INVOKES" != "$PYTHON3_PATH" ] ; then + echo -e "\nMaking python3 the default" + sudo update-alternatives --install /usr/bin/python python "$PYTHON3_PATH" 1 +fi + +echo -e -n "\nChecking your version of Python - " +PYTHON_VERSION_INSTALLED="$(python --version)" +PYTHON_VERSION_INSTALLED="${PYTHON_VERSION_INSTALLED#*Python }" +if dpkg --compare-versions "$PYTHON_VERSION_MINIMUM" "gt" "$PYTHON_VERSION_INSTALLED" ; then + echo "fail" + echo "You have an obsolete version of python installed:" + echo " Minimum version required: $PYTHON_VERSION_MINIMUM" + echo " Version currently installed: $PYTHON_VERSION_INSTALLED" + echo "Try updating your system by running:" + echo " \$ sudo apt update && sudo apt upgrade -y" + echo " \$ python --version" + echo "If the version number changes, try re-running this script. If not, you" + echo "may need to reinstall python3-pip, python3-dev and python3-virtualenv." + handle_exit 1 +else + echo "pass" +fi + +# implement menu requirements +if [ -e "$IOTSTACK_MENU_REQUIREMENTS" ] ; then + echo -e "\nChecking and updating IOTstack dependencies (pip)" + echo "Note: pip3 installs bypass externally-managed environment check" + PIP_BREAK_SYSTEM_PACKAGES=1 pip3 install -U -r "$IOTSTACK_MENU_REQUIREMENTS" +fi + +# trigger re-creation of venv on next menu launch. Strictly speaking, +# sudo is not required for this but it protects against accidental prior +# use of sudo when the venv was created +sudo rm -rf "$IOTSTACK_MENU_VENV_DIR" + + +#---------------------------------------------------------------------- +# Raspberry Pi boot options +#---------------------------------------------------------------------- + +# set cmdline options (if possible - Raspberry Pi dependency) +TARGET="/boot/firmware/cmdline.txt" +[ -e "$TARGET" ] || TARGET="/boot/cmdline.txt" +if [ -e "$TARGET" ] ; then + echo -e -n "\nChecking Raspberry Pi boot-time options - " + unset APPEND + for OPTION in $CMDLINE_OPTIONS ; do + if [ $(grep -c "$OPTION" "$TARGET") -eq 0 ] ; then + APPEND="$APPEND $OPTION" + fi + done + if [ -n "$APPEND" ] ; then + echo "appending$APPEND" + sudo sed -i.bak "s/$/$APPEND/" "$TARGET" + REBOOT_REQUIRED=true + else + echo "no modifications needed" + fi +fi + + +#---------------------------------------------------------------------- +# normal exit +#---------------------------------------------------------------------- + +handle_exit 0 diff --git a/menu.sh b/menu.sh index db259a6a7..2287a417e 100755 --- a/menu.sh +++ b/menu.sh @@ -1,442 +1,404 @@ #!/bin/bash +# vim: noexpandtab -#get path of menu correct -pushd ~/IOTstack - -declare -A cont_array=( - [portainer]="Portainer" - [nodered]="Node-RED" - [influxdb]="InfluxDB" - [telegraf]="Telegraf (Requires InfluxDB and Mosquitto)" - [grafana]="Grafana" - [mosquitto]="Eclipse-Mosquitto" - [postgres]="Postgres" - [mariadb]="MariaDB (MySQL fork)" - [adminer]="Adminer" - [openhab]="openHAB" - [zigbee2mqtt]="zigbee2mqtt" - [pihole]="Pi-Hole" - [plex]="Plex media server" - [tasmoadmin]="TasmoAdmin" - [rtl_433]="RTL_433 to mqtt" - [espruinohub]="EspruinoHub" - [motioneye]="motionEye" - 
[webthings_gateway]="Mozilla webthings-gateway" - [blynk_server]="blynk-server" - [nextcloud]="Next-Cloud" - [nginx]="NGINX by linuxserver" - [diyhue]="diyHue" - [homebridge]="Homebridge" - [python]="Python 3" - -) -declare -a armhf_keys=("portainer" "nodered" "influxdb" "grafana" "mosquitto" "telegraf" "mariadb" "postgres" - "adminer" "openhab" "zigbee2mqtt" "pihole" "plex" "tasmoadmin" "rtl_433" "espruinohub" - "motioneye" "webthings_gateway" "blynk_server" "nextcloud" "diyhue" "homebridge" "python") +# go to the absolute path of menu.sh +cd "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +CURRENT_BRANCH=$(git name-rev --name-only HEAD) -sys_arch=$(uname -m) - -#timezones -timezones() { +# Minimum Software Versions +REQ_DOCKER_VERSION=18.2.0 +REQ_PYTHON_VERSION=3.6.9 - env_file=$1 - TZ=$(cat /etc/timezone) +PYTHON_CMD=python3 - #test for TZ= - [ $(grep -c "TZ=" $env_file) -ne 0 ] && sed -i "/TZ=/c\TZ=$TZ" $env_file +sys_arch=$(uname -m) +# ---------------------------------------------- +# Helper functions +# ---------------------------------------------- +function command_exists() { + command -v "$@" > /dev/null 2>&1 } -# this function creates the volumes, services and backup directories. It then assisgns the current user to the ACL to give full read write access -docker_setfacl() { - [ -d ./services ] || mkdir ./services - [ -d ./volumes ] || mkdir ./volumes - [ -d ./backups ] || mkdir ./backups - - #give current user rwx on the volumes and backups - [ $(getfacl ./volumes | grep -c "default:user:$USER") -eq 0 ] && sudo setfacl -Rdm u:$USER:rwx ./volumes - [ $(getfacl ./backups | grep -c "default:user:$USER") -eq 0 ] && sudo setfacl -Rdm u:$USER:rwx ./backups -} +function user_in_group() +{ + # see if the group exists + grep -q "^$1:" /etc/group; -#future function add password in build phase -password_dialog() { - while [[ "$passphrase" != "$passphrase_repeat" || ${#passphrase} -lt 8 ]]; do + # sense that the group does not exist + if [ $? -ne 0 ]; then return 0; fi - passphrase=$(whiptail --passwordbox "${passphrase_invalid_message}Please enter the passphrase (8 chars min.):" 20 78 3>&1 1>&2 2>&3) - passphrase_repeat=$(whiptail --passwordbox "Please repeat the passphrase:" 20 78 3>&1 1>&2 2>&3) - passphrase_invalid_message="Passphrase too short, or not matching! " - done - echo $passphrase + # group exists - now check that the user is a member + groups | grep -q "\b$1\b" } -#test=$( password_dialog ) -function command_exists() { - command -v "$@" >/dev/null 2>&1 -} +function minimum_version_check() { + # Usage: minimum_version_check required_version current_major current_minor current_build + # Example: minimum_version_check "1.2.3" 1 2 3 + REQ_MIN_VERSION_MAJOR=$(echo "$1"| cut -d' ' -f 2 | cut -d'.' -f 1) + REQ_MIN_VERSION_MINOR=$(echo "$1"| cut -d' ' -f 2 | cut -d'.' -f 2) + REQ_MIN_VERSION_BUILD=$(echo "$1"| cut -d' ' -f 2 | cut -d'.' -f 3) + + CURR_VERSION_MAJOR=$2 + CURR_VERSION_MINOR=$3 + CURR_VERSION_BUILD=$4 + + VERSION_GOOD="Unknown" + + NUMB_REG='^[0-9]+$' + if ! [[ $CURR_VERSION_MAJOR =~ $NUMB_REG ]] ; then + echo "$VERSION_GOOD" + return 1 + fi + if ! [[ $CURR_VERSION_MINOR =~ $NUMB_REG ]] ; then + echo "$VERSION_GOOD" + return 1 + fi + if ! 
[[ $CURR_VERSION_BUILD =~ $NUMB_REG ]] ; then + echo "$VERSION_GOOD" + return 1 + fi -#function copies the template yml file to the local service folder and appends to the docker-compose.yml file -function yml_builder() { + if [ -z "$CURR_VERSION_MAJOR" ]; then + echo "$VERSION_GOOD" + return 1 + fi - service="services/$1/service.yml" + if [ -z "$CURR_VERSION_MINOR" ]; then + echo "$VERSION_GOOD" + return 1 + fi - [ -d ./services/ ] || mkdir ./services/ + if [ -z "$CURR_VERSION_BUILD" ]; then + echo "$VERSION_GOOD" + return 1 + fi - if [ -d ./services/$1 ]; then - #directory already exists prompt user to overwrite - sevice_overwrite=$(whiptail --radiolist --title "Overwrite Option" --notags \ - "$1 service directory has been detected, use [SPACEBAR] to select you overwrite option" 20 78 12 \ - "none" "Do not overwrite" "ON" \ - "env" "Preserve Environment and Config files" "OFF" \ - "full" "Pull full service from template" "OFF" \ - 3>&1 1>&2 2>&3) + if [ "${CURR_VERSION_MAJOR}" -ge $REQ_MIN_VERSION_MAJOR ]; then + VERSION_GOOD="true" + echo "$VERSION_GOOD" + return 0 + else + VERSION_GOOD="false" + fi - case $sevice_overwrite in + if [ "${CURR_VERSION_MAJOR}" -ge $REQ_MIN_VERSION_MAJOR ] && \ + [ "${CURR_VERSION_MINOR}" -ge $REQ_MIN_VERSION_MINOR ]; then + VERSION_GOOD="true" + echo "$VERSION_GOOD" + return 0 + else + VERSION_GOOD="false" + fi - "full") - echo "...pulled full $1 from template" - rsync -a -q .templates/$1/ services/$1/ --exclude 'build.sh' - ;; - "env") - echo "...pulled $1 excluding env file" - rsync -a -q .templates/$1/ services/$1/ --exclude 'build.sh' --exclude '$1.env' --exclude '*.conf' - ;; - "none") - echo "...$1 service not overwritten" - ;; + if [ "${CURR_VERSION_MAJOR}" -ge $REQ_MIN_VERSION_MAJOR ] && \ + [ "${CURR_VERSION_MINOR}" -ge $REQ_MIN_VERSION_MINOR ] && \ + [ "${CURR_VERSION_BUILD}" -ge $REQ_MIN_VERSION_BUILD ]; then + VERSION_GOOD="true" + echo "$VERSION_GOOD" + return 0 + else + VERSION_GOOD="false" + fi - esac + echo "$VERSION_GOOD" +} +function user_in_group() +{ + if grep -q $1 /etc/group ; then + if id -nGz "$USER" | grep -qzxF "$1"; then + echo "true" else - mkdir ./services/$1 - echo "...pulled full $1 from template" - rsync -a -q .templates/$1/ services/$1/ --exclude 'build.sh' + echo "false" fi - - - #if an env file exists check for timezone - [ -f "./services/$1/$1.env" ] && timezones ./services/$1/$1.env - - #add new line then append service - echo "" >>docker-compose.yml - cat $service >>docker-compose.yml - - #test for post build - if [ -f ./.templates/$1/build.sh ]; then - chmod +x ./.templates/$1/build.sh - bash ./.templates/$1/build.sh + else + echo "notgroup" fi +} - #test for directoryfix.sh - if [ -f ./.templates/$1/directoryfix.sh ]; then - chmod +x ./.templates/$1/directoryfix.sh - echo "...Running directoryfix.sh on $1" - bash ./.templates/$1/directoryfix.sh +function check_git_updates() +{ + UPSTREAM=${1:-'@{u}'} + LOCAL=$(git rev-parse @) + REMOTE=$(git rev-parse "$UPSTREAM") + BASE=$(git merge-base @ "$UPSTREAM") + + if [ "$LOCAL" = "$REMOTE" ]; then + echo "Up-to-date" + elif [ "$LOCAL" = "$BASE" ]; then + echo "Need to pull" + elif [ "$REMOTE" = "$BASE" ]; then + echo "Need to push" + else + echo "Diverged" fi +} +function install_python3_and_deps() { + CURR_PYTHON_VER="${1:-Unknown}" + CURR_VIRTUALENV="${2:-Unknown}" + if (whiptail --title "Python 3 and virtualenv" --yesno "Python 3.6.9 or later (Current = $CURR_PYTHON_VER) and virtualenv (Installed = $CURR_VIRTUALENV) are required for IOTstack to function correctly. 
Install these now?" 20 78); then + sudo apt update + sudo apt install -y python3-dev python3-virtualenv + if [ $? -eq 0 ]; then + PYTHON_VERSION_GOOD="true" + else + echo "Failed to install Python and virtualenv" >&2 + exit 1 + fi + fi +} - #make sure terminal.sh is executable - [ -f ./services/$1/terminal.sh ] && chmod +x ./services/$1/terminal.sh +function install_docker() { + sudo bash ./scripts/install_docker.sh install +} +function update_docker() { + sudo bash ./scripts/install_docker.sh upgrade } -#--------------------------------------------------------------------------------------------------- -# Project updates -echo "checking for project update" -git fetch origin master +function update_project() { + git pull origin $CURRENT_BRANCH + git status +} -if [ $(git status | grep -c "Your branch is up to date") -eq 1 ]; then - #delete .outofdate if it exisist - [ -f .outofdate ] && rm .outofdate - echo "Project is up to date" +function do_python3_checks() { + VIRTUALENV_GOOD="false" + if command_exists virtualenv; then + VIRTUALENV_GOOD="true" + echo "Python virtualenv found." >&2 + fi + PYTHON_VERSION_GOOD="false" + if command_exists $PYTHON_CMD; then + PYTHON_VERSION=$($PYTHON_CMD --version 2>/dev/null) + PYTHON_VERSION_MAJOR=$(echo "$PYTHON_VERSION"| cut -d' ' -f 2 | cut -d' ' -f 2 | cut -d'.' -f 1) + PYTHON_VERSION_MINOR=$(echo "$PYTHON_VERSION"| cut -d' ' -f 2 | cut -d'.' -f 2) + PYTHON_VERSION_BUILD=$(echo "$PYTHON_VERSION"| cut -d' ' -f 2 | cut -d'.' -f 3) + + printf "Python Version: '${PYTHON_VERSION:-Unknown}'. " + if [ "$(minimum_version_check $REQ_PYTHON_VERSION $PYTHON_VERSION_MAJOR $PYTHON_VERSION_MINOR $PYTHON_VERSION_BUILD)" == "true" -a "$VIRTUALENV_GOOD" == "true" ]; then + PYTHON_VERSION_GOOD="true" + echo "Python and virtualenv is up to date." >&2 + else + echo "Python is outdated or virtualenv is missing" >&2 + install_python3_and_deps "$PYTHON_VERSION_MAJOR.$PYTHON_VERSION_MINOR.$PYTHON_VERSION_BUILD" "$VIRTUALENV_GOOD" + return 1 + fi + else + install_python3_and_deps + return 1 + fi +} -else - echo "An update is available for the project" - if [ ! -f .outofdate ]; then - whiptail --title "Project update" --msgbox "An update is available for the project\nYou will not be reminded again until you next update" 8 78 - touch .outofdate +function do_env_setup() { + echo "Setting up environment:" + if [[ ! "$(user_in_group bluetooth)" == "notgroup" ]] && [[ ! "$(user_in_group bluetooth)" == "true" ]]; then + echo "User is NOT in 'bluetooth' group. Adding:" >&2 + echo "sudo usermod -G bluetooth -a $USER" >&2 + echo "You will need to restart your system before the changes take effect." + sudo usermod -G "bluetooth" -a $USER fi -fi -#--------------------------------------------------------------------------------------------------- -# Menu system starts here -# Display main menu -mainmenu_selection=$(whiptail --title "Main Menu" --menu --notags \ - "" 20 78 12 -- \ - "install" "Install Docker" \ - "build" "Build Stack" \ - "hassio" "Install Hass.io (Requires Docker)" \ - "native" "Native Installs" \ - "commands" "Docker commands" \ - "backup" "Backup options" \ - "misc" "Miscellaneous commands" \ - "update" "Update IOTstack" \ - 3>&1 1>&2 2>&3) - -case $mainmenu_selection in -#MAINMENU Install docker ------------------------------------------------------------ -"install") - #sudo apt update && sudo apt upgrade -y ;; + if [ ! "$(user_in_group docker)" == "true" ]; then + echo "User is NOT in 'docker' group. 
Adding:" >&2 + echo "sudo usermod -G docker -a $USER" >&2 + echo "You will need to restart your system before the changes take effect." + sudo usermod -G "docker" -a $USER + fi +} +function do_docker_checks() { if command_exists docker; then - echo "docker already installed" + DOCKER_VERSION_GOOD="false" + DOCKER_VERSION=$(docker version -f "{{.Server.Version}}" 2>&1) + echo "Command: docker version -f \"{{.Server.Version}}\"" + if [[ "$DOCKER_VERSION" == *"Cannot connect to the Docker daemon"* ]]; then + echo "Error getting docker version. Error when connecting to docker daemon. Check that docker is running." + if (whiptail --title "Docker and Docker-Compose" --yesno "Error getting docker version. Error when connecting to docker daemon. Check that docker is running.\n\nCommand: docker version -f \"{{.Server.Version}}\"\n\nExit?" 20 78); then + exit 1 + fi + elif [[ "$DOCKER_VERSION" == *" permission denied"* ]]; then + echo "Error getting docker version. Received permission denied error. Try running with: ./menu.sh --run-env-setup" + if (whiptail --title "Docker and Docker-Compose" --yesno "Error getting docker version. Received permission denied error.\n\nTry rerunning the menu with: ./menu.sh --run-env-setup\n\nExit?" 20 78); then + exit 1 + fi + return 0 + fi + + if [[ -z "$DOCKER_VERSION" ]]; then + echo "Error getting docker version. Error when running docker command. Check that docker is installed correctly." + fi + + DOCKER_VERSION_MAJOR=$(echo "$DOCKER_VERSION"| cut -d'.' -f 1) + DOCKER_VERSION_MINOR=$(echo "$DOCKER_VERSION"| cut -d'.' -f 2) + + DOCKER_VERSION_BUILD=$(echo "$DOCKER_VERSION"| cut -d'.' -f 3) + DOCKER_VERSION_BUILD=$(echo "$DOCKER_VERSION_BUILD"| cut -f1 -d"-") + DOCKER_VERSION_BUILD=$(echo "$DOCKER_VERSION_BUILD"| cut -f1 -d"+") + + if [ "$(minimum_version_check $REQ_DOCKER_VERSION $DOCKER_VERSION_MAJOR $DOCKER_VERSION_MINOR $DOCKER_VERSION_BUILD )" == "true" ]; then + [ -f .docker_outofdate ] && rm .docker_outofdate + DOCKER_VERSION_GOOD="true" + echo "Docker version $DOCKER_VERSION >= $REQ_DOCKER_VERSION. Docker is good to go." >&2 + else + if [ ! -f .docker_outofdate ]; then + if (whiptail --title "Docker and Docker-Compose Version Issue" --yesno "Docker version is currently $DOCKER_VERSION which is less than $REQ_DOCKER_VERSION consider upgrading or you may experience issues. You will not be prompted again. You can manually upgrade by typing:\n sudo apt upgrade docker docker-compose\n\nAttempt to upgrade now?" 20 78); then + update_docker + else + touch .docker_outofdate + fi + fi + fi else - echo "Install Docker" - curl -fsSL https://get.docker.com | sh - sudo usermod -aG docker $USER + [ -f .docker_outofdate ] && rm .docker_outofdate + echo "Docker not installed" >&2 + if [ ! -f .docker_notinstalled ]; then + if (whiptail --title "Docker and Docker-Compose" --yesno "Docker is not currently installed, and is required to run IOTstack. Would you like to install docker and docker-compose now?\nYou will not be prompted again." 20 78); then + [ -f .docker_notinstalled ] && rm .docker_notinstalled + echo "Setting up environment:" + if [[ ! "$(user_in_group bluetooth)" == "notgroup" ]] && [[ ! "$(user_in_group bluetooth)" == "true" ]]; then + echo "User is NOT in 'bluetooth' group. Adding:" >&2 + echo "sudo usermod -G bluetooth -a $USER" >&2 + echo "You will need to restart your system before the changes take effect." + sudo usermod -G "bluetooth" -a $USER + fi + + if [ ! "$(user_in_group docker)" == "true" ]; then + echo "User is NOT in 'docker' group. 
Adding:" >&2 + echo "sudo usermod -G docker -a $USER" >&2 + echo "You will need to restart your system before the changes take effect." + sudo usermod -G "docker" -a $USER + fi + install_docker + else + touch .docker_notinstalled + fi + fi fi +} - if command_exists docker-compose; then - echo "docker-compose already installed" +function do_project_checks() { + echo "Checking for project update" >&2 + git fetch origin $CURRENT_BRANCH + + if [[ "$(check_git_updates)" == "Need to pull" ]]; then + echo "An update is available for IOTstack" >&2 + if [ ! -f .project_outofdate ]; then + if (whiptail --title "Project update" --yesno "An update is available for IOTstack\nYou will not be reminded again until after you update.\nYou can upgrade manually by typing:\n git pull origin $CURRENT_BRANCH \n\n\nWould you like to update now?" 14 78); then + update_project + else + touch .project_outofdate + fi + fi else - echo "Install docker-compose" - sudo apt install -y docker-compose + [ -f .project_outofdate ] && rm .project_outofdate + echo "Project is up to date" >&2 fi +} - if (whiptail --title "Restart Required" --yesno "It is recommended that you restart your device now. Select yes to do so now" 20 78); then - sudo reboot - fi - ;; - #MAINMENU Build stack ------------------------------------------------------------ -"build") +function do_env_checks() { + GROUPSGOOD=0 - title=$'Container Selection' - message=$'Use the [SPACEBAR] to select which containers you would like to install' - entry_options=() + if [[ ! "$(user_in_group bluetooth)" == "notgroup" ]] && [[ ! "$(user_in_group bluetooth)" == "true" ]]; then + GROUPSGOOD=1 + echo "User is NOT in 'bluetooth' group" >&2 + fi - #check architecture and display appropriate menu - if [ $(echo "$sys_arch" | grep -c "arm") ]; then - keylist=("${armhf_keys[@]}") - else - echo "your architecture is not supported yet" - exit + if [[ ! "$(user_in_group docker)" == "true" ]]; then + GROUPSGOOD=1 + echo "User is NOT in 'docker' group" >&2 fi - #loop through the array of descriptions - for index in "${keylist[@]}"; do - entry_options+=("$index") - entry_options+=("${cont_array[$index]}") + if [ "$GROUPSGOOD" == 1 ]; then + echo "!! You might experience issues with docker or bluetooth. 
To fix run: ./menu.sh --run-env-setup" + fi +} - #check selection - if [ -f ./services/selection.txt ]; then - [ $(grep "$index" ./services/selection.txt) ] && entry_options+=("ON") || entry_options+=("OFF") - else - entry_options+=("OFF") - fi - done - - container_selection=$(whiptail --title "$title" --notags --separate-output --checklist \ - "$message" 20 78 12 -- "${entry_options[@]}" 3>&1 1>&2 2>&3) - - mapfile -t containers <<<"$container_selection" - - #if no container is selected then dont overwrite the docker-compose.yml file - if [ -n "$container_selection" ]; then - touch docker-compose.yml - echo "version: '2'" >docker-compose.yml - echo "services:" >>docker-compose.yml - - #set the ACL for the stack - #docker_setfacl - - # store last sellection - [ -f ./services/selection.txt ] && rm ./services/selection.txt - #first run service directory wont exist - [ -d ./services ] || mkdir services - touch ./services/selection.txt - #Run yml_builder of all selected containers - for container in "${containers[@]}"; do - echo "Adding $container container" - yml_builder "$container" - echo "$container" >>./services/selection.txt - done - - # add custom containers - if [ -f ./services/custom.txt ]; then - if (whiptail --title "Custom Container detected" --yesno "custom.txt has been detected do you want to add these containers to the stack?" 20 78); then - mapfile -t containers <<<$(cat ./services/custom.txt) - for container in "${containers[@]}"; do - echo "Adding $container container" - yml_builder "$container" - done - fi - fi +# ---------------------------------------------- +# Menu bootstrap entry point +# ---------------------------------------------- - echo "docker-compose successfully created" - echo "run 'docker-compose up -d' to start the stack" +if [[ "$*" == *"--no-check"* ]]; then + echo "Skipping preflight checks." +else + do_project_checks + do_env_checks + do_python3_checks + echo "Please enter sudo pasword if prompted" + do_docker_checks + + if [[ "$DOCKER_VERSION_GOOD" == "true" ]] && \ + [[ "$PYTHON_VERSION_GOOD" == "true" ]]; then + echo "Project dependencies up to date" + echo "" else - - echo "Build cancelled" - + echo "Project dependencies not up to date. Menu may crash." + echo "To be prompted to update again, run command:" + echo " rm .docker_notinstalled || rm .docker_outofdate || rm .project_outofdate" + echo "" fi - ;; - #MAINMENU Docker commands ----------------------------------------------------------- -"commands") - - docker_selection=$( - whiptail --title "Docker commands" --menu --notags \ - "Shortcut to common docker commands" 20 78 12 -- \ - "aliases" "Add iotstack_up and iotstack_down aliases" \ - "start" "Start stack" \ - "restart" "Restart stack" \ - "stop" "Stop stack" \ - "stop_all" "Stop any running container regardless of stack" \ - "pull" "Update all containers" \ - "prune_volumes" "Delete all stopped containers and docker volumes" \ - "prune_images" "Delete all images not associated with container" \ - 3>&1 1>&2 2>&3 - ) - - case $docker_selection in - "start") ./scripts/start.sh ;; - "stop") ./scripts/stop.sh ;; - "stop_all") ./scripts/stop-all.sh ;; - "restart") ./scripts/restart.sh ;; - "pull") ./scripts/update.sh ;; - "prune_volumes") ./scripts/prune-volumes.sh ;; - "prune_images") ./scripts/prune-images.sh ;; - "aliases") - touch ~/.bash_aliases - if [ $(grep -c 'IOTstack' ~/.bash_aliases) -eq 0 ]; then - echo ". 
~/IOTstack/.bash_aliases" >>~/.bash_aliases - echo "added aliases" - else - echo "aliases already added" - fi - source ~/.bashrc - echo "aliases will be available after a reboot" - ;; - esac - ;; - #Backup menu --------------------------------------------------------------------- -"backup") - backup_sellection=$(whiptail --title "Backup Options" --menu --notags \ - "Select backup option" 20 78 12 -- \ - "dropbox-uploader" "Dropbox-Uploader" \ - "rclone" "google drive via rclone" \ - 3>&1 1>&2 2>&3) - - case $backup_sellection in - - "dropbox-uploader") - if [ ! -d ~/Dropbox-Uploader ]; then - git clone https://github.com/andreafabrizi/Dropbox-Uploader.git ~/Dropbox-Uploader - chmod +x ~/Dropbox-Uploader/dropbox_uploader.sh - pushd ~/Dropbox-Uploader && ./dropbox_uploader.sh - popd - else - echo "Dropbox uploader already installed" - fi +fi - #add enable file for Dropbox-Uploader - [ -d ~/IOTstack/backups ] || sudo mkdir -p ~/IOTstack/backups/ - sudo touch ~/IOTstack/backups/dropbox - ;; - "rclone") - sudo apt install -y rclone - echo "Please run 'rclone config' to configure the rclone google drive backup" - - #add enable file for rclone - [ -d ~/IOTstack/backups ] || sudo mkdir -p ~/IOTstack/backups/ - sudo touch ~/IOTstack/backups/rclone - ;; +while test $# -gt 0 +do + case "$1" in + --branch) CURRENT_BRANCH=${2:-$(git name-rev --name-only HEAD)} + ;; + --no-check) echo "" + ;; + --run-env-setup) # Sudo cannot be run from inside functions. + echo "Setting up environment:" + if [[ ! "$(user_in_group bluetooth)" == "notgroup" ]] && [[ ! "$(user_in_group bluetooth)" == "true" ]]; then + echo "User is NOT in 'bluetooth' group. Adding:" >&2 + echo "sudo usermod -G bluetooth -a $USER" >&2 + echo "You will need to restart your system before the changes take effect." + sudo usermod -G "bluetooth" -a $USER + fi + + if [ ! "$(user_in_group docker)" == "true" ]; then + echo "User is NOT in 'docker' group. Adding:" >&2 + echo "sudo usermod -G docker -a $USER" >&2 + echo "You will need to restart your system before the changes take effect." + sudo usermod -G "docker" -a $USER + fi + ;; + --encoding) ENCODING_TYPE=$2 + ;; + --*) echo "bad option $1" + ;; esac - ;; - #MAINMENU Misc commands------------------------------------------------------------ -"misc") - misc_sellection=$( - whiptail --title "Miscellaneous Commands" --menu --notags \ - "Some helpful commands" 20 78 12 -- \ - "swap" "Disable swap by uninstalling swapfile" \ - "swappiness" "Disable swap by setting swappiness to 0" \ - "log2ram" "install log2ram to decrease load on sd card, moves /var/log into ram" \ - 3>&1 1>&2 2>&3 - ) - - case $misc_sellection in - "swap") - sudo dphys-swapfile swapoff - sudo dphys-swapfile uninstall - sudo update-rc.d dphys-swapfile remove - sudo systemctl disable dphys-swapfile - #sudo apt-get remove dphys-swapfile - echo "Swap file has been removed" - ;; - "swappiness") - if [ $(grep -c swappiness /etc/sysctl.conf) -eq 0 ]; then - echo "vm.swappiness=0" | sudo tee -a /etc/sysctl.conf - echo "updated /etc/sysctl.conf with vm.swappiness=0" - else - sudo sed -i "/vm.swappiness/c\vm.swappiness=0" /etc/sysctl.conf - echo "vm.swappiness found in /etc/sysctl.conf update to 0" - fi + shift +done - sudo sysctl vm.swappiness=0 - echo "set swappiness to 0 for immediate effect" - ;; - "log2ram") - if [ ! 
-d ~/log2ram ]; then - git clone https://github.com/azlux/log2ram.git ~/log2ram - chmod +x ~/log2ram/install.sh - pushd ~/log2ram && sudo ./install.sh - popd - else - echo "log2ram already installed" +# This section is temporary, it's just for notifying people of potential breaking changes. +if [[ -f .new_install ]]; then + echo "Existing installation detected." +else + if [[ -f docker-compose.yml ]]; then + echo "Warning: Please ensure to read the following prompt" + sleep 1 + if (whiptail --title "Project update" --yesno "There has been a large update to IOTstack, and there may be breaking changes to your current setup. Would you like to switch to the older branch by having the command:\ngit checkout old-menu\n\nrun for you?\n\nIt's suggested that you backup your existing IOTstack instance if you select No\n\nIf you run into problems, please open an issue: https://github.com/SensorsIot/IOTstack/issues\n\nOr Discord: https://discord.gg/ZpKHnks\n\nRelease Notes: https://github.com/SensorsIot/IOTstack/blob/master/docs/New-Menu-Release-Notes.md" 24 95); then + echo "Running command: git checkout old-menu" + git checkout old-menu + sleep 2 fi - ;; - esac - ;; - -"hassio") - echo "install requirements for hass.io" - sudo apt install -y bash jq curl avahi-daemon dbus - hassio_machine=$(whiptail --title "Machine type" --menu \ - "Please select you device type" 20 78 12 -- \ - "raspberrypi4" " " \ - "raspberrypi3" " " \ - "raspberrypi2" " " \ - "raspberrypi4-64" " " \ - "raspberrypi3-64" " " \ - "qemux86" " " \ - "qemux86-64" " " \ - "qemuarm" " " \ - "qemuarm-64" " " \ - "orangepi-prime" " " \ - "odroid-xu" " " \ - "odroid-c2" " " \ - "intel-nuc" " " \ - "tinker" " " \ - 3>&1 1>&2 2>&3) - if [ -n "$hassio_machine" ]; then - curl -sL https://raw.githubusercontent.com/home-assistant/hassio-installer/master/hassio_install.sh | sudo bash -s -- -m $hassio_machine - else - echo "no selection" - exit fi - ;; -"update") - echo "Pulling latest project file from Github.com ---------------------------------------------" - git pull origin master - echo "git status ------------------------------------------------------------------------------" - git status - ;; -"native") - - native_selections=$(whiptail --title "Native installs" --menu --notags \ - "Install local applications" 20 78 12 -- \ - "rtl_433" "RTL_433" \ - "rpieasy" "RPIEasy" \ - 3>&1 1>&2 2>&3) - - case $native_selections in - "rtl_433") - bash ./.native/rtl_433.sh - ;; - "rpieasy") - bash ./.native/rpieasy.sh - ;; - esac - ;; -*) ;; + touch .new_install +fi -esac +set -e # stop execution on failure +if cmp -s requirements-menu.txt .virtualenv-menu/requirements.txt; then + echo "Using existing python virtualenv for menu" + source .virtualenv-menu/bin/activate +else + rm -rf .virtualenv-menu + echo "Creating python virtualenv for menu..." + virtualenv -q --seed pip .virtualenv-menu + source .virtualenv-menu/bin/activate + echo "Installing menu requirements into the virtualenv..." 
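# Note on the block above/below (not part of the committed hunk): the python
# virtualenv is treated as a cache. The "cmp -s requirements-menu.txt
# .virtualenv-menu/requirements.txt" test earlier reuses the existing
# environment only when the two files are byte-identical; otherwise the
# virtualenv is recreated, the requirements are installed, and
# requirements-menu.txt is copied into .virtualenv-menu/ so the next run's
# comparison passes. Editing requirements-menu.txt therefore forces a rebuild.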
+ pip3 install -q -r requirements-menu.txt + cp requirements-menu.txt .virtualenv-menu/requirements.txt +fi -popd +# Hand control to new menu +$PYTHON_CMD ./scripts/menu_main.py $ENCODING_TYPE diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 000000000..34ef56f23 --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,84 @@ +site_name: IOTstack +site_description: 'Docker stack for getting started on IOT on the Raspberry PI' + +# Repository +repo_url: https://github.com/SensorsIot/IOTstack +repo_name: SensorsIot/IOTstack +edit_uri: "https://github.com/SensorsIot/IOTstack/edit/master/docs" + +theme: + name: material + icon: + logo: octicons/stack-24 + favicon: stack-24.svg + palette: + - scheme: default + media: "(prefers-color-scheme: light)" + toggle: + icon: material/weather-sunny + name: Switch to dark mode + - scheme: slate + media: "(prefers-color-scheme: dark)" + toggle: + icon: material/weather-night + name: Switch to light mode + features: + - content.code.annotate + - tabs + - navigation.tabs + - navigation.tabs.sticky + - navigation.sections + +plugins: + - awesome-pages # Required for pagenav-generator + - pagenav-generator + - search + - redirects: + # Forward renamed pages to avoid breaking old links. + redirect_maps: + Getting-Started.md: Basic_setup/index.md + Accessing-your-Device-from-the-internet.md: Basic_setup/Accessing-your-Device-from-the-internet.md + Backup-and-Restore.md: Basic_setup/Backup-and-Restore.md + Custom.md: Basic_setup/Custom.md + Default-Configs.md: Basic_setup/Default-Configs.md + Docker-commands.md: Basic_setup/Docker.md + Basic_setup/Docker-commands.md: Basic_setup/Docker.md + How-the-script-works.md: Basic_setup/Menu.md + Misc.md: Basic_setup/Menu.md + Native-RTL_433.md: Basic_setup/Menu.md + Networking.md: Basic_setup/Networking.md + RPIEasy_native.md: Basic_setup/Menu.md + Understanding-Containers.md: Basic_setup/Understanding-Containers.md + Updates/Updating-the-Project.md: Updates/index.md + PostBuild-Script.md: Developers/PostBuild-Script.md + BuildStack-RandomPassword.md: Developers/BuildStack-RandomPassword.md + BuildStack-Services.md: Developers/BuildStack-Services.md + Menu-System.md: Developers/Menu-System.md + Contributing-Services.md: Developers/index.md + Basic_setup/How-the-script-works.md: Basic_setup/Menu.md + Basic_setup/Misc.md: Basic_setup/Menu.md + Basic_setup/Native-RTL_433.md: Basic_setup/Menu.md + Basic_setup/RPIEasy_native.md: Basic_setup/Menu.md + +extra_css: + - style.css +extra_javascript: + - javascript/fix-codeblock-console-copy-button.js + +markdown_extensions: + - attr_list + - pymdownx.highlight: + pygments_lang_class: true + - admonition + - pymdownx.snippets + - pymdownx.superfences: + custom_fences: + - name: mermaid + class: mermaid + format: !!python/name:pymdownx.superfences.fence_code_format + - pymdownx.details + - pymdownx.tabbed: + alternate_style: true + - toc: + permalink: true + - md_in_html diff --git a/requirements-menu.txt b/requirements-menu.txt new file mode 100644 index 000000000..9cf29d1ef --- /dev/null +++ b/requirements-menu.txt @@ -0,0 +1,5 @@ +blessed +ruamel.yaml +ruamel.yaml.clib +six +wcwidth diff --git a/requirements-mkdocs.txt b/requirements-mkdocs.txt new file mode 100644 index 000000000..56851693d --- /dev/null +++ b/requirements-mkdocs.txt @@ -0,0 +1,25 @@ +bracex +click +ghp-import +importlib-metadata +Jinja2 +Markdown +MarkupSafe +mergedeep +mkdocs +mkdocs-awesome-pages-plugin +mkdocs-material +mkdocs-material-extensions +mkdocs-pagenav-generator @ 
git+https://github.com/Andre601/mkdocs-pagenav-generator@acb5b1561695e8f69d67fda029779f40e4b0beef +mkdocs-redirects +packaging +Pygments +pymdown-extensions +pyparsing +python-dateutil +PyYAML +pyyaml_env_tag +six +watchdog +wcmatch +zipp diff --git a/scripts/2022-10-01-wireguard-restructure.sh b/scripts/2022-10-01-wireguard-restructure.sh new file mode 100755 index 000000000..22f2d35db --- /dev/null +++ b/scripts/2022-10-01-wireguard-restructure.sh @@ -0,0 +1,155 @@ +#!/usr/bin/env bash + +# support user renaming of script +SCRIPT=$(basename "$0") + +# dependency check +if [ -z "$(which rsync)" -o -z "$(which jq)" ] ; then + echo "This script depends on jq and rsync. Please run" + echo " sudo apt update && sudo apt install jq rsync" + exit -1 +fi + +# useful function +isContainerRunning() { + if STATUS=$(curl -s --unix-socket /var/run/docker.sock http://localhost/containers/$1/json | jq .State.Status) ; then + if [ "$STATUS" = "\"running\"" ] ; then + return 0 + fi + fi + return 1 +} + + +# should not run as root +[ "$EUID" -eq 0 ] && echo "$SCRIPT should NOT be run using sudo" && exit -1 + +# dependency check +if [ -z "$(which rsync)" -o -z "$(which jq)" ] ; then + echo "This script depends on jq and rsync. Please run" + echo " sudo apt update && sudo apt install jq rsync" + exit -1 +fi + +read -r -d '' RUNNINGNOTES <<-EOM +\n +=============================================================================== + +Error: The WireGuard container can't be running during the migration. + Please stop the container like this: + + $ cd ~/IOTstack + $ docker-compose rm --force --stop -v wireguard + + Do not start the container again until the migration is complete and + you have followed the instructions for modifying WireGuard's service + definition in your docker-compose.yml + +=============================================================================== +\n +EOM + +# wireguard can't be running +isContainerRunning "wireguard" && echo -e "$RUNNINGNOTES" && exit -1 + +# source directory is +WIREGUARD="$HOME/IOTstack/volumes/wireguard" + +# source directory must exist +[ ! -d "$WIREGUARD" ] && echo "Error: $WIREGUARD does not exist" && exit -1 + +# the backup directory is +BACKUP="$WIREGUARD.bak" + +read -r -d '' REPEATNOTES <<-EOM +\n +=============================================================================== + +Error: It looks like you might be trying to migrate twice! You can't do that. + + If you need to start over, you can try resetting like this: + + $ cd ~/IOTstack/volumes + $ sudo rm -rf wireguard + $ sudo mv wireguard.bak wireguard + + Alternatively, restore ~/IOTstack/volumes/wireguard from a backup. 
+ +=============================================================================== +\n +EOM + +# required sub-directories are +CONFIGD="config" +INITD="custom-cont-init.d" +SERVICESD="custom-services.d" + +# backup directory must not exist +[ -d "$BACKUP" ] && echo -e "$REPEATNOTES" && exit -1 + +# required sub-directories must not exist +[ -d "$WIREGUARD/$CONFIGD" ] && echo -e "$REPEATNOTES" && exit -1 +[ -d "$WIREGUARD/$INITD" ] && echo -e "$REPEATNOTES" && exit -1 +[ -d "$WIREGUARD/$SERVICESD" ] && echo -e "$REPEATNOTES" && exit -1 + +# rename source to backup +echo "Renaming $WIREGUARD to $BACKUP" +sudo mv "$WIREGUARD" "$BACKUP" + +# create the required directories +echo "creating required sub-folders" +sudo mkdir -p "$WIREGUARD/$CONFIGD" "$WIREGUARD/$INITD" "$WIREGUARD/$SERVICESD" + +# for now, set ownership to the current user +echo "setting ownership on $WIREGUARD to $USER" +sudo chown -R "$USER":"$USER" "$WIREGUARD" + +# migrate config directory components +echo "migrating user-configuration components" +rsync -r --ignore-existing --exclude="${INITD}*" --exclude="${SERVICESD}*" "$BACKUP"/ "$WIREGUARD/$CONFIGD" + +# migrate special cases and change ownership to root +echo "migrating custom configuration options" +for C in "$INITD" "$SERVICESD" ; do + for D in "$BACKUP/$C"* ; do + echo " merging $D into $WIREGUARD/$C" + rsync -r --ignore-existing --exclude="README.txt" "$D"/ "$WIREGUARD/$C" + echo " changing ownership to root" + sudo chown -R root:root "$WIREGUARD/$C" + done +done + +# force correct mode for wg0.conf +echo "Setting mode 600 on $WIREGUARD/$CONFIGD/wg0.conf" +chmod 600 "$WIREGUARD/$CONFIGD/wg0.conf" + +read -r -d '' COMPOSENOTES <<-EOM +\n +=============================================================================== + +Migration seems to have been successful. Do NOT start the WireGuard container +until you have updated WireGuard's service definition: + +Old: + + volumes: + - ./volumes/wireguard:/config + - /lib/modules:/lib/modules:ro + +New: + + volumes: + - ./volumes/wireguard/config:/config + - ./volumes/wireguard/custom-cont-init.d:/custom-cont-init.d + - ./volumes/wireguard/custom-services.d:/custom-services.d + - /lib/modules:/lib/modules:ro + +Pay careful attention to the lines starting with "- ./volumes". Do NOT +just copy and paste the middle two lines. The first line has changed too. + +=============================================================================== +\n +EOM + +# all done - display the happy news +echo -e "$COMPOSENOTES" diff --git a/scripts/backup.sh b/scripts/backup.sh new file mode 100755 index 000000000..e22453f63 --- /dev/null +++ b/scripts/backup.sh @@ -0,0 +1,150 @@ +#!/bin/bash + +# Usage: +# ./scripts/backup.sh {TYPE=3} {USER=$(whoami)} +# Types: +# 1 = Backup with Date +# 2 = Rolling Date +# 3 = Both +# User: +# This parameter only becomes active if run as root. This script will default to the current logged in user +# If this parameter is not supplied when run as root, the script will ask for the username as input +# +# Backups: +# You can find the backups in the ./backups/ folder. With rolling being in ./backups/rolling/ and date backups in ./backups/backup/ +# Log files can also be found in the ./backups/logs/ directory. +# +# Examples: +# ./scripts/backup.sh +# ./scripts/backup.sh 3 +# Either of these will run both backups. +# +# ./scripts/backup.sh 2 +# This will only produce a backup in the rollowing folder. 
It will be called 'backup_XX.tar.gz' where XX is the current day of the week (as an int) +# +# sudo bash ./scripts/backup.sh 2 pi +# This will only produce a backup in the rollowing folder and change all the permissions to the 'pi' user. + +if [ -d "./menu.sh" ]; then + echo "./menu.sh file was not found. Ensure that you are running this from IOTstack's directory." + exit 1 +fi + +BACKUPTYPE=${1:-"3"} + +if [[ "$BACKUPTYPE" -ne "1" && "$BACKUPTYPE" -ne "2" && "$BACKUPTYPE" -ne "3" ]]; then + echo "Unknown backup type '$BACKUPTYPE', can only be 1, 2 or 3" + exit 1 +fi + +if [[ "$EUID" -eq 0 ]]; then + if [ -z ${2+x} ]; then + echo "Enter username to chown (change ownership) files to" + read USER; + else + USER=$2 + fi +else + USER=$(whoami) +fi + +BASEDIR=./backups +TMPDIR=./.tmp +DOW=$(date +%u) +BASEBACKUPFILE="$(date +"%Y-%m-%d_%H%M")" +TMPBACKUPFILE="$TMPDIR/backup/backup_$BASEBACKUPFILE.tar.gz" +BACKUPLIST="$TMPDIR/backup-list_$BASEBACKUPFILE.txt" +LOGFILE="$BASEDIR/logs/backup_$BASEBACKUPFILE.log" +BACKUPFILE="$BASEDIR/backup/backup_$BASEBACKUPFILE.tar.gz" +ROLLING="$BASEDIR/rolling/backup_$DOW.tar.gz" + +[ -d ./backups ] || mkdir ./backups +[ -d ./backups/logs ] || mkdir -p ./backups/logs +[ -d ./backups/backup ] || mkdir -p ./backups/backup +[ -d ./backups/rolling ] || mkdir -p ./backups/rolling +[ -d ./.tmp ] || mkdir ./.tmp +[ -d ./.tmp/backup ] || mkdir -p ./.tmp/backup +[ -d ./.tmp/databases_backup ] || mkdir -p ./.tmp/databases_backup + +touch $LOGFILE +echo "" > $LOGFILE +echo "### IOTstack backup generator log ###" >> $LOGFILE +echo "Started At: $(date +"%Y-%m-%dT%H-%M-%S")" >> $LOGFILE +echo "Current Directory: $(pwd)" >> $LOGFILE +echo "Backup Type: $BACKUPTYPE" >> $LOGFILE + +if [[ "$BACKUPTYPE" -eq "1" || "$BACKUPTYPE" -eq "3" ]]; then + echo "Backup File: $BACKUPFILE" >> $LOGFILE +fi + +if [[ "$BACKUPTYPE" -eq "2" || "$BACKUPTYPE" -eq "3" ]]; then + echo "Rolling File: $ROLLING" >> $LOGFILE +fi + +echo "" >> $BACKUPLIST + +echo "" >> $LOGFILE +echo "Executing prebackup scripts" >> $LOGFILE +bash ./scripts/backup_restore/pre_backup_complete.sh >> $LOGFILE 2>&1 + +echo "./services/" >> $BACKUPLIST +echo "./volumes/" >> $BACKUPLIST +[ -f "./docker-compose.yml" ] && echo "./docker-compose.yml" >> $BACKUPLIST +[ -f "./docker-compose.override.yml" ] && echo "./docker-compose.yml" >> $BACKUPLIST +[ -f "./compose-override.yml" ] && echo "./compose-override.yml" >> $BACKUPLIST +[ -f "./extra" ] && echo "./extra" >> $BACKUPLIST +[ -f "./.tmp/databases_backup" ] && echo "./.tmp/databases_backup" >> $BACKUPLIST +[ -f "./postbuild.sh" ] && echo "./postbuild.sh" >> $BACKUPLIST +[ -f "./post_backup.sh" ] && echo "./post_backup.sh" >> $BACKUPLIST +[ -f "./pre_backup.sh" ] && echo "./pre_backup.sh" >> $BACKUPLIST + +sudo tar -czf $TMPBACKUPFILE -T $BACKUPLIST >> $LOGFILE 2>&1 + +[ -f "$ROLLING" ] && ROLLINGOVERWRITTEN=1 && rm -rf $ROLLING + +sudo chown -R $USER:$USER $TMPDIR/backup* >> $LOGFILE 2>&1 + +if [[ "$BACKUPTYPE" -eq "1" || "$BACKUPTYPE" -eq "3" ]]; then + cp $TMPBACKUPFILE $BACKUPFILE +fi +if [[ "$BACKUPTYPE" -eq "2" || "$BACKUPTYPE" -eq "3" ]]; then + cp $TMPBACKUPFILE $ROLLING +fi + +if [[ "$BACKUPTYPE" -eq "2" || "$BACKUPTYPE" -eq "3" ]]; then + if [[ "$ROLLINGOVERWRITTEN" -eq 1 ]]; then + echo "Rolling Overwritten: True" >> $LOGFILE + else + echo "Rolling Overwritten: False" >> $LOGFILE + fi +fi + +echo "Backup Size (bytes): $(stat --printf="%s" $TMPBACKUPFILE)" >> $LOGFILE +echo "" >> $LOGFILE + +echo "Executing postbackup scripts" >> $LOGFILE +bash 
./scripts/backup_restore/post_backup_complete.sh >> $LOGFILE 2>&1 +echo "" >> $LOGFILE + +echo "Finished At: $(date +"%Y-%m-%dT%H-%M-%S")" >> $LOGFILE +echo "" >> $LOGFILE + +if [[ -f "$TMPBACKUPFILE" ]]; then + echo "Items backed up:" >> $LOGFILE + cat $BACKUPLIST >> $LOGFILE 2>&1 + echo "" >> $LOGFILE + echo "Items Excluded:" >> $LOGFILE + echo " - No items" >> $LOGFILE 2>&1 + rm -rf $BACKUPLIST >> $LOGFILE 2>&1 + rm -rf $TMPBACKUPFILE >> $LOGFILE 2>&1 +else + echo "Something went wrong backing up. The temporary backup file doesn't exist. No temporary files were removed" + echo "Files: " + echo " $BACKUPLIST" +fi + +echo "" >> $LOGFILE +echo "### End of log ###" >> $LOGFILE +echo "" >> $LOGFILE + +cat $LOGFILE diff --git a/scripts/backup_influxdb.sh b/scripts/backup_influxdb.sh deleted file mode 100755 index fefb8464c..000000000 --- a/scripts/backup_influxdb.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -#first move the contents of the old backup out and clear the directory -echo "Moving old influxdb backups if they exist" -[ -d ~/IOTstack/backups/influxdb/db_old ] || sudo mkdir ~/IOTstack/backups/influxdb/db_old -sudo rm ~/IOTstack/backups/influxdb/db_old/* >/dev/null 2>&1 -sudo mv ~/IOTstack/backups/influxdb/db/* ~/IOTstack/backups/influxdb/db_old/ >/dev/null 2>&1 -#sudo rm ~/IOTstack/backups/influxdb/db/* - -#execute the backup command -echo "backing up influxdb database" -docker exec influxdb influxd backup -portable /var/lib/influxdb/backup >/dev/null 2>&1 -echo "influxdb backup complete" diff --git a/scripts/backup_restore.py b/scripts/backup_restore.py new file mode 100755 index 000000000..e13b8d0a5 --- /dev/null +++ b/scripts/backup_restore.py @@ -0,0 +1,240 @@ +#!/usr/bin/env python3 + +import signal + +def main(): + from blessed import Terminal + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine + global renderMode + import time + import subprocess + global signal + + global backupRestoreSelectionInProgress + global mainMenuList + global currentMenuItemIndex + global hideHelpText + + try: # If not already set, then set it. 
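    # Descriptive note on the try/except just below: referencing the bare name
    # raises NameError when the parent menu has not already defined
    # hideHelpText, and the except branch then supplies the default. The net
    # effect is roughly hideHelpText = globals().get("hideHelpText", False),
    # which lets the show/hide-help preference carry over between sub-menus.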
+ hideHelpText = hideHelpText + except: + hideHelpText = False + + term = Terminal() + + def runBackup(): + global needsRender + print("Execute Backup:") + subprocess.call("./scripts/backup.sh", shell=True) + print("") + print("Backup completed.") + print("Press [Up] or [Down] arrow key to show the menu if it has scrolled too far.") + time.sleep(1) + needsRender = 1 + return True + + def dropboxInstall(): + global needsRender + print("Install Dropbox:") + subprocess.call("git clone https://github.com/andreafabrizi/Dropbox-Uploader.git ~/Dropbox-Uploader", shell=True) + subprocess.call("chmod +x ~/Dropbox-Uploader/dropbox_uploader.sh", shell=True) + subprocess.call("cd ~/Dropbox-Uploader && ./dropbox_uploader.sh", shell=True) + print("") + print("Dropbox install finished") + print("Press [Up] or [Down] arrow key to show the menu if it has scrolled too far.") + time.sleep(1) + needsRender = 1 + return True + + def rcloneInstall(): + global needsRender + print("Install rClone:") + print("sudo apt install -y rclone") + subprocess.call("sudo apt install -y rclone", shell=True) + print("") + print("rClone install finished") + print("Please run 'rclone config' to configure the rclone google drive backup") + print("Press [Up] or [Down] arrow key to show the menu if it has scrolled too far.") + needsRender = 1 + return True + + def rCloneSetup(): + global needsRender + print("Setup rclone:") + subprocess.call("rclone config", shell=True) + print("") + print("rclone setup completed. Press [Up] or [Down] arrow key to show the menu if it has scrolled too far.") + time.sleep(1) + needsRender = 1 + return True + + def runRestore(): + global needsRender + print("Execute Restore:") + subprocess.call("./scripts/restore.sh", shell=True) + print("") + print("Restore completed.") + print("Press [Up] or [Down] arrow key to show the menu if it has scrolled too far.") + time.sleep(1) + needsRender = 1 + return True + + def goBack(): + global backupRestoreSelectionInProgress + global needsRender + backupRestoreSelectionInProgress = False + needsRender = 1 + return True + + mainMenuList = [ + ["Run backup", runBackup], + ["Install Dropbox", dropboxInstall], + ["Install rclone", rcloneInstall], + ["Setup rclone (must be installed first)", rCloneSetup], + ["Restore from backup", runRestore], + ["Back", goBack] + ] + + hotzoneLocation = [7, 0] # Top text + + backupRestoreSelectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + + # Render Modes: + # 0 = No render needed + # 1 = Full render + # 2 = Hotzone only + needsRender = 1 + + def onResize(sig, action): + global mainMenuList + global currentMenuItemIndex + mainRender(1, mainMenuList, currentMenuItemIndex) + + def renderHotZone(term, menu, selection, hotzoneLocation): + lineLengthAtTextStart = 71 + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + for (index, menuItem) in enumerate(menu): + toPrint = "" + if index == selection: + toPrint += ('{bv} -> {t.blue_on_green} {title} {t.normal} <-'.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + else: + toPrint += ('{bv} {t.normal} {title} '.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + + for i in range(lineLengthAtTextStart - len(menuItem[0])): + toPrint += " " + + toPrint += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + + toPrint = term.center(toPrint) + + print(toPrint) + + def mainRender(needsRender, menu, selection): + term = Terminal() + + if needsRender == 1: + print(term.clear()) + 
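            # Render-mode behaviour in this function: needsRender == 1 is a
            # full repaint (screen cleared here, then title, borders and help
            # text redrawn), while needsRender == 2 skips this block so only
            # the hotzone menu rows are refreshed via renderHotZone(), keeping
            # cursor movement flicker-free.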
print(term.move_y(6 - hotzoneLocation[0])) + print(term.black_on_cornsilk4(term.center('IOTstack Backup Commands'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select backup command to run {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + if term.height < 30: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Not enough vertical room to render controls help text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to run command {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to go back to main menu {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + global needsRender + mainRender(1, mainMenuList, currentMenuItemIndex) + import types + if len(mainMenuList[selection]) > 1 and isinstance(mainMenuList[selection][1], types.FunctionType): + mainMenuList[selection][1]() + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(mainMenuList[selection][0]))) + needsRender = 1 + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 2: + if menu[index][2]["skip"] == True: + return False + return True + + if __name__ == 'builtins': + global signal + term = Terminal() + signal.signal(signal.SIGWINCH, onResize) + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + backupRestoreSelectionInProgress = True + with term.cbreak(): + while backupRestoreSelectionInProgress: + menuNavigateDirection = 0 + + if not needsRender == 0: # Only rerender when changed to prevent flickering + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + menuNavigateDirection += 1 + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_ENTER': + runSelection(currentMenuItemIndex) + if backupRestoreSelectionInProgress == False: + return True + if key.name == 'KEY_ESCAPE': + backupRestoreSelectionInProgress = False + return True + elif key: + if key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, mainMenuList, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection 
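                        # The modulo applied below wraps the cursor around
                        # either end of the menu; needsRender is set to 2 so
                        # only the hotzone is repainted, and the while loop
                        # keeps stepping in the same direction past any entry
                        # flagged as non-selectable.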
+ currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + needsRender = 2 + + while not isMenuItemSelectable(mainMenuList, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + return True + + return True + +originalSignalHandler = signal.getsignal(signal.SIGINT) +main() +signal.signal(signal.SIGWINCH, originalSignalHandler) diff --git a/scripts/backup_restore/post_backup_complete.sh b/scripts/backup_restore/post_backup_complete.sh new file mode 100755 index 000000000..b6a867bb6 --- /dev/null +++ b/scripts/backup_restore/post_backup_complete.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# This script runs any postbackup commands you may need + +if [ -f "./post_backup.sh" ]; then + echo "./post_backup.sh file found, executing:" + bash ./post_backup.sh +fi + +docker-compose up -d \ No newline at end of file diff --git a/scripts/backup_restore/post_restore_complete.sh b/scripts/backup_restore/post_restore_complete.sh new file mode 100755 index 000000000..a4dd815aa --- /dev/null +++ b/scripts/backup_restore/post_restore_complete.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +# This script runs any post restore commands you may need + +if [ -f "./post_restore.sh" ]; then + echo "./post_restore.sh file found, executing:" + bash ./post_restore.sh +fi diff --git a/scripts/backup_restore/pre_backup_complete.sh b/scripts/backup_restore/pre_backup_complete.sh new file mode 100755 index 000000000..a29c5f96f --- /dev/null +++ b/scripts/backup_restore/pre_backup_complete.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# This script runs any prebackup commands you may need + +docker-compose down + +if [ -f "./pre_backup.sh" ]; then + echo "./pre_backup.sh file found, executing:" + bash ./pre_backup.sh +fi diff --git a/scripts/buildstack_menu.py b/scripts/buildstack_menu.py new file mode 100755 index 000000000..b0c29108c --- /dev/null +++ b/scripts/buildstack_menu.py @@ -0,0 +1,630 @@ +#!/usr/bin/env python3 +import signal + +checkedMenuItems = [] +results = {} + +def main(): + import os + import time + import ruamel.yaml + import math + import sys + import subprocess + import traceback + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine, padText + from deps.consts import servicesDirectory, templatesDirectory, volumesDirectory, buildCache, envFile, dockerPathOutput, servicesFileName, composeOverrideFile + from deps.yaml_merge import mergeYaml + from blessed import Terminal + global signal + global renderMode + global term + global paginationSize + global paginationStartIndex + global hideHelpText + global activeMenuLocation + global lastSelection + + yaml = ruamel.yaml.YAML() + yaml.preserve_quotes = True + + # Constants + buildScriptFile = 'build.py' + dockerSavePathOutput = buildCache + + # Runtime vars + menu = [] + dockerComposeServicesYaml = {} + templatesDirectoryFolders = next(os.walk(templatesDirectory))[1] + term = Terminal() + hotzoneLocation = [7, 0] # Top text + paginationToggle = [10, term.height - 22] # Top text + controls text + paginationStartIndex = 0 + paginationSize = paginationToggle[0] + activeMenuLocation = 0 + lastSelection = 0 + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + def buildServices(): # TODO: Move this into a dependency so that it can be executed with just a list of services. 
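        # buildServices() assembles docker-compose.yml from the selected
        # templates: it runs any pre-build hooks, loads the base document from
        # envFile, attaches the selected services, merges compose-override.yml
        # over the generated document when that file exists, writes
        # dockerPathOutput plus the build cache, then runs post-build hooks and
        # an optional ./postbuild.sh with the selected service names.
        #
        # A minimal illustration of the merge step (hedged sketch; the exact
        # precedence is whatever deps.yaml_merge.mergeYaml implements, with the
        # override document passed as the first argument):
        #
        #   from deps.yaml_merge import mergeYaml
        #   base     = {"services": {"nodered": {"restart": "unless-stopped"}}}
        #   override = {"services": {"nodered": {"restart": "always"}}}
        #   merged   = mergeYaml(override, base)  # intent: override entries win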
+ global dockerComposeServicesYaml + try: + runPrebuildHook() + menuStateFileYaml = {} + menuStateFileYaml["services"] = dockerComposeServicesYaml + + with open(r'%s' % envFile) as fileEnv: + dockerFileYaml = yaml.load(fileEnv) + dockerFileYaml["services"] = dockerComposeServicesYaml + + if os.path.exists(composeOverrideFile): + with open(r'%s' % composeOverrideFile) as fileOverride: + yamlOverride = yaml.load(fileOverride) + + mergedYaml = mergeYaml(yamlOverride, dockerFileYaml) + dockerFileYaml = mergedYaml + + with open(r'%s' % dockerPathOutput, 'w') as outputFile: + yaml.dump(dockerFileYaml, outputFile) + + if not os.path.exists(servicesDirectory): + os.makedirs(servicesDirectory, exist_ok=True) + + with open(r'%s' % dockerSavePathOutput, 'w') as outputFile: + yaml.dump(menuStateFileYaml, outputFile) + runPostBuildHook() + + if os.path.exists('./postbuild.sh'): + servicesList = "" + for (index, serviceName) in enumerate(dockerComposeServicesYaml): + servicesList += " " + serviceName + subprocess.call("./postbuild.sh" + servicesList, shell=True) + + return True + except Exception as err: + print("Issue running build:") + traceback.print_exc() + input("Press Enter to continue...") + return False + + def generateTemplateList(templatesDirectoryFolders): + templatesDirectoryFolders.sort() + templateListDirectories = [] + for directory in templatesDirectoryFolders: + serviceFilePath = templatesDirectory + '/' + directory + '/' + servicesFileName + if os.path.exists(serviceFilePath): + templateListDirectories.append(directory) + + return templateListDirectories + + def generateLineText(text, textLength=None, paddingBefore=0, lineLength=26): + result = "" + for i in range(paddingBefore): + result += " " + + textPrintableCharactersLength = textLength + + if (textPrintableCharactersLength) == None: + textPrintableCharactersLength = len(text) + + result += text + remainingSpace = lineLength - textPrintableCharactersLength + + for i in range(remainingSpace): + result += " " + + return result + + def renderHotZone(term, renderType, menu, selection, paddingBefore, allIssues): + global paginationSize + optionsLength = len(" >> Options ") + optionsIssuesSpace = len(" ") + selectedTextLength = len("-> ") + spaceAfterissues = len(" ") + issuesLength = len(" !! 
Issue ") + + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + + if paginationStartIndex >= 1: + print(term.center("{b} {uaf} {uaf}{uaf}{uaf} {ual} {b}".format( + b=specialChars[renderMode]["borderVertical"], + uaf=specialChars[renderMode]["upArrowFull"], + ual=specialChars[renderMode]["upArrowLine"] + ))) + else: + print(term.center(commonEmptyLine(renderMode))) + + menuItemsActiveRow = term.get_location()[0] + if renderType == 2 or renderType == 1: # Rerender entire hotzone + for (index, menuItem) in enumerate(menu): # Menu loop + if "issues" in menuItem[1] and menuItem[1]["issues"]: + allIssues.append({ "serviceName": menuItem[0], "issues": menuItem[1]["issues"] }) + + if index >= paginationStartIndex and index < paginationStartIndex + paginationSize: + lineText = generateLineText(menuItem[0], paddingBefore=paddingBefore) + + # Menu highlight logic + if index == selection: + activeMenuLocation = term.get_location()[0] + formattedLineText = '-> {t.blue_on_green}{title}{t.normal} <-'.format(t=term, title=menuItem[0]) + paddedLineText = generateLineText(formattedLineText, textLength=len(menuItem[0]) + selectedTextLength, paddingBefore=paddingBefore - selectedTextLength) + toPrint = paddedLineText + else: + toPrint = '{title}{t.normal}'.format(t=term, title=lineText) + # ##### + + # Options and issues + if "buildHooks" in menuItem[1] and "options" in menuItem[1]["buildHooks"] and menuItem[1]["buildHooks"]["options"]: + toPrint = toPrint + '{t.blue_on_black} {raf}{raf} {t.normal}'.format(t=term, raf=specialChars[renderMode]["rightArrowFull"]) + toPrint = toPrint + ' {t.white_on_black} Options {t.normal}'.format(t=term) + else: + for i in range(optionsLength): + toPrint += " " + + for i in range(optionsIssuesSpace): + toPrint += " " + + if "issues" in menuItem[1] and menuItem[1]["issues"]: + toPrint = toPrint + '{t.red_on_orange} !! {t.normal}'.format(t=term) + toPrint = toPrint + ' {t.orange_on_black} Issue {t.normal}'.format(t=term) + else: + if menuItem[1]["checked"]: + if not menuItem[1]["issues"] == None and len(menuItem[1]["issues"]) == 0: + toPrint = toPrint + ' {t.green_on_blue} Pass {t.normal} '.format(t=term) + else: + for i in range(issuesLength): + toPrint += " " + else: + for i in range(issuesLength): + toPrint += " " + + for i in range(spaceAfterissues): + toPrint += " " + # ##### + + # Menu check render logic + if menuItem[1]["checked"]: + toPrint = " (X) " + toPrint + else: + toPrint = " ( ) " + toPrint + + toPrint = "{bv} {toPrint} {bv}".format(bv=specialChars[renderMode]["borderVertical"], toPrint=toPrint) # Generate border + toPrint = term.center(toPrint) # Center Text (All lines should have the same amount of printable characters) + # ##### + print(toPrint) + + + if renderType == 3: # Only partial rerender of hotzone (the unselected menu item, and the newly selected menu item rows) + global lastSelection + global renderOffsetLastSelection + global renderOffsetCurrentSelection + # TODO: Finish this, currently disabled. 
To enable, update the actions for UP and DOWN array keys below to assigned 3 to needsRender + renderOffsetLastSelection = lastSelection - paginationStartIndex + renderOffsetCurrentSelection = selection - paginationStartIndex + lineText = generateLineText(menu[lastSelection][0], paddingBefore=paddingBefore) + toPrint = '{title}{t.normal}'.format(t=term, title=lineText) + print('{t.move_y(lastSelection)}{title}'.format(t=term, title=toPrint)) + # print(toPrint) + print(renderOffsetCurrentSelection, lastSelection, renderOffsetLastSelection) + lastSelection = selection + + # menuItemsActiveRow + # activeMenuLocation + + + if paginationStartIndex + paginationSize < len(menu): + print(term.center("{b} {daf} {daf}{daf}{daf} {dal} {b}".format( + b=specialChars[renderMode]["borderVertical"], + daf=specialChars[renderMode]["downArrowFull"], + dal=specialChars[renderMode]["downArrowLine"] + ))) + else: + print(term.center(commonEmptyLine(renderMode))) + + def mainRender(menu, selection, renderType = 1): + global paginationStartIndex + global paginationSize + paddingBefore = 4 + + allIssues = [] + + if selection >= paginationStartIndex + paginationSize: + paginationStartIndex = selection - (paginationSize - 1) + 1 + renderType = 1 + + if selection <= paginationStartIndex - 1: + paginationStartIndex = selection + renderType = 1 + + try: + if (renderType == 1): + checkForOptions() + print(term.clear()) + print(term.move_y(7 - hotzoneLocation[0])) + print(term.black_on_cornsilk4(term.center('IOTstack Build Menu'))) + print("") + print(term.center(commonTopBorder(renderMode))) + + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select containers to build {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + + renderHotZone(term, renderType, menu, selection, paddingBefore, allIssues) + + if (renderType == 1): + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + room = term.height - (28 + len(allIssues) + paginationSize) + if room < 0: + allIssues.append({ "serviceName": "BuildStack Menu", "issues": { "screenSize": 'Not enough scren height to render correctly (t-height = ' + str(term.height) + ' v-lines = ' + str(room) + ')' } }) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Not enough vertical room to render controls help text ({th}, {rm}) {bv}".format(bv=specialChars[renderMode]["borderVertical"], th=padText(str(term.height), 3), rm=padText(str(room), 3)))) + print(term.center(commonEmptyLine(renderMode))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Space] to select or deselect image {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Right] for options for containers that support them {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Tab] Expand or collapse build menu size {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + # print(term.center("{bv} [F] Filter options 
{bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to begin build {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to cancel build {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + if len(allIssues) > 0: + print(term.center("")) + print(term.center("")) + print(term.center("")) + print(term.center(("{btl}{bh}{bh}{bh}{bh}{bh}{bh} Build Issues " + "{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}" + "{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}" + "{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}" + "{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}" + "{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}" + "{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}" + "{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}{bh}" + "{bh}{bh}{bh}{bh}{bh}{bh}{bh}{btr}").format( + btl=specialChars[renderMode]["borderTopLeft"], + btr=specialChars[renderMode]["borderTopRight"], + bh=specialChars[renderMode]["borderHorizontal"] + ))) + print(term.center(commonEmptyLine(renderMode, size = 139))) + for serviceIssues in allIssues: + for index, issue in enumerate(serviceIssues["issues"]): + spacesAndBracketsLen = 5 + issueAndTypeLen = len(issue) + len(serviceIssues["serviceName"]) + spacesAndBracketsLen + serviceNameAndConflictType = '{t.red_on_black}{issueService}{t.normal} ({t.yellow_on_black}{issueType}{t.normal}) '.format(t=term, issueService=serviceIssues["serviceName"], issueType=issue) + formattedServiceNameAndConflictType = generateLineText(str(serviceNameAndConflictType), textLength=issueAndTypeLen, paddingBefore=0, lineLength=32) + issueDescription = generateLineText(str(serviceIssues["issues"][issue]), textLength=len(str(serviceIssues["issues"][issue])), paddingBefore=0, lineLength=103) + print(term.center("{bv} {nm} - {desc} {bv}".format(nm=formattedServiceNameAndConflictType, desc=issueDescription, bv=specialChars[renderMode]["borderVertical"]) )) + print(term.center(commonEmptyLine(renderMode, size = 139))) + print(term.center(commonBottomBorder(renderMode, size = 139))) + + except Exception as err: + print("There was an error rendering the menu:") + traceback.print_exc() + print("Press [Esc] to go back") + return + + return + + def setCheckedMenuItems(): + global checkedMenuItems + checkedMenuItems.clear() + for (index, menuItem) in enumerate(menu): + if menuItem[1]["checked"]: + checkedMenuItems.append(menuItem[0]) + + def loadAllServices(reload = False): + global dockerComposeServicesYaml + dockerComposeServicesYaml.clear() + for (index, checkedMenuItem) in enumerate(checkedMenuItems): + if reload == False: + if not checkedMenuItem in dockerComposeServicesYaml: + serviceFilePath = templatesDirectory + '/' + checkedMenuItem + '/' + servicesFileName + with open(r'%s' % serviceFilePath) as yamlServiceFile: + dockerComposeServicesYaml[checkedMenuItem] = yaml.load(yamlServiceFile)[checkedMenuItem] + else: + print("reload!") + time.sleep(1) + serviceFilePath = templatesDirectory + '/' + checkedMenuItem + '/' + servicesFileName + with open(r'%s' % serviceFilePath) as yamlServiceFile: + dockerComposeServicesYaml[checkedMenuItem] = yaml.load(yamlServiceFile)[checkedMenuItem] + + return True + + def 
loadService(serviceName, reload = False): + try: + global dockerComposeServicesYaml + if reload == False: + if not serviceName in dockerComposeServicesYaml: + serviceFilePath = templatesDirectory + '/' + serviceName + '/' + servicesFileName + with open(r'%s' % serviceFilePath) as yamlServiceFile: + dockerComposeServicesYaml[serviceName] = yaml.load(yamlServiceFile)[serviceName] + else: + print("reload!") + time.sleep(1) + servicesFileNamePath = templatesDirectory + '/' + serviceName + '/' + servicesFileName + with open(r'%s' % serviceFilePath) as yamlServiceFile: + dockerComposeServicesYaml[serviceName] = yaml.load(yamlServiceFile)[serviceName] + except Exception as err: + print("Error running build menu:", err) + print("Check the following:") + print("* YAML service name matches the folder name") + print("* Error in YAML file") + print("* YAML file is unreadable") + print("* Buildstack script was modified") + input("Press Enter to exit...") + sys.exit(1) + + return True + + def checkForIssues(): + global dockerComposeServicesYaml + for (index, checkedMenuItem) in enumerate(checkedMenuItems): + buildScriptPath = templatesDirectory + '/' + checkedMenuItem + '/' + buildScriptFile + if os.path.exists(buildScriptPath): + try: + with open(buildScriptPath, "rb") as pythonDynamicImportFile: + code = compile(pythonDynamicImportFile.read(), buildScriptPath, "exec") + execGlobals = { + "dockerComposeServicesYaml": dockerComposeServicesYaml, + "toRun": "checkForRunChecksHook", + "currentServiceName": checkedMenuItem + } + execLocals = locals() + exec(code, execGlobals, execLocals) + if "buildHooks" in execGlobals and "runChecksHook" in execGlobals["buildHooks"] and execGlobals["buildHooks"]["runChecksHook"]: + execGlobals = { + "dockerComposeServicesYaml": dockerComposeServicesYaml, + "toRun": "runChecks", + "currentServiceName": checkedMenuItem + } + execLocals = locals() + try: + exec(code, execGlobals, execLocals) + if "issues" in execGlobals and len(execGlobals["issues"]) > 0: + menu[getMenuItemIndexByService(checkedMenuItem)][1]["issues"] = execGlobals["issues"] + else: + menu[getMenuItemIndexByService(checkedMenuItem)][1]["issues"] = [] + except Exception as err: + print("Error running checkForIssues on '%s'" % checkedMenuItem) + traceback.print_exc() + input("Press Enter to continue...") + else: + menu[getMenuItemIndexByService(checkedMenuItem)][1]["issues"] = [] + except Exception as err: + print("Error running checkForIssues on '%s'" % checkedMenuItem) + traceback.print_exc() + input("Press any key to exit...") + sys.exit(1) + + def checkForOptions(): + global dockerComposeServicesYaml + for (index, menuItem) in enumerate(menu): + buildScriptPath = templatesDirectory + '/' + menuItem[0] + '/' + buildScriptFile + if os.path.exists(buildScriptPath): + try: + with open(buildScriptPath, "rb") as pythonDynamicImportFile: + code = compile(pythonDynamicImportFile.read(), buildScriptPath, "exec") + execGlobals = { + "dockerComposeServicesYaml": dockerComposeServicesYaml, + "toRun": "checkForOptionsHook", + "currentServiceName": menuItem[0], + "renderMode": renderMode + } + execLocals = {} + exec(code, execGlobals, execLocals) + if not "buildHooks" in menu[getMenuItemIndexByService(menuItem[0])][1]: + menu[getMenuItemIndexByService(menuItem[0])][1]["buildHooks"] = {} + if "options" in execGlobals["buildHooks"] and execGlobals["buildHooks"]["options"]: + menu[getMenuItemIndexByService(menuItem[0])][1]["buildHooks"]["options"] = True + except Exception as err: + print("Error running checkForOptions on 
'%s'" % menuItem[0]) + traceback.print_exc() + input("Press any key to exit...") + sys.exit(1) + + def runPrebuildHook(): + global dockerComposeServicesYaml + for (index, checkedMenuItem) in enumerate(checkedMenuItems): + buildScriptPath = templatesDirectory + '/' + checkedMenuItem + '/' + buildScriptFile + if os.path.exists(buildScriptPath): + with open(buildScriptPath, "rb") as pythonDynamicImportFile: + code = compile(pythonDynamicImportFile.read(), buildScriptPath, "exec") + execGlobals = { + "dockerComposeServicesYaml": dockerComposeServicesYaml, + "toRun": "checkForPreBuildHook", + "currentServiceName": checkedMenuItem + } + execLocals = locals() + try: + exec(code, execGlobals, execLocals) + if "preBuildHook" in execGlobals["buildHooks"] and execGlobals["buildHooks"]["preBuildHook"]: + execGlobals = { + "dockerComposeServicesYaml": dockerComposeServicesYaml, + "toRun": "preBuild", + "currentServiceName": checkedMenuItem + } + execLocals = locals() + exec(code, execGlobals, execLocals) + except Exception as err: + print("Error running PreBuildHook on '%s'" % checkedMenuItem) + traceback.print_exc() + input("Press Enter to continue...") + try: # If the prebuild hook modified the docker-compose object, pull it from the script back to here. + dockerComposeServicesYaml = execGlobals["dockerComposeServicesYaml"] + except: + pass + + def runPostBuildHook(): + for (index, checkedMenuItem) in enumerate(checkedMenuItems): + buildScriptPath = templatesDirectory + '/' + checkedMenuItem + '/' + buildScriptFile + if os.path.exists(buildScriptPath): + with open(buildScriptPath, "rb") as pythonDynamicImportFile: + code = compile(pythonDynamicImportFile.read(), buildScriptPath, "exec") + execGlobals = { + "dockerComposeServicesYaml": dockerComposeServicesYaml, + "toRun": "checkForPostBuildHook", + "currentServiceName": checkedMenuItem + } + execLocals = locals() + try: + exec(code, execGlobals, execLocals) + if "postBuildHook" in execGlobals["buildHooks"] and execGlobals["buildHooks"]["postBuildHook"]: + execGlobals = { + "dockerComposeServicesYaml": dockerComposeServicesYaml, + "toRun": "postBuild", + "currentServiceName": checkedMenuItem + } + execLocals = locals() + exec(code, execGlobals, execLocals) + except Exception as err: + print("Error running PostBuildHook on '%s'" % checkedMenuItem) + traceback.print_exc() + input("Press Enter to continue...") + + def executeServiceOptions(): + global dockerComposeServicesYaml + menuItem = menu[selection] + if menu[selection][1]["checked"] and "buildHooks" in menuItem[1] and "options" in menuItem[1]["buildHooks"] and menuItem[1]["buildHooks"]["options"]: + buildScriptPath = templatesDirectory + '/' + menuItem[0] + '/' + buildScriptFile + if os.path.exists(buildScriptPath): + with open(buildScriptPath, "rb") as pythonDynamicImportFile: + code = compile(pythonDynamicImportFile.read(), buildScriptPath, "exec") + + execGlobals = { + "dockerComposeServicesYaml": dockerComposeServicesYaml, + "toRun": "runOptionsMenu", + "currentServiceName": menuItem[0], + "renderMode": renderMode + } + execLocals = locals() + exec(code, execGlobals, execLocals) + dockerComposeServicesYaml = execGlobals["dockerComposeServicesYaml"] + checkForIssues() + mainRender(menu, selection, 1) + + def getMenuItemIndexByService(serviceName): + for (index, menuItem) in enumerate(menu): + if (menuItem[0] == serviceName): + return index + + def checkMenuItem(selection): + global dockerComposeServicesYaml + if menu[selection][1]["checked"] == True: + menu[selection][1]["checked"] = False + 
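+    # Un-ticking a service also clears its recorded issues and removes its YAML from the in-memory compose map.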
menu[selection][1]["issues"] = None + del dockerComposeServicesYaml[menu[selection][0]] + else: + menu[selection][1]["checked"] = True + print(menu[selection][0]) + loadService(menu[selection][0]) + + def prepareMenuState(): + global dockerComposeServicesYaml + for (index, serviceName) in enumerate(dockerComposeServicesYaml): + checkMenuItem(getMenuItemIndexByService(serviceName)) + setCheckedMenuItems() + checkForIssues() + + return True + + def loadCurrentConfigs(templatesList): + global dockerComposeServicesYaml + if os.path.exists(dockerSavePathOutput): + print("Loading config fom: '%s'" % dockerSavePathOutput) + with open(r'%s' % dockerSavePathOutput) as fileSavedConfigs: + previousConfigs = yaml.load(fileSavedConfigs) + if not previousConfigs == None: + if "services" in previousConfigs: + dockerComposeServicesYaml = {} + for (index, serviceName) in enumerate(previousConfigs["services"]): + if serviceName in templatesList: # This ensures every service loaded has a template directory + dockerComposeServicesYaml[serviceName] = previousConfigs["services"][serviceName] + return True + dockerComposeServicesYaml = {} + return False + + def onResize(sig, action): + global paginationToggle + paginationToggle = [10, term.height - 25] + mainRender(menu, selection, 1) + + templatesList = generateTemplateList(templatesDirectoryFolders) + for directory in templatesList: + menu.append([directory, { "checked": False, "issues": None }]) + + if __name__ == 'builtins': + global results + global signal + needsRender = 1 + signal.signal(signal.SIGWINCH, onResize) + with term.fullscreen(): + print('Loading...') + selection = 0 + if loadCurrentConfigs(templatesList): + prepareMenuState() + mainRender(menu, selection, 1) + selectionInProgress = True + with term.cbreak(): + while selectionInProgress: + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + needsRender = 1 + if paginationSize == paginationToggle[0]: + paginationSize = paginationToggle[1] + paginationStartIndex = 0 + else: + paginationSize = paginationToggle[0] + if key.name == 'KEY_DOWN': + selection += 1 + needsRender = 2 + if key.name == 'KEY_UP': + selection -= 1 + needsRender = 2 + if key.name == 'KEY_RIGHT': + executeServiceOptions() + if key.name == 'KEY_ENTER': + setCheckedMenuItems() + checkForIssues() + selectionInProgress = False + results["buildState"] = buildServices() + return results["buildState"] + if key.name == 'KEY_ESCAPE': + results["buildState"] = False + return results["buildState"] + elif key: + if key == ' ': # Space pressed + checkMenuItem(selection) # Update checked list + setCheckedMenuItems() # Update UI memory + checkForIssues() + needsRender = 1 + elif key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + needsRender = 1 + else: + print(key) + time.sleep(0.5) + + selection = selection % len(menu) + + mainRender(menu, selection, needsRender) + +originalSignalHandler = signal.getsignal(signal.SIGINT) +main() +signal.signal(signal.SIGWINCH, originalSignalHandler) diff --git a/scripts/default_ports_md_generator.sh b/scripts/default_ports_md_generator.sh new file mode 100755 index 000000000..92476a791 --- /dev/null +++ b/scripts/default_ports_md_generator.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 + +# This script will return a markdown table containing the service names, mode (host or non-host), and default +# external ports used by all services found in the .templates directory. 
The markdown output can be used to +# quickly update the docs/Basic_setup/Default-Configs.md file. + +import glob +import pathlib +import re + +# Setup columns & print service names, mode, and default ports. + +print("| Service Name | Mode | Port(s)
*External:Internal* |")
+print("| ------------ | -----| --------------- |")
+
+# Locate the repo root and collect every template's service.yml.
+
+currentPath = pathlib.Path(__file__)
+dirName = str(currentPath.parents[1])
+templates = glob.glob(dirName + '/.templates/**/service.yml',recursive = True)
+
+# Iterate through service.ymls for required info.
+
+for template in sorted(templates):
+
+    with open(template) as file:
+
+        fileInput = file.read()
+
+        # Search for service names and mode.
+
+        try:
+            serviceName = re.search(r'container_name:.?(["a-z0-9_-]+)', fileInput).group(1)
+        except:
+            serviceName = 'Parsing error'
+
+        # Default to non-host so 'mode' is always defined, even when network_mode is set to something other than 'host'.
+        mode = 'non-host'
+        try:
+            if (re.search(r'^([^\#]\s+network_mode:).?([a-z0-9]+)', fileInput,flags = re.M).group(2) == 'host'):
+                mode = 'host'
+        except:
+            pass
+
+        # Print service and mode but do not end the line.
+
+        print("| " + serviceName + " | " + mode + " | ", end= "")
+
+        # Search for ports used by each service. findall is split into 2 groups to deal with #'s in some service.yml's.
+        # Keep only the ports and not the whitespace or "-".
+
+        portSearchResult = re.findall(r'^(\s*[-]\s*"*)(\d{2,5}[:]\d{2,5})', fileInput,re.M)
+        ports = []
+        for result in portSearchResult:
+            ports.append(result[1])
+
+        # Drop duplicates, e.g. the same port listed for both TCP and UDP.
+
+        dropDuplicates = []
+        [dropDuplicates.append(port) for port in ports if port not in dropDuplicates]
+
+        # Print the ports used and end the line when the for loop completes.
+
+        for port in dropDuplicates:
+            print(port + "
", end= "") + print("|") \ No newline at end of file diff --git a/scripts/deps/.gitignore b/scripts/deps/.gitignore new file mode 100644 index 000000000..ed8ebf583 --- /dev/null +++ b/scripts/deps/.gitignore @@ -0,0 +1 @@ +__pycache__ \ No newline at end of file diff --git a/scripts/deps/__init__.py b/scripts/deps/__init__.py new file mode 100755 index 000000000..e69de29bb diff --git a/scripts/deps/buildstack.py b/scripts/deps/buildstack.py new file mode 100755 index 000000000..5cb2777f1 --- /dev/null +++ b/scripts/deps/buildstack.py @@ -0,0 +1,106 @@ +import os +import ruamel.yaml +import math +import sys +from deps.yaml_merge import mergeYaml +from deps.consts import servicesDirectory, templatesDirectory, volumesDirectory, buildCache, envFile, dockerPathOutput, servicesFileName, composeOverrideFile + +yaml = ruamel.yaml.YAML() +yaml.preserve_quotes = True + +buildScriptFile = 'build.py' + +def buildServices(dockerComposeServicesYaml): + try: + runPrebuildHook() + dockerFileYaml = {} + menuStateFileYaml = {} + dockerFileYaml["version"] = "3.6" + dockerFileYaml["services"] = {} + menuStateFileYaml["services"] = {} + dockerFileYaml["services"] = dockerComposeServicesYaml + menuStateFileYaml["services"] = dockerComposeServicesYaml + + if os.path.exists(envFile): + with open(r'%s' % envFile) as fileEnv: + envSettings = yaml.load(fileEnv) + mergedYaml = mergeYaml(envSettings, dockerFileYaml) + dockerFileYaml = mergedYaml + + if os.path.exists(composeOverrideFile): + with open(r'%s' % composeOverrideFile) as fileOverride: + yamlOverride = yaml.load(fileOverride) + + mergedYaml = mergeYaml(yamlOverride, dockerFileYaml) + dockerFileYaml = mergedYaml + + with open(r'%s' % dockerPathOutput, 'w') as outputFile: + yaml.dump(dockerFileYaml, outputFile, explicit_start=True, default_style='"') + + with open(r'%s' % buildCache, 'w') as outputFile: + yaml.dump(menuStateFileYaml, outputFile, explicit_start=True, default_style='"') + runPostBuildHook() + return True + except Exception as err: + print("Issue running build:") + print(err) + input("Press Enter to continue...") + return False + +def runPrebuildHook(dockerComposeServicesYaml): + for (index, checkedMenuItem) in enumerate(checkedMenuItems): + buildScriptPath = templatesDirectory + '/' + checkedMenuItem + '/' + buildScriptFile + if os.path.exists(buildScriptPath): + with open(buildScriptPath, "rb") as pythonDynamicImportFile: + code = compile(pythonDynamicImportFile.read(), buildScriptPath, "exec") + execGlobals = { + "dockerComposeServicesYaml": dockerComposeServicesYaml, + "toRun": "checkForPreBuildHook", + "currentServiceName": checkedMenuItem + } + execLocals = locals() + try: + exec(code, execGlobals, execLocals) + if "preBuildHook" in execGlobals["buildHooks"] and execGlobals["buildHooks"]["preBuildHook"]: + execGlobals = { + "dockerComposeServicesYaml": dockerComposeServicesYaml, + "toRun": "preBuild", + "currentServiceName": checkedMenuItem + } + execLocals = locals() + exec(code, execGlobals, execLocals) + except Exception as err: + print("Error running PreBuildHook on '%s'" % checkedMenuItem) + print(err) + input("Press Enter to continue...") + try: # If the prebuild hook modified the docker-compose object, pull it from the script back to here. 
+ dockerComposeServicesYaml = execGlobals["dockerComposeServicesYaml"] + except: + pass + +def runPostBuildHook(): + for (index, checkedMenuItem) in enumerate(checkedMenuItems): + buildScriptPath = templatesDirectory + '/' + checkedMenuItem + '/' + buildScriptFile + if os.path.exists(buildScriptPath): + with open(buildScriptPath, "rb") as pythonDynamicImportFile: + code = compile(pythonDynamicImportFile.read(), buildScriptPath, "exec") + execGlobals = { + "dockerComposeServicesYaml": dockerComposeServicesYaml, + "toRun": "checkForPostBuildHook", + "currentServiceName": checkedMenuItem + } + execLocals = locals() + try: + exec(code, execGlobals, execLocals) + if "postBuildHook" in execGlobals["buildHooks"] and execGlobals["buildHooks"]["postBuildHook"]: + execGlobals = { + "dockerComposeServicesYaml": dockerComposeServicesYaml, + "toRun": "postBuild", + "currentServiceName": checkedMenuItem + } + execLocals = locals() + exec(code, execGlobals, execLocals) + except Exception as err: + print("Error running PostBuildHook on '%s'" % checkedMenuItem) + print(err) + input("Press Enter to continue...") diff --git a/scripts/deps/chars.py b/scripts/deps/chars.py new file mode 100755 index 000000000..51cfa0d77 --- /dev/null +++ b/scripts/deps/chars.py @@ -0,0 +1,72 @@ +specialChars = { + "latin": { + "rightArrowFull": "►", + "upArrowFull": "▲", + "upArrowLine": "↑", + "downArrowFull": "▼", + "downArrowLine": "↓", + "borderVertical": "║", + "borderHorizontal": "═", + "borderTopLeft": "╔", + "borderTopRight": "╗", + "borderBottomLeft": "╚", + "borderBottomRight": "╝" + }, + "simple": { + "rightArrowFull": "→", + "upArrowFull": "↑", + "upArrowLine": "↑", + "downArrowFull": "↓", + "downArrowLine": "↓", + "borderVertical": "│", + "borderHorizontal": "─", + "borderTopLeft": "┌", + "borderTopRight": "┐", + "borderBottomLeft": "└", + "borderBottomRight": "┘" + }, + "ascii": { + "rightArrowFull": ">", + "upArrowFull": "^", + "upArrowLine": "^", + "downArrowFull": "v", + "downArrowLine": "v", + "borderVertical": "|", + "borderHorizontal": "-", + "borderTopLeft": "/", + "borderTopRight": "\\", + "borderBottomLeft": "\\", + "borderBottomRight": "/" + } +} + +def commonTopBorder(renderMode, size=80): + output = "" + output += "{btl}".format(btl=specialChars[renderMode]["borderTopLeft"]) + for i in range(size): + output += "{bh}".format(bh=specialChars[renderMode]["borderHorizontal"]) + output += "{btr}".format(btr=specialChars[renderMode]["borderTopRight"]) + return output + +def commonBottomBorder(renderMode, size=80): + output = "" + output += "{bbl}".format(bbl=specialChars[renderMode]["borderBottomLeft"]) + for i in range(size): + output += "{bh}".format(bh=specialChars[renderMode]["borderHorizontal"]) + output += "{bbr}".format(bbr=specialChars[renderMode]["borderBottomRight"]) + return output + +def padText(text, size=45): + output = "" + output += text + for i in range(size - len(text)): + output += " " + return output + +def commonEmptyLine(renderMode, size=80): + output = "" + output += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + for i in range(size): + output += " " + output += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + return output diff --git a/scripts/deps/common_functions.py b/scripts/deps/common_functions.py new file mode 100755 index 000000000..b4309df48 --- /dev/null +++ b/scripts/deps/common_functions.py @@ -0,0 +1,190 @@ +import time +import string +import random +import sys +import os +import subprocess +from deps.consts import ifCheckList + +def 
generateRandomString(size = 0, chars = string.ascii_uppercase + string.ascii_lowercase + string.digits): + if size == 0: + size = random.randint(16, 24) + return ''.join(random.choice(chars) for _ in range(size)) + +def getNetworkDetails(inputList = None): + ifList = inputList + if (inputList == None): + ifList = ifCheckList + + results = { + "name": "", + "mac": "", + "ip": "" + } + + for (index, ifName) in enumerate(ifList): + try: + ip = getIpAddress(ifName) + mac = getMacAddress(ifName) + results["name"] = ifName + results["ip"] = ip + results["mac"] = mac + if (results["ip"] == "" or results["mac"] == ""): + continue + break + except: + continue + # pass + + return results + +def getMacAddress(ifName = None): + if (ifName == None): + print("getMacAddress: Need interface name") + return "" + + mac = "" + + if sys.platform == 'win32': + print("getMacAddress: Linux support only") + else: + FNULL = open(os.devnull, 'w') + ipRes = subprocess.Popen("/sbin/ifconfig %s" % ifName, shell=True, stdout=subprocess.PIPE, stderr=FNULL).communicate() + for line in ipRes[0].decode('utf-8').splitlines(): + if line.find('Ethernet') > -1: + mac = line.split()[1] + break + return mac + +def getIpAddress(ifName = None): + if (ifName == None): + print("getIpAddress: Need interface name") + return "" + + ip = "" + + if sys.platform == 'win32': + print("getIpAddress: Linux support only") + else: + FNULL = open(os.devnull, 'w') + ipRes = subprocess.Popen("/sbin/ifconfig %s" % ifName, shell=True, stdout=subprocess.PIPE, stderr=FNULL).communicate() + for line in ipRes[0].decode('utf-8').splitlines(): + if line.find('inet') > -1: + ip = line.split()[1] + break + return ip + +def getExternalPorts(serviceName, dockerComposeServicesYaml): + externalPorts = [] + try: + yamlService = dockerComposeServicesYaml[serviceName] + if "ports" in yamlService: + for (index, port) in enumerate(yamlService["ports"]): + try: + externalAndInternal = port.split(":") + externalPorts.append(externalAndInternal[0]) + except: + pass + except: + pass + return externalPorts + +def getInternalPorts(serviceName, dockerComposeServicesYaml): + externalPorts = [] + try: + yamlService = dockerComposeServicesYaml[serviceName] + if "ports" in yamlService: + for (index, port) in enumerate(yamlService["ports"]): + try: + externalAndInternal = port.split(":") + externalPorts.append(externalAndInternal[1]) + except: + pass + except: + pass + return externalPorts + +def checkPortConflicts(serviceName, currentPorts, dockerComposeServicesYaml): + portConflicts = [] + yamlService = dockerComposeServicesYaml[serviceName] + servicePorts = getExternalPorts(serviceName, dockerComposeServicesYaml) + for (index, servicePort) in enumerate(servicePorts): + for (index, currentPort) in enumerate(currentPorts): + if (servicePort == currentPort): + portConflicts.append([servicePort, serviceName]) + return portConflicts + +def checkDependsOn(serviceName, dockerComposeServicesYaml): + missingServices = [] + yamlService = dockerComposeServicesYaml[serviceName] + if "depends_on" in yamlService: + for (index, dependsOnName) in enumerate(yamlService["depends_on"]): + if not dependsOnName in dockerComposeServicesYaml: + missingServices.append([dependsOnName, serviceName]) + return missingServices + +def enterPortNumber(term, dockerComposeServicesYaml, currentServiceName, hotzoneLocation, createMenuFn): + newPortNumber = "" + try: + print(term.move_y(hotzoneLocation[0])) + print(term.center(" ")) + print(term.center(" ")) + print(term.center(" ")) + 
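+            # Reposition the cursor just below the top of the cleared hotzone before prompting for the new port.
+            # Illustrative effect of the rewrite further down: a service exposing "1880:1880" given a new entry of 1881 ends up with ports[0] == "1881:1880" (hypothetical values).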
print(term.move_y(hotzoneLocation[0] + 1)) + time.sleep(0.1) # Prevent loop + newPortNumber = input(term.center("Enter new port number: ")) + # newPortNumber = sys.stdin.readline() + time.sleep(0.1) # Prevent loop + newPortNumber = int(str(newPortNumber)) + if 1 <= newPortNumber <= 65535: + time.sleep(0.2) # Prevent loop + internalPort = getInternalPorts(currentServiceName, dockerComposeServicesYaml)[0] + dockerComposeServicesYaml[currentServiceName]["ports"][0] = "{newExtPort}:{oldIntPort}".format( + newExtPort = newPortNumber, + oldIntPort = internalPort + ) + createMenuFn() + return True + else: + print(term.center(' {t.white_on_red} "{port}" {message} {t.normal} <-'.format(t=term, port=newPortNumber, message="is not a valid port"))) + time.sleep(2) # Give time to read error + return False + except Exception as err: + print(term.center(' {t.white_on_red} "{port}" {message} {t.normal} <-'.format(t=term, port=newPortNumber, message="is not a valid port"))) + print(term.center(' {t.white_on_red} Error: {errorMsg} {t.normal} <-'.format(t=term, errorMsg=err))) + time.sleep(2.5) # Give time to read error + return False + +def enterPortNumberWithWhiptail(term, dockerComposeServicesYaml, currentServiceName, hotzoneLocation, defaultPort): + newPortNumber = "" + try: + portProcess = subprocess.Popen(['./scripts/deps/portWhiptail.sh', defaultPort, currentServiceName], stdout=subprocess.PIPE) + portResult = portProcess.communicate()[0] + portResult = portResult.decode("utf-8").split(",") + newPortNumber = portResult[0] + returnCode = portResult[1] + time.sleep(0.1) # Prevent loop + + if not returnCode == "0": + return -1 + + newPortNumber = int(str(newPortNumber)) + if 1 <= newPortNumber <= 65535: + time.sleep(0.2) # Prevent loop + return newPortNumber + else: + print(term.center(' {t.white_on_red} "{port}" {message} {t.normal} <-'.format(t=term, port=newPortNumber, message="is not a valid port"))) + time.sleep(2) # Give time to read error + return -1 + except Exception as err: + print(term.center(' {t.white_on_red} "{port}" {message} {t.normal} <-'.format(t=term, port=newPortNumber, message="is not a valid port"))) + print(term.center(' {t.white_on_red} Error: {errorMsg} {t.normal} <-'.format(t=term, errorMsg=err))) + time.sleep(2.5) # Give time to read error + return -1 + +def literalPresenter(dumper, data): + if isinstance(data, str) and "\n" in data: + return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|') + # if isinstance(data, None): + # return self.represent_scalar('tag:yaml.org,2002:null', u'') + return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='"') \ No newline at end of file diff --git a/scripts/deps/consts.py b/scripts/deps/consts.py new file mode 100755 index 000000000..405c3ab7a --- /dev/null +++ b/scripts/deps/consts.py @@ -0,0 +1,12 @@ +servicesDirectory = './services/' +templatesDirectory = './.templates/' +volumesDirectory = './volumes/' +tempDirectory = './.tmp/' +scriptsDirectory = './scripts/' +buildSettingsFileName = '/build_settings.yml' +buildCache = servicesDirectory + 'docker-compose.save.yml' +composeOverrideFile = './compose-override.yml' +envFile = templatesDirectory + 'docker-compose-base.yml' +dockerPathOutput = './docker-compose.yml' +servicesFileName = 'service.yml' +ifCheckList = ['eth0', 'wlan0'] diff --git a/scripts/deps/portWhiptail.sh b/scripts/deps/portWhiptail.sh new file mode 100755 index 000000000..ed36657ef --- /dev/null +++ b/scripts/deps/portWhiptail.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +NEWPORT=$(whiptail --inputbox 
"Enter in new port number (1-65535):" 8 78 "$1" --title "$2" 3>&1 1>&2 2>&3) +exitstatus=$? + +if [ $exitstatus = 0 ]; then + echo -n "$NEWPORT","$exitstatus" +else + echo -n "CANCEL","$exitstatus" +fi \ No newline at end of file diff --git a/scripts/deps/version_check.py b/scripts/deps/version_check.py new file mode 100755 index 000000000..d60740b7b --- /dev/null +++ b/scripts/deps/version_check.py @@ -0,0 +1,46 @@ +import re + +def checkVersion(requiredVersion, currentVersion): + """ + >>> checkVersion('18.2.0', '20.10.11') + (True, '', []) + >>> checkVersion('18.2.0', '16.3.1') + (False, 'Version Check Fail', [False, False, True]) + >>> checkVersion('18.2.0', '20.10.5+dfsg1') + (True, '', []) + """ + requiredSplit = requiredVersion.split('.') + + if len(requiredSplit) < 2: + return False, 'Invalid Required Version', requiredVersion + + try: + requiredMajor = int(requiredSplit[0]) + requiredMinor = int(requiredSplit[1]) + requiredBuild = int(requiredSplit[2]) + except: + return False, 'Invalid Required Version', requiredVersion + + currentSplit = currentVersion.split('.') + + if len(currentSplit) < 2: + return False, 'Invalid Current Version', currentVersion + + try: + currentMajor = int(currentSplit[0]) + currentMinor = int(currentSplit[1]) + currentBuild = re.split(r'[+-]', currentSplit[2])[0] + currentBuild = int(currentBuild) + except: + return False, 'Invalid Current Version', currentVersion + + if currentMajor > requiredMajor: + return True, '', [] + + if currentMajor == requiredMajor and currentMajor > requiredMinor: + return True, '', [] + + if currentMajor == requiredMajor and currentMinor == requiredMinor and currentBuild >= requiredBuild: + return True, '', [] + + return False, 'Version Check Fail', [currentMajor == requiredMajor, currentMinor == requiredMinor, currentBuild >= requiredBuild] \ No newline at end of file diff --git a/scripts/deps/yaml_merge.py b/scripts/deps/yaml_merge.py new file mode 100755 index 000000000..36f43a094 --- /dev/null +++ b/scripts/deps/yaml_merge.py @@ -0,0 +1,19 @@ + +def mergeYaml(priorityYaml, defaultYaml): + if not priorityYaml: + return defaultYaml + finalYaml = {} + if isinstance(defaultYaml, dict): + for dk, dv in defaultYaml.items(): + if dk in priorityYaml: + finalYaml[dk] = mergeYaml(priorityYaml[dk], dv) + else: + finalYaml[dk] = dv + for pk, pv in priorityYaml.items(): + if pk in finalYaml: + finalYaml[pk] = mergeYaml(finalYaml[pk], pv) + else: + finalYaml[pk] = pv + else: + finalYaml = defaultYaml + return finalYaml diff --git a/scripts/development/mkdocs-serve.sh b/scripts/development/mkdocs-serve.sh new file mode 100755 index 000000000..f309a16bf --- /dev/null +++ b/scripts/development/mkdocs-serve.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -eu + +# Install location agnostic "cd ~/IOTstack" +cd "$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" + +# Create python virtualenv and install any changes to the requirements: +virtualenv .virtualenv-mkdocs +source .virtualenv-mkdocs/bin/activate +pip3 install --upgrade -r requirements-mkdocs.txt + +mkdocs serve "$@" diff --git a/scripts/disable_swap.sh b/scripts/disable_swap.sh new file mode 100755 index 000000000..34cf49373 --- /dev/null +++ b/scripts/disable_swap.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +if [ $1 = "uninstallswap" ]; then + echo "Uninstalling swapfile..." 
+ sudo dphys-swapfile swapoff + sudo dphys-swapfile uninstall + sudo update-rc.d dphys-swapfile remove + sudo systemctl disable dphys-swapfile + echo "Swap file has been removed" +elif [ $1 = "disableswap" ]; then + echo "Disabling swapfile..." + if [ $(grep -c swappiness /etc/sysctl.conf) -eq 0 ]; then + echo "vm.swappiness=0" | sudo tee -a /etc/sysctl.conf + echo "updated /etc/sysctl.conf with vm.swappiness=0" + else + sudo sed -i "/vm.swappiness/c\vm.swappiness=0" /etc/sysctl.conf + echo "vm.swappiness found in /etc/sysctl.conf update to 0" + fi + + sudo sysctl vm.swappiness=0 + echo "set swappiness to 0 for immediate effect" +else + echo "Param not set, pass either 'uninstallswap' or 'disableswap'" +fi diff --git a/scripts/docker_backup.sh b/scripts/docker_backup.sh deleted file mode 100755 index 43361cd1a..000000000 --- a/scripts/docker_backup.sh +++ /dev/null @@ -1,100 +0,0 @@ -#!/bin/bash - -pushd ~/IOTstack - -[ -d ./backups ] || mkdir ./backups - -#create the list of files to backup -echo "./docker-compose.yml" >list.txt -echo "./services/" >>list.txt -echo "./volumes/" >>list.txt - -#if influxdb is running -if [ $(docker ps | grep -c influxdb) -gt 0 ]; then - ./scripts/backup_influxdb.sh - echo "./backups/influxdb/" >>list.txt -fi - -#setup variables -logfile=./backups/log_local.txt -backupfile="backup-$(date +"%Y-%m-%d_%H%M").tar.gz" - -#compress the backups folders to archive -echo "compressing stack folders" -sudo tar -czf \ - ./backups/$backupfile \ - --exclude=./volumes/influxdb/* \ - --exclude=./volumes/nextcloud/* \ - -T list.txt - -rm list.txt - -#set permission for backup files -sudo chown pi:pi ./backups/backup* - -#create local logfile and append the latest backup file to it -echo "backup saved to ./backups/$backupfile" -sudo touch $logfile -sudo chown pi:pi $logfile -echo $backupfile >>$logfile - -#show size of archive file -du -h ./backups/$backupfile - -#remove older local backup files -#to change backups retained, change below +8 to whatever you want (days retained +1) -ls -t1 ./backups/backup* | tail -n +8 | sudo xargs rm -f -echo "last seven local backup files are saved in ~/IOTstack/backups" - - - -#cloud related - dropbox -if [ -f ./backups/dropbox ]; then - - #setup variables - dropboxfolder=/IOTstackBU - dropboxuploader=~/Dropbox-Uploader/dropbox_uploader.sh - dropboxlog=./backups/log_dropbox.txt - - #upload new backup to dropbox - echo "uploading to dropbox" - $dropboxuploader upload ./backups/$backupfile $dropboxfolder - - #list older files to be deleted from cloud (exludes last 7) - #to change dropbox backups retained, change below -7 to whatever you want - echo "checking for old backups on dropbox" - files=$($dropboxuploader list $dropboxfolder | awk {' print $3 '} | tail -n +2 | head -n -7) - - #write files to be deleted to dropbox logfile - sudo touch $dropboxlog - sudo chown pi:pi $dropboxlog - echo $files | tr " " "\n" >$dropboxlog - - #delete files from dropbox as per logfile - echo "deleting old backups from dropbox if they exist - last 7 files are kept" - - #check older files exist on dropbox, if yes then delete them - if [ $( echo "$files" | grep -c "backup") -ne 0 ] ; then - input=$dropboxlog - while IFS= read -r file - do - $dropboxuploader delete $dropboxfolder/$file - done < "$input" - fi - - echo "backups deleted from dropbox" >>$dropboxlog - -fi - - -#cloud related - google drive -if [ -f ./backups/rclone ]; then - echo "synching to Google Drive" - echo "latest 7 backup files are kept" - #sync local backups to gdrive (older gdrive copies 
will be deleted) - rclone sync -P ./backups --include "/backup*" gdrive:/IOTstackBU/ - echo "synch with Google Drive complete" -fi - - -popd diff --git a/scripts/docker_commands.py b/scripts/docker_commands.py new file mode 100755 index 000000000..ad7fec1b3 --- /dev/null +++ b/scripts/docker_commands.py @@ -0,0 +1,294 @@ +#!/usr/bin/env python3 +import signal + +def main(): + from blessed import Terminal + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine + import math + import time + import subprocess + + global dockerCommandsSelectionInProgress + global renderMode + global signal + global mainMenuList + global currentMenuItemIndex + global hideHelpText + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + term = Terminal() + hotzoneLocation = [7, 0] # Top text + + def onResize(sig, action): + global mainMenuList + global currentMenuItemIndex + mainRender(1, mainMenuList, currentMenuItemIndex) + + def startStack(): + print("Start Stack:") + print("docker-compose up -d --remove-orphans") + subprocess.call("docker-compose up -d", shell=True) + print("") + print("Stack Started") + input("Process terminated. Press [Enter] to show menu and continue.") + needsRender = 1 + return True + + def restartStack(): + print("Restarting Stack...") + print("Stop Stack:") + print("docker-compose down") + subprocess.call("docker-compose down", shell=True) + print("") + print("Start Stack:") + print("docker-compose up -d --remove-orphans") + subprocess.call("docker-compose up -d", shell=True) + # print("docker-compose restart") + # subprocess.call("docker-compose restart", shell=True) + print("") + print("Stack Restarted") + input("Process terminated. Press [Enter] to show menu and continue.") + needsRender = 1 + return True + + def stopStack(): + print("Stop Stack:") + print("docker-compose down") + subprocess.call("docker-compose down", shell=True) + print("") + print("Stack Stopped") + input("Process terminated. Press [Enter] to show menu and continue.") + needsRender = 1 + return True + + def stopAllStack(): + print("Stop All Stack:") + print("docker container stop $(docker container ls -aq)") + subprocess.call("docker container stop $(docker container ls -aq)", shell=True) + print("") + input("Process terminated. Press [Enter] to show menu and continue.") + needsRender = 1 + return True + + def pruneVolumes(): + print("Stop All Stack:") + print("docker container stop $(docker container ls -aq)") + subprocess.call("docker container stop $(docker container ls -aq)", shell=True) + print("") + input("Process terminated. Press [Enter] to show menu and continue.") + needsRender = 1 + return True + + def updateAllContainers(): + print("Update All Containers:") + print("docker-compose pull") + subprocess.call("docker-compose pull", shell=True) + print("") + print("docker-compose build --no-cache --pull") + subprocess.call("docker-compose build --no-cache --pull", shell=True) + print("") + print("docker-compose up -d") + subprocess.call("docker-compose up -d", shell=True) + print("") + print("docker system prune -f") + subprocess.call("docker system prune -f", shell=True) + print("") + input("Process terminated. Press [Enter] to show menu and continue.") + needsRender = 1 + return True + + def deleteAndPruneVolumes(): + print("Delete and prune volumes:") + print("docker system prune --volumes") + subprocess.call("docker system prune --volumes", shell=True) + print("") + input("Process terminated. 
Press [Enter] to show menu and continue.") + needsRender = 1 + return True + + def deleteAndPruneImages(): + print("Delete and prune volumes:") + print("docker image prune -a") + subprocess.call("docker image prune -a", shell=True) + print("") + input("Process terminated. Press [Enter] to show menu and continue.") + needsRender = 1 + return True + + def monitorLogs(): + print("Monitor Logs:") + print("Press CTRL+X or CTRL+C to exit.") + time.sleep(2) + print("") + print("docker-compose logs -f") + time.sleep(0.5) + subprocess.call("docker-compose logs -f", shell=True) + print("") + time.sleep(0.5) + input("Process terminated. Press [Enter] to show menu and continue.") + needsRender = 1 + return True + + def goBack(): + global dockerCommandsSelectionInProgress + global needsRender + dockerCommandsSelectionInProgress = False + needsRender = 1 + return True + + mainMenuList = [ + ["Start stack", startStack], + ["Restart stack", restartStack], + ["Stop stack", stopStack], + ["Monitor Logs", monitorLogs], + ["Stop ALL running docker containers", stopAllStack], + ["Update all containers (may take a long time)", updateAllContainers], + ["Delete all stopped containers and docker volumes (prune volumes)", deleteAndPruneVolumes], + ["Delete all images not associated with container", deleteAndPruneImages], + ["Back", goBack] + ] + + dockerCommandsSelectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + + # Render Modes: + # 0 = No render needed + # 1 = Full render + # 2 = Hotzone only + needsRender = 1 + + def renderHotZone(term, menu, selection, hotzoneLocation): + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + lineLengthAtTextStart = 71 + + for (index, menuItem) in enumerate(menu): + toPrint = "" + if index == selection: + toPrint += ('{bv} -> {t.blue_on_green} {title} {t.normal} <-'.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + else: + toPrint += ('{bv} {t.normal} {title} '.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + + for i in range(lineLengthAtTextStart - len(menuItem[0])): + toPrint += " " + + toPrint += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + + toPrint = term.center(toPrint) + + print(toPrint) + + + def mainRender(needsRender, menu, selection): + term = Terminal() + + if needsRender == 1: + print(term.clear()) + + print(term.clear()) + print(term.move_y(6 - hotzoneLocation[0])) + print(term.black_on_cornsilk4(term.center('IOTstack Docker Commands'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Docker Command to run {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + if term.height < 30: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Not enough vertical room to render controls help text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to 
move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to run command {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to go back to main menu {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + + + def runSelection(selection): + import types + if len(mainMenuList[selection]) > 1 and isinstance(mainMenuList[selection][1], types.FunctionType): + mainMenuList[selection][1]() + mainRender(1, mainMenuList, currentMenuItemIndex) + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(mainMenuList[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 2: + if menu[index][2]["skip"] == True: + return False + return True + + if __name__ == 'builtins': + term = Terminal() + signal.signal(signal.SIGWINCH, onResize) + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + dockerCommandsSelectionInProgress = True + with term.cbreak(): + while dockerCommandsSelectionInProgress: + menuNavigateDirection = 0 + + if not needsRender == 0: # Only rerender when changed to prevent flickering + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + menuNavigateDirection += 1 + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_ENTER': + mainRender(1, mainMenuList, currentMenuItemIndex) + runSelection(currentMenuItemIndex) + if dockerCommandsSelectionInProgress == False: + return True + if key.name == 'KEY_ESCAPE': + dockerCommandsSelectionInProgress = False + return True + elif key: + if key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, mainMenuList, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + needsRender = 2 + + while not isMenuItemSelectable(mainMenuList, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + return True + + return True + +originalSignalHandler = signal.getsignal(signal.SIGINT) +main() +signal.signal(signal.SIGWINCH, originalSignalHandler) \ No newline at end of file diff --git a/scripts/git_check.sh b/scripts/git_check.sh new file mode 100755 index 000000000..f3344f03e --- /dev/null +++ b/scripts/git_check.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +UPSTREAM=${1:-'@{u}'} +LOCAL=$(git rev-parse @) +REMOTE=$(git rev-parse "$UPSTREAM") +BASE=$(git merge-base @ "$UPSTREAM") + +if [ $LOCAL = $REMOTE ]; then + echo "Up-to-date" +elif [ $LOCAL = $BASE ]; then + echo "Need to pull" +elif [ $REMOTE = $BASE ]; then + echo "Need to push" +else + echo "Diverged" +fi \ No newline at end of file diff --git a/scripts/install_docker.sh b/scripts/install_docker.sh new file mode 100755 index 000000000..046c2a048 --- /dev/null +++ b/scripts/install_docker.sh @@ 
-0,0 +1,51 @@ +#!/bin/bash + +if [ -z "$1" ]; then + echo "You must specify whether to install or upgrade docker." + exit +fi + +if [ "$EUID" -ne 0 ] + then echo "Please run as root" + exit +fi +function command_exists() { + command -v "$@" > /dev/null 2>&1 +} + +if [ "$1" == "install" ]; then + RESTART_REQUIRED="false" + if command_exists docker; then + echo "Docker already installed" >&2 + else + echo "Install Docker" >&2 + curl -fsSL https://get.docker.com | sh + RESTART_REQUIRED="true" + sudo usermod -aG docker $USER + fi + + if command_exists docker-compose; then + echo "docker-compose already installed" >&2 + else + RESTART_REQUIRED="true" + echo "Install docker-compose" >&2 + sudo apt install -y docker-compose + sudo usermod -aG docker $USER + fi + + if [ "$RESTART_REQUIRED" == "true" ]; then + if (whiptail --title "Restart Required" --yesno "It is recommended that you restart your device now. Select yes to do so now" 20 78); then + sudo reboot + fi + fi +fi + +if [ "$1" == "upgrade" ]; then + sudo apt upgrade docker docker-compose + + if [ $? -eq 0 ]; then + if (whiptail --title "Restart Required" --yesno "It is recommended that you restart your device now. Select yes to do so now" 20 78); then + reboot + fi + fi +fi diff --git a/scripts/install_log2ram.sh b/scripts/install_log2ram.sh new file mode 100755 index 000000000..25ea13f57 --- /dev/null +++ b/scripts/install_log2ram.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +if [ ! -d ~/log2ram ]; then + git clone https://github.com/azlux/log2ram.git ~/log2ram + chmod +x ~/log2ram/install.sh + pushd ~/log2ram + sudo ./install.sh + popd +else + echo "Log2RAM is already installed. You can reinstall by running: 'sudo ./install.sh' from ~/log2ram " + sleep 1 +fi diff --git a/scripts/install_ssh_keys.sh b/scripts/install_ssh_keys.sh new file mode 100755 index 000000000..49c6c9fdd --- /dev/null +++ b/scripts/install_ssh_keys.sh @@ -0,0 +1,150 @@ +#!/bin/bash + +# Created by: Slyke +# Email: steven.lawler777@gmail.com +# Version: 2 (IOTstack) +# Date: 2020-07-08 +# This script allows you to automatically install SSH keys from your github account, and optionally disable password authentication for sshd. +# License: MIT + +VERBOSE_MODE=0 +DISABLE_PASSWORD_AUTH=0 +SKIP_SSH_WARNING=0 +AUTH_KEYS_FILE=~/.ssh/authorized_keys +SSH_CONFIG_FILE=/etc/ssh/sshd_config + +function printHelp() { + echo "Usage:" + echo " --quiet" + echo " No output, unless displaying errors or input is required." + echo "" + echo " --clear" + echo " Clears all entries in $AUTH_KEYS_FILE" + echo "" + echo " --username {USERNAME}" + echo " Set your github username so that you are not prompted for it" + echo "" + echo " --disable-password-authentication" + echo " Updates $SSH_CONFIG_FILE to disable password authentication and then restarts ssh service. Requires sudo access. Warning: You may lock yourself out." + echo "" + echo " --skip-ssh-warning" + echo " Skips the warning and the wait when --disable-password-authentication is set." 
+ echo "" + echo " --help" + echo " Displays this message" + echo "" + echo "Example:" + echo " $0 --clear --username slyke --quiet" + echo "" + exit 0 +} + +function setUsername() { + GH_USERNAME=$1 +} + +function clearAuthKeys() { + if [ -f $AUTH_KEYS_FILE ]; then + echo "" > $AUTH_KEYS_FILE + fi +} + +function disablePasswordAuthentication () { + sudo grep -q "ChallengeResponseAuthentication" $SSH_CONFIG_FILE && sed -i "/^[^#]*ChallengeResponseAuthentication[[:space:]]yes.*/c\ChallengeResponseAuthentication no" $SSH_CONFIG_FILE || echo "ChallengeResponseAuthentication no" >> $SSH_CONFIG_FILE + sudo grep -q "^[^#]*PasswordAuthentication" $SSH_CONFIG_FILE && sed -i "/^[^#]*PasswordAuthentication[[:space:]]yes/c\PasswordAuthentication no" $SSH_CONFIG_FILE || echo "PasswordAuthentication no" >> $SSH_CONFIG_FILE + sudo service ssh restart +} + +while test $# -gt 0 +do + case "$1" in + --quiet) VERBOSE_MODE=1 + ;; + --username) setUsername $2 + ;; + --clear) clearAuthKeys + ;; + --disable-password-authentication) DISABLE_PASSWORD_AUTH=1 + ;; + --disable-password-auth) DISABLE_PASSWORD_AUTH=1 + ;; + --disable-passwd-auth) DISABLE_PASSWORD_AUTH=1 + ;; + --skip-ssh-warning) SKIP_SSH_WARNING=1 + ;; + --help) printHelp + ;; + -h) printHelp + ;; + --*) echo "Bad option $1"; echo "For help use: $0 --help "; exit 3 + ;; + esac + shift +done + +if [ -z ${GH_USERNAME+x} ]; then + echo "" + echo "Enter your github username" + read GH_USERNAME; +fi + +exec 3>&1 +# exec 4>&2 + +if [[ "$VERBOSE_MODE" -eq 1 ]]; then + exec 1>/dev/null + # exec 2>/dev/null +fi + +if [ ! -f $AUTH_KEYS_FILE ]; then + echo "Created: '$AUTH_KEYS_FILE'" + touch $AUTH_KEYS_FILE +fi + +if [[ "$DISABLE_PASSWORD_AUTH" -eq 1 ]]; then + if [[ ! "$SKIP_SSH_WARNING" -eq 1 ]]; then + echo "Will disable password authentication and restart sshd service after installing ssh keys." + echo "Press ctrl+c now to cancel." + sleep 5 + fi +fi + +SSH_KEYS=$(curl -s "https://github.com/$GH_USERNAME.keys") + +KEYS_ADDED=0 +KEYS_SKIPPED=0 + +if [[ "$SSH_KEYS" == "Not Found" ]]; then + >&2 echo "Username '$GH_USERNAME' not found" + >&2 echo "URL: 'https://github.com/$GH_USERNAME.keys'" + exit 1 +fi + +if [[ ${#SSH_KEYS} -le 16 ]]; then + >&2 echo "Something went wrong retrieving SSH keys for '$GH_USERNAME'" + >&2 echo "URL: 'https://github.com/$GH_USERNAME.keys'" + >&2 echo "Result: " + >&2 echo "$SSH_KEYS" + exit 2 +fi + +while read -r AUTH_KEY; do AUTHKEYS+=("$AUTH_KEY"); done <<<"$SSH_KEYS" + +for i in "${!AUTHKEYS[@]}"; do + if grep -Fxq "${AUTHKEYS[$i]}" $AUTH_KEYS_FILE ; then + echo "Key $i already exists in '$AUTH_KEYS_FILE' Skipping..." + KEYS_SKIPPED=$(( $KEYS_SKIPPED + 1 )) + else + echo "${AUTHKEYS[$i]}" >> $AUTH_KEYS_FILE + echo "Key [$i] added." 
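+    # Count appended keys separately from skipped ones for the summary printed below; keys are matched line-for-line against GitHub's https://github.com/$GH_USERNAME.keys output.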
+ KEYS_ADDED=$(( $KEYS_ADDED + 1 )) + fi +done + +echo "Keys Added: $KEYS_ADDED" +echo "Keys Skipped: $KEYS_SKIPPED" + +if [[ "$DISABLE_PASSWORD_AUTH" -eq 1 ]]; then + echo "Disabling password authentication and restarting sshd:" + disablePasswordAuthentication +fi diff --git a/scripts/menu_main.py b/scripts/menu_main.py new file mode 100755 index 000000000..b4dfc17ba --- /dev/null +++ b/scripts/menu_main.py @@ -0,0 +1,467 @@ +#!/usr/bin/python3 +from blessed import Terminal +import sys +import subprocess +import os +import time +import types +import signal +from deps.chars import specialChars +from deps.version_check import checkVersion + +term = Terminal() + +# Settings/Consts +requiredDockerVersion = "18.2.0" + +# Vars +selectionInProgress = True +currentMenuItemIndex = 0 +menuNavigateDirection = 0 +projectStatusPollRateRefresh = 1 +promptFiles = False +buildComplete = None +hotzoneLocation = [((term.height // 16) + 6), 0] +screenActive = True + + # Render Modes: + # 0 = No render needed + # 1 = Full render + # 2 = Hotzone only +needsRender = 1 + +def checkRenderOptions(): + global term + global renderMode + if len(sys.argv) > 1 and (sys.argv[1] == "simple" or sys.argv[1] == "latin" or sys.argv[1] == "ascii"): + renderMode = sys.argv[1] + else: + print(term.clear()) + try: + print( + specialChars["latin"]["rightArrowFull"], + specialChars["latin"]["upArrowFull"], + specialChars["latin"]["upArrowLine"], + specialChars["latin"]["downArrowFull"], + specialChars["latin"]["downArrowLine"], + specialChars["latin"]["borderVertical"], + specialChars["latin"]["borderHorizontal"], + specialChars["latin"]["borderTopLeft"], + specialChars["latin"]["borderTopRight"], + specialChars["latin"]["borderBottomLeft"], + specialChars["latin"]["borderBottomRight"], + ) + print(term.clear()) + renderMode = "latin" + return "latin" + except: + try: + print( + specialChars["simple"]["rightArrowFull"], + specialChars["simple"]["upArrowFull"], + specialChars["simple"]["upArrowLine"], + specialChars["simple"]["downArrowFull"], + specialChars["simple"]["downArrowLine"], + specialChars["simple"]["borderVertical"], + specialChars["simple"]["borderHorizontal"], + specialChars["simple"]["borderTopLeft"], + specialChars["simple"]["borderTopRight"], + specialChars["simple"]["borderBottomLeft"], + specialChars["simple"]["borderBottomRight"], + ) + print(term.clear()) + renderMode = "simple" + return "simple" + except: + print(term.clear()) + renderMode = "ascii" + return "ascii" + + +def onResize(sig, action): + global needsRender + global mainMenuList + global currentMenuItemIndex + global screenActive + if screenActive: + mainRender(1, mainMenuList, currentMenuItemIndex) + +# Menu Functions +def exitMenu(): + print("Exiting IOTstack menu.") + print(term.clear()) + sys.exit(0) + +def buildStack(): + global buildComplete + global needsRender + global screenActive + + buildComplete = None + buildstackFilePath = "./scripts/buildstack_menu.py" + with open(buildstackFilePath, "rb") as pythonDynamicImportFile: + code = compile(pythonDynamicImportFile.read(), buildstackFilePath, "exec") + execGlobals = { + "renderMode": renderMode + } + execLocals = {} + screenActive = False + print(term.clear()) + exec(code, execGlobals, execLocals) + buildComplete = execGlobals["results"]["buildState"] + signal.signal(signal.SIGWINCH, onResize) + screenActive = True + needsRender = 1 + +def runExampleMenu(): + exampleMenuFilePath = "./.templates/example_template/example_build.py" + with open(exampleMenuFilePath, "rb") as 
pythonDynamicImportFile: + code = compile(pythonDynamicImportFile.read(), exampleMenuFilePath, "exec") + # execGlobals = globals() + execGlobals = { + "renderMode": renderMode + } + execLocals = locals() + execGlobals["currentServiceName"] = 'SERVICENAME' + execGlobals["toRun"] = 'runOptionsMenu' + screenActive = False + exec(code, execGlobals, execLocals) + signal.signal(signal.SIGWINCH, onResize) + screenActive = True + +def dockerCommands(): + global needsRender + dockerCommandsFilePath = "./scripts/docker_commands.py" + with open(dockerCommandsFilePath, "rb") as pythonDynamicImportFile: + code = compile(pythonDynamicImportFile.read(), dockerCommandsFilePath, "exec") + # execGlobals = globals() + # execLocals = locals() + execGlobals = { + "renderMode": renderMode + } + execLocals = {} + screenActive = False + exec(code, execGlobals, execLocals) + signal.signal(signal.SIGWINCH, onResize) + screenActive = True + needsRender = 1 + +def miscCommands(): + global needsRender + dockerCommandsFilePath = "./scripts/misc_commands.py" + with open(dockerCommandsFilePath, "rb") as pythonDynamicImportFile: + code = compile(pythonDynamicImportFile.read(), dockerCommandsFilePath, "exec") + # execGlobals = globals() + # execLocals = locals() + execGlobals = { + "renderMode": renderMode + } + execLocals = {} + screenActive = False + exec(code, execGlobals, execLocals) + signal.signal(signal.SIGWINCH, onResize) + screenActive = True + needsRender = 1 + +def nativeInstalls(): + global needsRender + global screenActive + dockerCommandsFilePath = "./scripts/native_installs.py" + with open(dockerCommandsFilePath, "rb") as pythonDynamicImportFile: + code = compile(pythonDynamicImportFile.read(), dockerCommandsFilePath, "exec") + # currGlobals = globals() + # currLocals = locals() + execGlobals = { + "renderMode": renderMode + } + execLocals = {} + screenActive = False + exec(code, execGlobals, execLocals) + signal.signal(signal.SIGWINCH, onResize) + screenActive = True + needsRender = 1 + +def backupAndRestore(): + global needsRender + global screenActive + dockerCommandsFilePath = "./scripts/backup_restore.py" + with open(dockerCommandsFilePath, "rb") as pythonDynamicImportFile: + code = compile(pythonDynamicImportFile.read(), dockerCommandsFilePath, "exec") + # currGlobals = globals() + # currLocals = locals() + execGlobals = { + "renderMode": renderMode + } + execLocals = {} + screenActive = False + exec(code, execGlobals, execLocals) + signal.signal(signal.SIGWINCH, onResize) + screenActive = True + needsRender = 1 + +def doNothing(): + selectionInProgress = True + +def skipItem(currentMenuItemIndex, direction): + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + if len(mainMenuList[currentMenuItemIndex]) > 2 and mainMenuList[currentMenuItemIndex][2]["skip"] == True: + currentMenuItemIndex += lastSelectionDirection + return currentMenuItemIndex + +def deletePromptFiles(): + # global promptFiles + # global currentMenuItemIndex + if os.path.exists(".project_outofdate"): + os.remove(".project_outofdate") + if os.path.exists(".docker_outofdate"): + os.remove(".docker_outofdate") + if os.path.exists(".docker_notinstalled"): + os.remove(".docker_notinstalled") + promptFiles = False + currentMenuItemIndex = 0 + +def installDocker(): + print("Install Docker: curl -fsSL https://get.docker.com | sh && sudo usermod -aG docker $USER") + installDockerProcess = subprocess.Popen(['sudo', 'bash', './install_docker.sh', 'install'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + 
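+    # Block until the install script exits, then capture and decode its output for display by the caller.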
installDockerProcess.wait() + installDockerResult, stdError = installDockerProcess.communicate() + installDockerResult = installDockerResult.decode("utf-8").rstrip() + + return installDockerResult + +def upgradeDocker(): + print("Upgrade Docker: sudo apt upgrade docker docker-compose") + upgradeDockerProcess = subprocess.Popen(['sudo', 'bash', './install_docker.sh', 'upgrade'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + upgradeDockerProcess.wait() + upgradeDockerResult, stdError = upgradeDockerProcess.communicate() + upgradeDockerResult = upgradeDockerResult.decode("utf-8").rstrip() + + return upgradeDockerResult + +baseMenu = [ + ["Build Stack", buildStack], + ["Docker Commands", dockerCommands], + ["Miscellaneous Commands", miscCommands], + ["Backup and Restore", backupAndRestore], + ["Native Installs", nativeInstalls], + # ["Developer: Example Menu", runExampleMenu], # Uncomment if you want to see the example menu + ["Exit", exitMenu] +] + +# Main Menu +mainMenuList = baseMenu + +potentialMenu = { + "projectUpdate": { + "menuItem": ["Update IOTstack", installDocker], + "added": False + }, + "dockerUpdate": { # TODO: Do note use, fix shell issues first + "menuItem": ["Update Docker", upgradeDocker], + "added": False + }, + "dockerNotUpdated": { # TODO: Do note use, fix shell issues first + "menuItem": [term.red_on_black("Docker is not up to date"), doNothing, { "skip": True }], + "added": False + }, + "dockerTerminals": { # TODO: Do note use, not finished + "menuItem": ["Docker Terminals", doNothing], + "added": False + }, + "noProjectUpdate": { + "menuItem": [term.green_on_black("IOTstack is up to date"), doNothing, { "skip": True }], + "added": False + }, + "spacer": { + "menuItem": ["------", doNothing, { "skip": True }], + "added": False + }, + "newLine": { + "menuItem": [" ", doNothing, { "skip": True }], + "added": False + }, + "deletePromptFiles": { + "menuItem": ["Delete 'out of date' prompt files", deletePromptFiles], + "added": False + }, + "updatesCheck": { + "menuItem": [term.blue_on_black("Checking for updates..."), doNothing, { "skip": True }], + "added": False + } +} + +def checkDockerVersion(): + try: + getDockerVersion = subprocess.Popen(['docker', 'version', '-f', '"{{.Server.Version}}"'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + getDockerVersion.wait() + currentDockerVersion, stdError = getDockerVersion.communicate() + currentDockerVersion = currentDockerVersion.decode("utf-8").rstrip().replace('"', '') + except Exception as err: + print("Error attempting to run docker command:", err) + currentDockerVersion = "" + + return checkVersion(requiredDockerVersion, currentDockerVersion) + +def checkProjectUpdates(): + getCurrentBranch = subprocess.Popen(["git", "name-rev", "--name-only", "HEAD"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + getCurrentBranch.wait() + currentBranch, stdError = getCurrentBranch.communicate() + currentBranch = currentBranch.decode("utf-8").rstrip() + projectStatus = subprocess.Popen(["git", "fetch", "origin", currentBranch], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + return projectStatus + +def addPotentialMenuItem(menuItemName, hasSpacer=True): + if (potentialMenu["newLine"]["added"] == False): + potentialMenu["newLine"]["added"] = True + baseMenu.append(potentialMenu["newLine"]["menuItem"]) + if hasSpacer and potentialMenu["spacer"]["added"] == False: + potentialMenu["spacer"]["added"] = True + baseMenu.append(potentialMenu["spacer"]["menuItem"]) + + if (potentialMenu[menuItemName]["added"] 
== False): + potentialMenu[menuItemName]["added"] = True + baseMenu.append(potentialMenu[menuItemName]["menuItem"]) + return True + + return False + +def removeMenuItemByLabel(potentialItemKey): + i = -1 + for menuItem in mainMenuList: + i += 1 + if menuItem[0] == potentialMenu[potentialItemKey]["menuItem"][0]: + potentialMenu[potentialItemKey]["added"] = False + mainMenuList.pop(i) + +def doPotentialMenuCheck(projectStatus, dockerVersion=True, promptFiles=False): + global needsRender + + if (promptFiles == True): + addPotentialMenuItem("deletePromptFiles") + needsRender = 2 + else: + removeMenuItemByLabel("deletePromptFiles") + + # if (projectStatus.poll() == None): + # addPotentialMenuItem("updatesCheck", False) + # needsRender = 2 + # else: + # removeMenuItemByLabel("updatesCheck") + + # if (projectStatus.poll() == 1): + # added = addPotentialMenuItem("projectUpdate") + # projectStatusPollRateRefresh = None + # if (added): + # needsRender = 1 + + # if (projectStatus.poll() == 0): + # added = addPotentialMenuItem("noProjectUpdate") + # projectStatusPollRateRefresh = None + # if (added): + # needsRender = 1 + + if (dockerVersion == False): + added = addPotentialMenuItem("dockerNotUpdated") + if (added): + needsRender = 1 + +def checkIfPromptFilesExist(): + if os.path.exists(".project_outofdate"): + return True + if os.path.exists(".docker_outofdate"): + return True + if os.path.exists(".docker_notinstalled"): + return True + return False + +def renderHotZone(term, menu, selection): + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + for (index, menuItem) in enumerate(menu): + if index == selection: + print(term.center('-> {t.blue_on_green}{title}{t.normal} <-'.format(t=term, title=menuItem[0]))) + else: + print(term.center('{title}'.format(t=term, title=menuItem[0]))) + +def mainRender(needsRender, menu, selection): + term = Terminal() + if needsRender == 1: + print(term.clear()) + print(term.move_y(term.height // 16)) + print(term.black_on_cornsilk4(term.center('IOTstack Main Menu'))) + print("") + + if needsRender >= 1: + renderHotZone(term, menu, selection) + + if (buildComplete and needsRender == 1): + print("") + print("") + print("") + print(term.center('{t.blue_on_green} {text} {t.normal}{t.white_on_black}{cPath} {t.normal}'.format(t=term, text="Build completed:", cPath=" ./docker-compose.yml"))) + print(term.center('{t.white_on_black}{text}{t.blue_on_green2} {commandString} {t.normal}'.format(t=term, text="You can start the stack from the Docker Commands menu, or from the CLI with: ", commandString="docker-compose up -d"))) + if os.path.exists('./compose-override.yml'): + print("") + print(term.center('{t.grey_on_blue4} {text} {t.normal}{t.white_on_black}{t.normal}'.format(t=term, text="'compose-override.yml' was merged into 'docker-compose.yml'"))) + print("") + +def runSelection(selection): + global needsRender + if len(mainMenuList[selection]) > 1 and isinstance(mainMenuList[selection][1], types.FunctionType): + mainMenuList[selection][1]() + needsRender = 1 + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(mainMenuList[selection][0]))) + +def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 2: + if menu[index][2]["skip"] == True: + return False + return True + +# Entrypoint +if __name__ == '__main__': + projectStatus = checkProjectUpdates() # Async + dockerVersion, reason, data = checkDockerVersion() + promptFiles = checkIfPromptFilesExist() + term = Terminal() + + 
signal.signal(signal.SIGWINCH, onResize) + + with term.fullscreen(): + checkRenderOptions() + mainRender(needsRender, mainMenuList, currentMenuItemIndex) # Initial Draw + with term.cbreak(): + while selectionInProgress: + menuNavigateDirection = 0 + if (promptFiles): + promptFiles = checkIfPromptFilesExist() + + if needsRender > 0: # Only rerender when changed to prevent flickering + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + needsRender = 0 + + doPotentialMenuCheck(projectStatus=projectStatus, dockerVersion=dockerVersion, promptFiles=promptFiles) + + key = term.inkey(timeout=projectStatusPollRateRefresh) + if key.is_sequence: + if key.name == 'KEY_TAB': + menuNavigateDirection = 1 + if key.name == 'KEY_DOWN': + menuNavigateDirection = 1 + if key.name == 'KEY_UP': + menuNavigateDirection = -1 + if key.name == 'KEY_ENTER': + runSelection(currentMenuItemIndex) + if key.name == 'KEY_ESCAPE': + exitMenu() + + if not menuNavigateDirection == 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + needsRender = 2 + + while not isMenuItemSelectable(mainMenuList, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) diff --git a/scripts/misc_commands.py b/scripts/misc_commands.py new file mode 100755 index 000000000..8c311c5d6 --- /dev/null +++ b/scripts/misc_commands.py @@ -0,0 +1,215 @@ +#!/usr/bin/env python3 + +import signal + +def main(): + from blessed import Terminal + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine + global renderMode + import time + import subprocess + global signal + + global dockerCommandsSelectionInProgress + global mainMenuList + global currentMenuItemIndex + global hideHelpText + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + term = Terminal() + + def setSwapinessTo0(): + print(term.clear()) + print("Set swapiness to 0:") + subprocess.call("./scripts/disable_swap.sh disableswap", shell=True) + print("") + input("Process terminated. Press [Enter] to show menu and continue.") + return True + + def uninstallSwapfile(): + print(term.clear()) + print("Disabling swap...") + setSwapinessTo0() + print("Uninstall Swapfile:") + subprocess.call("./scripts/disable_swap.sh uninstallswap", shell=True) + print("") + input("Process terminated. Press [Enter] to show menu and continue.") + return True + + def installLog2Ram(): + print(term.clear()) + print("Install log2ram:") + subprocess.call("./scripts/install_log2ram.sh", shell=True) + print("") + input("Process terminated. Press [Enter] to show menu and continue.") + return True + + def installGithubSshKeys(): + print(term.clear()) + print("Install Github SSH Keys:") + print("bash ./scripts/install_ssh_keys.sh") + subprocess.call("bash ./scripts/install_ssh_keys.sh", shell=True) + print("") + input("Process terminated. 
Press [Enter] to show menu and continue.") + return True + + def goBack(): + global dockerCommandsSelectionInProgress + global needsRender + dockerCommandsSelectionInProgress = False + needsRender = 1 + return True + + mainMenuList = [ + ["Set swapiness to 0 (Disables swap until restart)", setSwapinessTo0], + ["Uninstall Swapfile (Disables swap)", uninstallSwapfile], + ["Install log2ram", installLog2Ram], + ["Install SSH keys from your Github", installGithubSshKeys], + ["Back", goBack] + ] + + hotzoneLocation = [7, 0] # Top text + + dockerCommandsSelectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + + # Render Modes: + # 0 = No render needed + # 1 = Full render + # 2 = Hotzone only + needsRender = 1 + + def onResize(sig, action): + global mainMenuList + global currentMenuItemIndex + mainRender(1, mainMenuList, currentMenuItemIndex) + + def renderHotZone(term, menu, selection, hotzoneLocation): + lineLengthAtTextStart = 71 + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + for (index, menuItem) in enumerate(menu): + toPrint = "" + if index == selection: + toPrint += ('{bv} -> {t.blue_on_green} {title} {t.normal} <-'.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + else: + toPrint += ('{bv} {t.normal} {title} '.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + + for i in range(lineLengthAtTextStart - len(menuItem[0])): + toPrint += " " + + toPrint += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + + toPrint = term.center(toPrint) + + print(toPrint) + + def mainRender(needsRender, menu, selection): + term = Terminal() + + if needsRender == 1: + print(term.clear()) + print(term.move_y(6 - hotzoneLocation[0])) + print(term.black_on_cornsilk4(term.center('IOTstack Miscellaneous Commands'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select Command to run {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + if term.height < 30: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Not enough vertical room to render controls help text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Enter] to run command {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to go back to main menu {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + def runSelection(selection): + import types + if len(mainMenuList[selection]) > 1 and 
isinstance(mainMenuList[selection][1], types.FunctionType): + mainMenuList[selection][1]() + mainRender(1, mainMenuList, currentMenuItemIndex) + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(mainMenuList[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 2: + if menu[index][2]["skip"] == True: + return False + return True + + if __name__ == 'builtins': + global signal + term = Terminal() + signal.signal(signal.SIGWINCH, onResize) + with term.fullscreen(): + menuNavigateDirection = 0 + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + dockerCommandsSelectionInProgress = True + with term.cbreak(): + while dockerCommandsSelectionInProgress: + menuNavigateDirection = 0 + + if not needsRender == 0: # Only rerender when changed to prevent flickering + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + menuNavigateDirection += 1 + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_ENTER': + runSelection(currentMenuItemIndex) + if dockerCommandsSelectionInProgress == False: + return True + if key.name == 'KEY_ESCAPE': + dockerCommandsSelectionInProgress = False + return True + elif key: + if key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, mainMenuList, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + needsRender = 2 + + while not isMenuItemSelectable(mainMenuList, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + return True + + return True + +originalSignalHandler = signal.getsignal(signal.SIGINT) +main() +signal.signal(signal.SIGWINCH, originalSignalHandler) diff --git a/scripts/native_installs.py b/scripts/native_installs.py new file mode 100755 index 000000000..a55ce9bfe --- /dev/null +++ b/scripts/native_installs.py @@ -0,0 +1,231 @@ +#!/usr/bin/env python3 +import signal + +def main(): + from blessed import Terminal + from deps.chars import specialChars, commonTopBorder, commonBottomBorder, commonEmptyLine + global renderMode + import time + import subprocess + + global signal + global dockerCommandsSelectionInProgress + global mainMenuList + global currentMenuItemIndex + global screenActive + global hideHelpText + global needsRender + + try: # If not already set, then set it. + hideHelpText = hideHelpText + except: + hideHelpText = False + + term = Terminal() + hotzoneLocation = [7, 0] # Top text + + def onResize(sig, action): + global mainMenuList + global currentMenuItemIndex + if (screenActive): + mainRender(1, mainMenuList, currentMenuItemIndex) + + def installRtl433(): + print(term.clear()) + print("Install RTL_433") + print("bash ./.native/rtl_433.sh") + subprocess.call("bash ./.native/rtl_433.sh", shell=True) + print("") + input("Process terminated. Press [Enter] to show menu and continue.") + return True + + def installRpiEasy(): + print(term.clear()) + print("Install RPIEasy") + print("bash ./.native/rpieasy.sh") + subprocess.call("bash ./.native/rpieasy.sh", shell=True) + print("") + input("Process terminated. 
Press [Enter] to show menu and continue.") + return True + + def installDockerAndCompose(): + print(term.clear()) + print("Install docker") + print("Install docker-compose") + print("bash ./scripts/install_docker.sh install") + subprocess.call("bash ./scripts/install_docker.sh install", shell=True) + print("") + input("Process terminated. Press [Enter] to show menu and continue.") + return True + + def upgradeDockerAndCompose(): + print(term.clear()) + print("Install docker") + print("Install docker-compose") + print("bash ./scripts/install_docker.sh upgrade") + subprocess.call("bash ./scripts/install_docker.sh upgrade", shell=True) + print("") + input("Process terminated. Press [Enter] to show menu and continue.") + return True + + def goBack(): + global dockerCommandsSelectionInProgress + global needsRender + global screenActive + screenActive = False + dockerCommandsSelectionInProgress = False + needsRender = 1 + return True + + mainMenuList = [ + ["RTL_433", installRtl433], + ["RPIEasy", installRpiEasy], + ["Upgrade Docker and Docker-Compose", upgradeDockerAndCompose], + ["Install Docker and Docker-Compose", installDockerAndCompose], + ["Back", goBack] + ] + + dockerCommandsSelectionInProgress = True + currentMenuItemIndex = 0 + menuNavigateDirection = 0 + + # Render Modes: + # 0 = No render needed + # 1 = Full render + # 2 = Hotzone only + needsRender = 1 + + def renderHotZone(term, menu, selection, hotzoneLocation): + print(term.move(hotzoneLocation[0], hotzoneLocation[1])) + lineLengthAtTextStart = 71 + + for (index, menuItem) in enumerate(menu): + toPrint = "" + if index == selection: + toPrint += ('{bv} -> {t.blue_on_green} {title} {t.normal} <-'.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + else: + toPrint += ('{bv} {t.normal} {title} '.format(t=term, title=menuItem[0], bv=specialChars[renderMode]["borderVertical"])) + + for i in range(lineLengthAtTextStart - len(menuItem[0])): + toPrint += " " + + toPrint += "{bv}".format(bv=specialChars[renderMode]["borderVertical"]) + + toPrint = term.center(toPrint) + + print(toPrint) + + def mainRender(needsRender, menu, selection): + term = Terminal() + + if needsRender == 1: + print(term.clear()) + print(term.move_y(6 - hotzoneLocation[0])) + print(term.black_on_cornsilk4(term.center('Native Installs'))) + print("") + print(term.center(commonTopBorder(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Select service to install {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + + if needsRender >= 1: + renderHotZone(term, menu, selection, hotzoneLocation) + + if needsRender == 1: + print(term.center(commonEmptyLine(renderMode))) + if not hideHelpText: + if term.height < 30: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Not enough vertical room to render controls help text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + else: + print(term.center(commonEmptyLine(renderMode))) + print(term.center("{bv} Controls: {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Up] and [Down] to move selection cursor {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [H] Show/hide this text {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + 
print(term.center("{bv} [Enter] to run command {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center("{bv} [Escape] to go back to main menu {bv}".format(bv=specialChars[renderMode]["borderVertical"]))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonEmptyLine(renderMode))) + print(term.center(commonBottomBorder(renderMode))) + + + + + def runSelection(selection): + global needsRender + import types + if len(mainMenuList[selection]) > 1 and isinstance(mainMenuList[selection][1], types.FunctionType): + mainMenuList[selection][1]() + needsRender = 1 + else: + print(term.green_reverse('IOTstack Error: No function assigned to menu item: "{}"'.format(mainMenuList[selection][0]))) + + def isMenuItemSelectable(menu, index): + if len(menu) > index: + if len(menu[index]) > 2: + if menu[index][2]["skip"] == True: + return False + return True + + if __name__ == 'builtins': + term = Terminal() + with term.fullscreen(): + global screenActive + screenActive = True + signal.signal(signal.SIGWINCH, onResize) + menuNavigateDirection = 0 + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + dockerCommandsSelectionInProgress = True + with term.cbreak(): + while dockerCommandsSelectionInProgress: + menuNavigateDirection = 0 + + if not needsRender == 0: # Only rerender when changed to prevent flickering + mainRender(needsRender, mainMenuList, currentMenuItemIndex) + needsRender = 0 + + key = term.inkey(esc_delay=0.05) + if key.is_sequence: + if key.name == 'KEY_TAB': + menuNavigateDirection += 1 + if key.name == 'KEY_DOWN': + menuNavigateDirection += 1 + if key.name == 'KEY_UP': + menuNavigateDirection -= 1 + if key.name == 'KEY_ENTER': + runSelection(currentMenuItemIndex) + if dockerCommandsSelectionInProgress == False: + screenActive = False + return True + mainRender(1, mainMenuList, currentMenuItemIndex) + if key.name == 'KEY_ESCAPE': + screenActive = False + dockerCommandsSelectionInProgress = False + return True + elif key: + if key == 'h': # H pressed + if hideHelpText: + hideHelpText = False + else: + hideHelpText = True + mainRender(1, mainMenuList, currentMenuItemIndex) + + if menuNavigateDirection != 0: # If a direction was pressed, find next selectable item + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + needsRender = 2 + + while not isMenuItemSelectable(mainMenuList, currentMenuItemIndex): + currentMenuItemIndex += menuNavigateDirection + currentMenuItemIndex = currentMenuItemIndex % len(mainMenuList) + screenActive = False + return True + + screenActive = False + return True + +main() + diff --git a/scripts/nodered_list_installed_nodes.sh b/scripts/nodered_list_installed_nodes.sh new file mode 100755 index 000000000..098c2ac39 --- /dev/null +++ b/scripts/nodered_list_installed_nodes.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash + +# where Dockerfile installs components INSIDE the container +DOCKERFILE="/usr/src/node-red" + +# paths to the persistent store +PERSISTENT_INTERNAL="/data" +PERSISTENT_EXTERNAL="$HOME/IOTstack/volumes/nodered/data" + +# the folder in each case containing node modules +MODULES="node_modules" + +# assume no modules are blocked +unset BLOCKED + +# start the command hint +UNBLOCK="docker exec -w /data nodered npm uninstall" + +# fetch what npm knows about components that form part of the image +echo -e "\nFetching list of candidates installed via Dockerfile" +CANDIDATES=$(docker exec nodered bash -c "cd \"$DOCKERFILE\" ; npm list --depth=0 
--parseable 2>/dev/null")
+
+# report
+echo -e "\nComponents built into the image (via Dockerfile)"
+PARENT=$(basename "$DOCKERFILE")
+for CANDIDATE in $CANDIDATES; do
+   COMPONENT=$(basename "$CANDIDATE")
+   if [ "$COMPONENT" != "$PARENT" ] ; then
+      if [ -d "$PERSISTENT_EXTERNAL/$MODULES/$COMPONENT" ] ; then
+         # yes! the internal node is blocked by the external node
+         echo "  BLOCKED: $COMPONENT"
+         BLOCKED=true
+         UNBLOCK="$UNBLOCK $COMPONENT"
+      else
+         # no! so that means it's active
+         echo "   ACTIVE: $COMPONENT"
+      fi
+   fi
+done
+
+# fetch what npm knows about components that are in the persistent store
+echo -e "\nFetching list of candidates installed via Manage Palette or npm"
+CANDIDATES=$(docker exec nodered bash -c "cd \"$PERSISTENT_INTERNAL\" ; npm list --depth=0 --parseable")
+
+# report
+echo -e "\nComponents in persistent store at\n   $PERSISTENT_EXTERNAL/$MODULES"
+PARENT=$(basename "$PERSISTENT_INTERNAL")
+for CANDIDATE in $CANDIDATES; do
+   COMPONENT=$(basename "$CANDIDATE")
+   if [ "$COMPONENT" != "$PARENT" ] ; then
+      echo "   $COMPONENT"
+   fi
+done
+
+echo ""
+
+if [ -n "$BLOCKED" ] ; then
+   echo "Blocking nodes can be removed by running the following commands"
+   echo "\$ $UNBLOCK"
+   echo "\$ docker-compose -f ~/IOTstack/docker-compose.yml restart nodered"
+fi
diff --git a/scripts/nodered_version_check.sh b/scripts/nodered_version_check.sh
new file mode 100755
index 000000000..761c76668
--- /dev/null
+++ b/scripts/nodered_version_check.sh
@@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+
+# the name of this script is
+SCRIPT=$(basename "$0")
+
+# default image is
+DEFAULTIMAGE="iotstack-nodered:latest"
+
+# zero or one arguments supported
+if [ "$#" -gt 1 ]; then
+   echo "Usage: $SCRIPT {image:tag}"
+   echo "   eg: $SCRIPT $DEFAULTIMAGE"
+   exit -1
+fi
+
+# image can be passed as first argument, else default
+IMAGE=${1:-"$DEFAULTIMAGE"}
+
+# fetch latest version details from GitHub
+LATEST=$(wget -O - -q https://raw.githubusercontent.com/node-red/node-red-docker/master/package.json | jq -r .version)
+
+# figure out the version in the local image
+INSTALLED=$(docker image inspect "$IMAGE" | jq -r .[0].Config.Labels[\"org.label-schema.version\"])
+
+# compare versions and report result
+if [ "$INSTALLED" = "$LATEST" ] ; then
+
+   echo "Node-Red is up-to-date (version $INSTALLED)"
+
+else
+
+/bin/cat <<-COLLECT_TEXT
+
+	====================================================================
+	Node-Red version number has changed on GitHub:
+
+	 Local Version: $INSTALLED
+	GitHub Version: $LATEST
+
+	This means a new version MIGHT be available on Dockerhub. Check here:
+
+	https://hub.docker.com/r/nodered/node-red/tags?page=1&ordering=last_updated
+
+	When an updated version is actually available, proceed like this:
+
+	$ REBUILD nodered
+	$ UP nodered
+	$ docker system prune
+	====================================================================
+
+COLLECT_TEXT
+
+fi
+
diff --git a/scripts/prune-images.sh b/scripts/prune-images.sh
deleted file mode 100755
index 7c16d8c73..000000000
--- a/scripts/prune-images.sh
+++ /dev/null
@@ -1 +0,0 @@
-docker image prune -a
diff --git a/scripts/prune-volumes.sh b/scripts/prune-volumes.sh
deleted file mode 100755
index bfef730aa..000000000
--- a/scripts/prune-volumes.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-docker system prune --volumes
-
diff --git a/scripts/restart.sh b/scripts/restart.sh
deleted file mode 100755
index 2c708fbbf..000000000
--- a/scripts/restart.sh
+++ /dev/null
@@ -1 +0,0 @@
-docker-compose restart
diff --git a/scripts/restore.sh b/scripts/restore.sh
new file mode 100755
index 000000000..02c9e8a9c
--- /dev/null
+++ b/scripts/restore.sh
@@ -0,0 +1,91 @@
+#!/bin/bash
+
+# Usage:
+#   ./scripts/restore.sh [FILENAME=backup.tar.gz] {noask}
+
+# Examples:
+#   ./scripts/restore.sh
+#     Will restore from the backup file "./backups/backup.tar.gz"
+#
+#   ./scripts/restore.sh some_other_backup.tar.gz
+#     Will restore from the backup file "./backups/some_other_backup.tar.gz"
+#
+#   ./scripts/restore.sh some_other_backup.tar.gz noask
+#     Will restore from the backup file "./backups/some_other_backup.tar.gz" and will not warn that data will be deleted.
+#
+
+if [ ! -f "./menu.sh" ]; then
+  echo "./menu.sh file was not found. Ensure that you are running this from IOTstack's directory."
+  exit 1
+fi
+
+echo "Restoring from a backup will erase all existing data."
+read -p "Continue [y/N]? " -n 1 -r PROCEED_WITH_RESTORE
+echo ""
+if [[ ! $PROCEED_WITH_RESTORE =~ ^[Yy]$ ]]; then
+  echo "Restore Cancelled."
+  exit 0
+fi
+
+RESTOREFILENAME="backup.tar.gz"
+RESTOREFILENAME=${1:-$RESTOREFILENAME}
+
+BASEDIR=./backups
+TMPDIR=./.tmp
+BASERESTOREFILE="$(date +"%Y-%m-%d_%H%M")"
+RESTOREFILE="$BASEDIR/$RESTOREFILENAME"
+LOGFILE="$BASEDIR/logs/restore_$BASERESTOREFILE.log"
+BACKUPFILE="$BASEDIR/backup/backup_$BASERESTOREFILE.tar.gz"
+
+[ -d ./backups ] || mkdir -p ./backups
+[ -d ./backups/logs ] || mkdir -p ./backups/logs
+
+[ ! -d ./.tmp ] || sudo rm -rf ./.tmp
+[ -d ./.tmp ] || mkdir -p ./.tmp
+
+touch $LOGFILE
+echo "" > $LOGFILE
+echo "### IOTstack restore generator log ###" >> $LOGFILE
+echo "Started At: $(date +"%Y-%m-%dT%H-%M-%S")" >> $LOGFILE
+echo "Current Directory: $(pwd)" >> $LOGFILE
+echo "Restore Type: Full" >> $LOGFILE
+
+if [ ! -f $RESTOREFILE ]; then
+  echo "File: '$RESTOREFILE' doesn't exist. Cancelling restore."
+ echo "Finished At: $(date +"%Y-%m-%dT%H-%M-%S")" >> $LOGFILE + echo "" >> $LOGFILE + + echo "" >> $LOGFILE + echo "### End of log ###" >> $LOGFILE + exit 2 +fi + +# Remove old files and folders +sudo rm -rf ./services/ >> $LOGFILE 2>&1 +sudo rm -rf ./volumes/ >> $LOGFILE 2>&1 +sudo rm -rf ./compose-override.yml >> $LOGFILE 2>&1 +sudo rm -rf ./docker-compose.yml >> $LOGFILE 2>&1 +sudo rm -rf ./extra/ >> $LOGFILE 2>&1 +sudo rm -rf ./postbuild.sh >> $LOGFILE 2>&1 +sudo rm -rf ./pre_backup.sh >> $LOGFILE 2>&1 +sudo rm -rf ./post_backup.sh >> $LOGFILE 2>&1 +sudo rm -rf ./post_restore.sh >> $LOGFILE 2>&1 +sudo rm -rf ./post_restore.sh >> $LOGFILE 2>&1 + +sudo tar -zxvf \ + $RESTOREFILE >> $LOGFILE 2>&1 + +echo "" >> $LOGFILE + +echo "Executing post restore scripts" >> $LOGFILE +bash ./scripts/backup_restore/post_restore_complete.sh >> $LOGFILE 2>&1 +echo "" > $LOGFILE + +echo "Finished At: $(date +"%Y-%m-%dT%H-%M-%S")" >> $LOGFILE +echo "" >> $LOGFILE + +echo "" >> $LOGFILE +echo "### End of log ###" >> $LOGFILE +echo "" >> $LOGFILE + +cat $LOGFILE diff --git a/scripts/start.sh b/scripts/start.sh deleted file mode 100755 index 5177d11ee..000000000 --- a/scripts/start.sh +++ /dev/null @@ -1 +0,0 @@ -docker-compose up -d \ No newline at end of file diff --git a/scripts/stop-all.sh b/scripts/stop-all.sh deleted file mode 100755 index d13d1b76d..000000000 --- a/scripts/stop-all.sh +++ /dev/null @@ -1 +0,0 @@ -docker container stop $(docker container ls -aq) diff --git a/scripts/stop.sh b/scripts/stop.sh deleted file mode 100755 index 58694d086..000000000 --- a/scripts/stop.sh +++ /dev/null @@ -1 +0,0 @@ -docker-compose down \ No newline at end of file diff --git a/scripts/update.sh b/scripts/update.sh deleted file mode 100755 index ff796b07b..000000000 --- a/scripts/update.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -echo "Stopping containers" -docker-compose down - -echo "Downloading latest images from docker hub ... 
-docker-compose pull
-
-echo "Building images if needed"
-docker-compose build
-
-echo "Starting stack up again"
-docker-compose up -d
-
-echo "Consider running prune-images to free up space"
diff --git a/scripts/yaml_merge.py b/scripts/yaml_merge.py
new file mode 100755
index 000000000..ad7477ac0
--- /dev/null
+++ b/scripts/yaml_merge.py
@@ -0,0 +1,72 @@
+import sys
+import traceback
+import ruamel.yaml
+
+yaml = ruamel.yaml.YAML()
+yaml.preserve_quotes = True
+yaml.explicit_start = True
+
+if len(sys.argv) > 1 and sys.argv[1] == "--pyyaml-version":
+    try:
+        print("pyyaml", ruamel.yaml.__version__)
+        sys.exit(0)
+    except SystemExit:
+        sys.exit(0)
+    except:
+        print("could not get pyyaml version")
+        sys.exit(3)
+
+if len(sys.argv) < 4:
+    print("Error: Not enough args")
+    print("Usage:")
+    print(" yaml_merge.py [inputFile] [mergeFile] [outputFile]")
+    print("")
+    print("Example:")
+    print(" yaml_merge.py ./.tmp/docker-compose.tmp.yml ./compose-override.yml ./docker-compose.yml")
+    sys.exit(4)
+
+try:
+    pathTempDockerCompose = sys.argv[1]
+    pathOverride = sys.argv[2]
+    pathOutput = sys.argv[3]
+
+    def mergeYaml(priorityYaml, defaultYaml):
+        finalYaml = {}
+        if isinstance(defaultYaml, dict):
+            for dk, dv in defaultYaml.items():
+                if dk in priorityYaml:
+                    finalYaml[dk] = mergeYaml(priorityYaml[dk], dv)
+                else:
+                    finalYaml[dk] = dv
+            for pk, pv in priorityYaml.items():
+                if pk in finalYaml:
+                    finalYaml[pk] = mergeYaml(finalYaml[pk], pv)
+                else:
+                    finalYaml[pk] = pv
+        else:
+            finalYaml = defaultYaml
+        return finalYaml
+
+    with open(r'%s' % pathTempDockerCompose) as fileTempDockerCompose:
+        yamlTempDockerCompose = yaml.load(fileTempDockerCompose)
+
+    with open(r'%s' % pathOverride) as fileOverride:
+        yamlOverride = yaml.load(fileOverride)
+
+    mergedYaml = mergeYaml(yamlOverride, yamlTempDockerCompose)
+
+    with open(r'%s' % pathOutput, 'w') as outputFile:
+        yaml.dump(mergedYaml, outputFile)
+
+    sys.exit(0)
+except SystemExit:
+    sys.exit(0)
+except:
+    print("Something went wrong: ")
+    print(sys.exc_info())
+    print(traceback.print_exc())
+    print("")
+    print("")
+    print("PyYaml Version: ", ruamel.yaml.__version__)
+    print("")
+    sys.exit(2)
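
The new scripts/yaml_merge.py is what lets a user-maintained compose-override.yml take priority over the generated docker-compose.yml: keys present in the override replace the generated values, nested mappings are merged recursively, and anything missing from the override is kept from the generated file. The snippet below is only a minimal standalone sketch of that merge idea; it is not part of the patch, it is not byte-for-byte identical to mergeYaml above, and the file names and merge() helper are placeholders for illustration. The real script takes its three paths on the command line, e.g. python3 scripts/yaml_merge.py ./.tmp/docker-compose.tmp.yml ./compose-override.yml ./docker-compose.yml.

# Minimal, illustrative sketch of an "override wins" recursive merge.
# File names are placeholders; they are not part of the IOTstack patch.
import ruamel.yaml

yaml = ruamel.yaml.YAML()
yaml.preserve_quotes = True

def merge(priority, default):
    # Mappings are merged key by key; for any other type (scalars, lists)
    # the override value simply replaces the generated one.
    if not isinstance(priority, dict) or not isinstance(default, dict):
        return priority
    merged = dict(default)
    for key, value in priority.items():
        merged[key] = merge(value, default[key]) if key in default else value
    return merged

with open("docker-compose.generated.yml") as generated:
    base = yaml.load(generated)

with open("compose-override.yml") as override:
    overrides = yaml.load(override)

with open("docker-compose.yml", "w") as out:
    yaml.dump(merge(overrides, base), out)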