diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..1062234
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,22 @@
+# Ignore everything
+*
+
+# Allow files and directories
+!/main.py
+!/Pipfile
+!/Pipfile.lock
+!/modules
+!/util
+!/scripts
+!/config
+!/VERSION
+!/start.sh
+!/exclude-file.txt
+
+# Ignore unnecessary files inside allowed directories
+# This should go after the allowed directories
+**/*~
+**/*.log
+**/.DS_Store
+**/Thumbs.db
+**/config.yml
\ No newline at end of file
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 0000000..165e147
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1 @@
+custom: ['https://www.buymeacoffee.com/drazzilb08']
\ No newline at end of file
diff --git a/.github/workflows/dev.yml b/.github/workflows/dev.yml
new file mode 100644
index 0000000..02734d8
--- /dev/null
+++ b/.github/workflows/dev.yml
@@ -0,0 +1,53 @@
+name: Docker Develop Release
+
+on:
+ push:
+ branches: [ dev ]
+ paths-ignore:
+ - '**/README.md'
+ - '**/.github/**'
+ pull_request:
+ types:
+ - closed
+ branches:
+ - dev
+ paths-ignore:
+ - '**/README.md'
+ - '**/.github/**'
+
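+# Note: this workflow assumes the DOCKER_USERNAME, DOCKER_TOKEN, GH_USERNAME and GH_TOKEN repository secrets referenced below are configured.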
+jobs:
+
+ docker-dev:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+ - name: Login to Docker Hub
+ uses: docker/login-action@v3
+ with:
+ username: ${{ secrets.DOCKER_USERNAME }}
+ password: ${{ secrets.DOCKER_TOKEN }}
+ - name: Login to GitHub Container Registry
+ uses: docker/login-action@v3
+ with:
+ registry: ghcr.io
+ username: ${{ secrets.GH_USERNAME }}
+ password: ${{ secrets.GH_TOKEN }}
+ - name: Build and push
+ id: docker_build
+ uses: docker/build-push-action@v5
+ with:
+ context: .
+ file: ./Dockerfile
+ platforms: linux/amd64,linux/arm64
+ build-args: |
+ "BRANCH=dev"
+ push: true
+ tags: |
+ ${{ secrets.DOCKER_USERNAME }}/daps:dev
+ ghcr.io/drazzilb08/daps:dev
diff --git a/.github/workflows/inactive.yml b/.github/workflows/inactive.yml
new file mode 100644
index 0000000..43268c8
--- /dev/null
+++ b/.github/workflows/inactive.yml
@@ -0,0 +1,22 @@
+name: Close inactive issues
+on:
+ schedule:
+ - cron: "30 1 * * *"
+
+jobs:
+ close-issues:
+ runs-on: ubuntu-latest
+ permissions:
+ issues: write
+ pull-requests: write
+ steps:
+ - uses: actions/stale@v5
+ with:
+ days-before-issue-stale: 7
+ days-before-issue-close: 3
+ stale-issue-label: "stale"
+          stale-issue-message: "This issue is stale because it has been open for 7 days with no activity."
+          close-issue-message: "This issue was closed because it has been inactive for 3 days since being marked as stale."
+ days-before-pr-stale: -1
+ days-before-pr-close: -1
+ repo-token: ${{ secrets.GH_TOKEN }}
diff --git a/.github/workflows/latest.yml b/.github/workflows/latest.yml
new file mode 100644
index 0000000..7b8931d
--- /dev/null
+++ b/.github/workflows/latest.yml
@@ -0,0 +1,53 @@
+name: Docker Latest Release
+
+on:
+ push:
+ branches: [ master ]
+ paths-ignore:
+ - '**/README.md'
+ - '**/.github/**'
+ pull_request:
+ types:
+ - closed
+ branches:
+ - master
+ paths-ignore:
+ - '**/README.md'
+ - '**/.github/**'
+
+jobs:
+
+ docker-latest:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+ - name: Login to Docker Hub
+ uses: docker/login-action@v3
+ with:
+ username: ${{ secrets.DOCKER_USERNAME }}
+ password: ${{ secrets.DOCKER_TOKEN }}
+ - name: Login to GitHub Container Registry
+ uses: docker/login-action@v3
+ with:
+ registry: ghcr.io
+ username: ${{ secrets.GH_USERNAME }}
+ password: ${{ secrets.GH_TOKEN }}
+ - name: Build and push
+ id: docker_build
+ uses: docker/build-push-action@v5
+ with:
+ context: .
+ file: ./Dockerfile
+ platforms: linux/amd64,linux/arm64
+ build-args: |
+            "BRANCH=${{ github.ref_name }}"
+ push: true
+ tags: |
+ ${{ secrets.DOCKER_USERNAME }}/daps:latest
+ ghcr.io/drazzilb08/daps:latest
diff --git a/.github/workflows/tag.yml b/.github/workflows/tag.yml
new file mode 100644
index 0000000..8a8b2a4
--- /dev/null
+++ b/.github/workflows/tag.yml
@@ -0,0 +1,18 @@
+name: Tag
+
+on:
+ push:
+ branches: [ master ]
+
+jobs:
+ tag-new-versions:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ token: ${{ secrets.GH_PAT }}
+ fetch-depth: 2
+ - uses: salsify/action-detect-and-tag-new-version@v1.0.3
+ with:
+ version-command: |
+ cat VERSION
\ No newline at end of file
diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml
new file mode 100644
index 0000000..2da1fa4
--- /dev/null
+++ b/.github/workflows/version.yml
@@ -0,0 +1,56 @@
+name: Docker Version Release
+
+on:
+ push:
+ tags:
+ - v*
+
+jobs:
+
+ docker-version:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ - name: Get the version
+ id: get_version
+ run: echo "VERSION=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_OUTPUT
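+      # e.g. (illustrative) a push of refs/tags/v0.2.0 yields VERSION=v0.2.0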
+
+ - name: Extract branch name
+ shell: bash
+ run: echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> $GITHUB_OUTPUT
+ id: extract_branch
+
+ - name: Login to Docker Hub
+ uses: docker/login-action@v3
+ with:
+ username: ${{ secrets.DOCKER_USERNAME }}
+ password: ${{ secrets.DOCKER_TOKEN }}
+ - name: Login to GitHub Container Registry
+ uses: docker/login-action@v3
+ with:
+ registry: ghcr.io
+ username: ${{ secrets.GH_USERNAME }}
+ password: ${{ secrets.GH_TOKEN }}
+
+ - name: Build and push
+ id: docker_build
+ uses: docker/build-push-action@v5
+ with:
+ context: .
+ file: ./Dockerfile
+ platforms: linux/amd64,linux/arm64
+ build-args: |
+ "BRANCH=${{ steps.extract_branch.outputs.branch }}"
+ push: true
+ tags: |
+ ${{ secrets.DOCKER_USERNAME }}/daps:${{ steps.get_version.outputs.VERSION }}
+ ghcr.io/drazzilb08/daps:${{ steps.get_version.outputs.VERSION }}
diff --git a/.gitignore b/.gitignore
index ec9f7d9..ec38561 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,34 +1,16 @@
-+# Byte-compiled / optimized / DLL files
+# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
-*$py.class
# C extensions
*.so
# Distribution / packaging
-.Python
-build/
-develop-eggs/
dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-share/python-wheels/
*.egg-info/
-.installed.cfg
*.egg
-MANIFEST
# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
@@ -49,116 +31,35 @@ coverage.xml
*.py,cover
.hypothesis/
.pytest_cache/
-cover/
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-*.log
-local_settings.py
-db.sqlite3
-db.sqlite3-journal
-
-# Flask stuff:
-instance/
-.webassets-cache
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-.pybuilder/
-target/
-
-# Jupyter Notebook
-.ipynb_checkpoints
-
-# IPython
-profile_default/
-ipython_config.py
-
-# pyenv
-# For a library or package, you might want to ignore these files since the code is
-# intended to run in multiple environments; otherwise, check them in:
-# .python-version
-
-# pipenv
-# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
-# However, in case of collaboration, if having platform-specific dependencies or dependencies
-# having no cross-platform support, pipenv may install dependencies that don't work, or not
-# install all needed dependencies.
-#Pipfile.lock
-
-# poetry
-# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
-# This is especially recommended for binary packages to ensure reproducibility, and is more
-# commonly ignored for libraries.
-# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
-#poetry.lock
-
-# PEP 582; used by e.g. github.com/David-OConnor/pyflow
-__pypackages__/
-
-# Celery stuff
-celerybeat-schedule
-celerybeat.pid
-
-# SageMath parsed files
-*.sage.py
-
-# Environments
-.env
-.venv
-+env/
+# Virtual environments
venv/
+env/
ENV/
-env.bak/
-venv.bak/
-
-# Spyder project settings
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
+.venv/
-# mkdocs documentation
-/site
+# Bash
+*.sh~
-# mypy
-.mypy_cache/
-.dmypy.json
-dmypy.json
-
-# Pyre type checker
-.pyre/
+# IDEs / editors
+.vscode/
+.idea/
+*.swp
+*.swo
+*.swn
+*.bak
+
+# Ignore Directories
+.archives/
+.extra_scripts/
+screenshots/
+logs/
+web/
-# pytype static type analyzer
-.pytype/
-# Cython debug symbols
-cython_debug/
+# Ignore Files
+**/config.yml
+**/.DS_Store
+**/TODO.*
+**test**
-# PyCharm
-# JetBrains specific template is maintainted in a separate JetBrains.gitignore that can
-# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
-# and can be added to the global gitignore or merged into this file. For a more nuclear
-# option (not recommended) you can uncomment the following to ignore the entire idea folder.
-#.idea/
-.vscode/
-.DS_Store
-input/
-output/
-logs/
-*test*
-*config.yml
-*archives*
-tmp/
-bash-scripts/sync.sh
-python-scripts/change_border.py
-python-scripts/torrent-maker.py
diff --git a/Dockerfile b/Dockerfile
new file mode 100755
index 0000000..55fd9dc
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,77 @@
+# Stage 1: Create an intermediate image for installing pipenv and converting Pipfile to requirements.txt
+FROM python:3.11-slim as pipenv
+
+# Copy Pipfile and Pipfile.lock to the intermediate image
+COPY Pipfile Pipfile.lock ./
+
+# Install pipenv and use it to generate requirements.txt
+RUN pip3 install --no-cache-dir --upgrade pipenv; \
+ pipenv requirements > requirements.txt
+
+# Debugging: Display the contents of requirements.txt
+RUN cat requirements.txt
+
+# Stage 2: Create an intermediate image for installing Python dependencies from requirements.txt
+FROM python:3.11-slim as python-reqs
+
+# Copy requirements.txt from the pipenv stage to the intermediate image
+COPY --from=pipenv /requirements.txt requirements.txt
+
+# Install gcc for building Python dependencies; install app dependencies
+RUN apt-get update; \
+ apt-get install -y gcc; \
+ pip3 install --no-cache-dir -r requirements.txt
+
+# Stage 3: Create the final image with the application and rclone setup
+FROM python:3.11-slim
+
+# Metadata and labels
+LABEL maintainer="Drazzilb" \
+ description="daps" \
+ org.opencontainers.image.source="https://github.com/Drazzilb08/daps" \
+ org.opencontainers.image.authors="Drazzilb" \
+ org.opencontainers.image.title="daps"
+
+# Copy installed Python packages from the python-reqs stage
+
+COPY --from=python-reqs /usr/local/lib/python3.11/site-packages /usr/local/lib/python3.11/site-packages
+
+ARG BRANCH="master"
+ARG CONFIG_DIR=/config
+
+# Set script environment variables
+ENV CONFIG_DIR=/config
+ENV APPDATA_PATH=/appdata
+ENV LOG_DIR=/config/logs
+ENV TZ=America/Los_Angeles
+ENV BRANCH=${BRANCH}
+ENV DOCKER_ENV=true
+
+# Install wget, curl, unzip, p7zip-full, tzdata, vim, jdupes, jq and nano, then install rclone
+# Delete unnecessary setup files
+RUN set -eux; \
+ rm -f Pipfile Pipfile.lock; \
+ apt-get update; \
+ apt-get install -y --no-install-recommends wget curl unzip p7zip-full tzdata vim jdupes jq nano; \
+ curl https://rclone.org/install.sh | bash
+
+RUN apt-get update; \
+ apt-get install -y --no-install-recommends docker.io; \
+ docker --version
+
+VOLUME /var/run/docker.sock
+VOLUME /config
+VOLUME /data
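+
+# Illustrative usage (not part of the build): the image is typically run with the
+# declared volumes mounted and the timezone set, e.g. (host paths are placeholders):
+#   docker run -d -v /host/config:/config -v /host/data:/data -e TZ=America/Los_Angeles ghcr.io/drazzilb08/daps:latest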
+
+WORKDIR /app
+
+COPY . .
+
+# Create a new user called dockeruser with a fixed UID (100) and GID (99)
+RUN groupadd -g 99 dockeruser; \
+ useradd -u 100 -g 99 dockeruser; \
+ chown -R dockeruser:dockeruser /app;
+
+
+# Entrypoint script
+ENTRYPOINT ["bash", "start.sh"]
\ No newline at end of file
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..e4b74be
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,22 @@
+# Set PIPENV_VERBOSITY=-1 to suppress pipenv's warning about running inside a virtual environment
+export PIPENV_VERBOSITY=-1
+
+# Define a phony target 'install' to create a virtual environment and install dependencies
+.PHONY: install
+install: venv
+ . venv/bin/activate && pipenv install --dev
+
+# Define a phony target 'venv' to create a virtual environment if it doesn't exist
+.PHONY: venv
+venv:
+ test -d venv || python3 -m venv venv
+
+# Define a phony target 'lock' to lock dependencies using pipenv
+.PHONY: lock
+lock:
+ . venv/bin/activate && pipenv lock
+
+# Define a phony target 'lint' to run linting using flake8
+.PHONY: lint
+lint:
+ . venv/bin/activate && pipenv run flake8
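+
+# Typical usage from the repo root (illustrative):
+#   make install   # create the virtual environment and install dependencies
+#   make lint      # run flake8 inside the pipenv environment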
diff --git a/Pipfile b/Pipfile
new file mode 100644
index 0000000..a5f4896
--- /dev/null
+++ b/Pipfile
@@ -0,0 +1,23 @@
+[[source]]
+url = "https://pypi.org/simple"
+verify_ssl = true
+name = "pypi"
+
+[packages]
+requests = "*"
+tqdm = "*"
+pyyaml = "*"
+unidecode = "*"
+qbittorrent-api = "*"
+plexapi = "*"
+pillow = "*"
+prettytable = "*"
+croniter = "*"
+"ruamel.yaml" = "*"
+docker = "*"
+python-dateutil = "*"
+
+[dev-packages]
+
+[requires]
+python_version = "3.11"
diff --git a/Pipfile.lock b/Pipfile.lock
new file mode 100644
index 0000000..6ae9268
--- /dev/null
+++ b/Pipfile.lock
@@ -0,0 +1,449 @@
+{
+ "_meta": {
+ "hash": {
+ "sha256": "768f1659b7b4238ef3d866ca75f6b0c2fbb5a1f1c13ba458b811a5bf9dab44bc"
+ },
+ "pipfile-spec": 6,
+ "requires": {
+ "python_version": "3.11"
+ },
+ "sources": [
+ {
+ "name": "pypi",
+ "url": "https://pypi.org/simple",
+ "verify_ssl": true
+ }
+ ]
+ },
+ "default": {
+ "certifi": {
+ "hashes": [
+ "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f",
+ "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==2024.2.2"
+ },
+ "charset-normalizer": {
+ "hashes": [
+ "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027",
+ "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087",
+ "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786",
+ "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8",
+ "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09",
+ "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185",
+ "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574",
+ "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e",
+ "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519",
+ "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898",
+ "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269",
+ "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3",
+ "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f",
+ "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6",
+ "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8",
+ "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a",
+ "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73",
+ "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc",
+ "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714",
+ "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2",
+ "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc",
+ "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce",
+ "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d",
+ "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e",
+ "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6",
+ "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269",
+ "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96",
+ "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d",
+ "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a",
+ "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4",
+ "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77",
+ "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d",
+ "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0",
+ "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed",
+ "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068",
+ "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac",
+ "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25",
+ "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8",
+ "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab",
+ "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26",
+ "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2",
+ "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db",
+ "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f",
+ "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5",
+ "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99",
+ "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c",
+ "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d",
+ "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811",
+ "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa",
+ "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a",
+ "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03",
+ "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b",
+ "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04",
+ "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c",
+ "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001",
+ "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458",
+ "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389",
+ "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99",
+ "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985",
+ "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537",
+ "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238",
+ "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f",
+ "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d",
+ "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796",
+ "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a",
+ "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143",
+ "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8",
+ "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c",
+ "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5",
+ "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5",
+ "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711",
+ "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4",
+ "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6",
+ "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c",
+ "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7",
+ "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4",
+ "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b",
+ "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae",
+ "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12",
+ "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c",
+ "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae",
+ "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8",
+ "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887",
+ "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b",
+ "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4",
+ "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f",
+ "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5",
+ "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33",
+ "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519",
+ "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"
+ ],
+ "markers": "python_full_version >= '3.7.0'",
+ "version": "==3.3.2"
+ },
+ "croniter": {
+ "hashes": [
+ "sha256:78bf110a2c7dbbfdd98b926318ae6c64a731a4c637c7befe3685755110834746",
+ "sha256:8bff16c9af4ef1fb6f05416973b8f7cb54997c02f2f8365251f9bf1dded91866"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
+ "version": "==2.0.2"
+ },
+ "docker": {
+ "hashes": [
+ "sha256:12ba681f2777a0ad28ffbcc846a69c31b4dfd9752b47eb425a274ee269c5e14b",
+ "sha256:323736fb92cd9418fc5e7133bc953e11a9da04f4483f828b527db553f1e7e5a3"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.8'",
+ "version": "==7.0.0"
+ },
+ "idna": {
+ "hashes": [
+ "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca",
+ "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"
+ ],
+ "markers": "python_version >= '3.5'",
+ "version": "==3.6"
+ },
+ "packaging": {
+ "hashes": [
+ "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5",
+ "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==23.2"
+ },
+ "pillow": {
+ "hashes": [
+ "sha256:0304004f8067386b477d20a518b50f3fa658a28d44e4116970abfcd94fac34a8",
+ "sha256:0689b5a8c5288bc0504d9fcee48f61a6a586b9b98514d7d29b840143d6734f39",
+ "sha256:0eae2073305f451d8ecacb5474997c08569fb4eb4ac231ffa4ad7d342fdc25ac",
+ "sha256:0fb3e7fc88a14eacd303e90481ad983fd5b69c761e9e6ef94c983f91025da869",
+ "sha256:11fa2e5984b949b0dd6d7a94d967743d87c577ff0b83392f17cb3990d0d2fd6e",
+ "sha256:127cee571038f252a552760076407f9cff79761c3d436a12af6000cd182a9d04",
+ "sha256:154e939c5f0053a383de4fd3d3da48d9427a7e985f58af8e94d0b3c9fcfcf4f9",
+ "sha256:15587643b9e5eb26c48e49a7b33659790d28f190fc514a322d55da2fb5c2950e",
+ "sha256:170aeb00224ab3dc54230c797f8404507240dd868cf52066f66a41b33169bdbe",
+ "sha256:1b5e1b74d1bd1b78bc3477528919414874748dd363e6272efd5abf7654e68bef",
+ "sha256:1da3b2703afd040cf65ec97efea81cfba59cdbed9c11d8efc5ab09df9509fc56",
+ "sha256:1e23412b5c41e58cec602f1135c57dfcf15482013ce6e5f093a86db69646a5aa",
+ "sha256:2247178effb34a77c11c0e8ac355c7a741ceca0a732b27bf11e747bbc950722f",
+ "sha256:257d8788df5ca62c980314053197f4d46eefedf4e6175bc9412f14412ec4ea2f",
+ "sha256:3031709084b6e7852d00479fd1d310b07d0ba82765f973b543c8af5061cf990e",
+ "sha256:322209c642aabdd6207517e9739c704dc9f9db943015535783239022002f054a",
+ "sha256:322bdf3c9b556e9ffb18f93462e5f749d3444ce081290352c6070d014c93feb2",
+ "sha256:33870dc4653c5017bf4c8873e5488d8f8d5f8935e2f1fb9a2208c47cdd66efd2",
+ "sha256:35bb52c37f256f662abdfa49d2dfa6ce5d93281d323a9af377a120e89a9eafb5",
+ "sha256:3c31822339516fb3c82d03f30e22b1d038da87ef27b6a78c9549888f8ceda39a",
+ "sha256:3eedd52442c0a5ff4f887fab0c1c0bb164d8635b32c894bc1faf4c618dd89df2",
+ "sha256:3ff074fc97dd4e80543a3e91f69d58889baf2002b6be64347ea8cf5533188213",
+ "sha256:47c0995fc4e7f79b5cfcab1fc437ff2890b770440f7696a3ba065ee0fd496563",
+ "sha256:49d9ba1ed0ef3e061088cd1e7538a0759aab559e2e0a80a36f9fd9d8c0c21591",
+ "sha256:51f1a1bffc50e2e9492e87d8e09a17c5eea8409cda8d3f277eb6edc82813c17c",
+ "sha256:52a50aa3fb3acb9cf7213573ef55d31d6eca37f5709c69e6858fe3bc04a5c2a2",
+ "sha256:54f1852cd531aa981bc0965b7d609f5f6cc8ce8c41b1139f6ed6b3c54ab82bfb",
+ "sha256:609448742444d9290fd687940ac0b57fb35e6fd92bdb65386e08e99af60bf757",
+ "sha256:69ffdd6120a4737710a9eee73e1d2e37db89b620f702754b8f6e62594471dee0",
+ "sha256:6fad5ff2f13d69b7e74ce5b4ecd12cc0ec530fcee76356cac6742785ff71c452",
+ "sha256:7049e301399273a0136ff39b84c3678e314f2158f50f517bc50285fb5ec847ad",
+ "sha256:70c61d4c475835a19b3a5aa42492409878bbca7438554a1f89d20d58a7c75c01",
+ "sha256:716d30ed977be8b37d3ef185fecb9e5a1d62d110dfbdcd1e2a122ab46fddb03f",
+ "sha256:753cd8f2086b2b80180d9b3010dd4ed147efc167c90d3bf593fe2af21265e5a5",
+ "sha256:773efe0603db30c281521a7c0214cad7836c03b8ccff897beae9b47c0b657d61",
+ "sha256:7823bdd049099efa16e4246bdf15e5a13dbb18a51b68fa06d6c1d4d8b99a796e",
+ "sha256:7c8f97e8e7a9009bcacbe3766a36175056c12f9a44e6e6f2d5caad06dcfbf03b",
+ "sha256:823ef7a27cf86df6597fa0671066c1b596f69eba53efa3d1e1cb8b30f3533068",
+ "sha256:8373c6c251f7ef8bda6675dd6d2b3a0fcc31edf1201266b5cf608b62a37407f9",
+ "sha256:83b2021f2ade7d1ed556bc50a399127d7fb245e725aa0113ebd05cfe88aaf588",
+ "sha256:870ea1ada0899fd0b79643990809323b389d4d1d46c192f97342eeb6ee0b8483",
+ "sha256:8d12251f02d69d8310b046e82572ed486685c38f02176bd08baf216746eb947f",
+ "sha256:9c23f307202661071d94b5e384e1e1dc7dfb972a28a2310e4ee16103e66ddb67",
+ "sha256:9d189550615b4948f45252d7f005e53c2040cea1af5b60d6f79491a6e147eef7",
+ "sha256:a086c2af425c5f62a65e12fbf385f7c9fcb8f107d0849dba5839461a129cf311",
+ "sha256:a2b56ba36e05f973d450582fb015594aaa78834fefe8dfb8fcd79b93e64ba4c6",
+ "sha256:aebb6044806f2e16ecc07b2a2637ee1ef67a11840a66752751714a0d924adf72",
+ "sha256:b1b3020d90c2d8e1dae29cf3ce54f8094f7938460fb5ce8bc5c01450b01fbaf6",
+ "sha256:b4b6b1e20608493548b1f32bce8cca185bf0480983890403d3b8753e44077129",
+ "sha256:b6f491cdf80ae540738859d9766783e3b3c8e5bd37f5dfa0b76abdecc5081f13",
+ "sha256:b792a349405fbc0163190fde0dc7b3fef3c9268292586cf5645598b48e63dc67",
+ "sha256:b7c2286c23cd350b80d2fc9d424fc797575fb16f854b831d16fd47ceec078f2c",
+ "sha256:babf5acfede515f176833ed6028754cbcd0d206f7f614ea3447d67c33be12516",
+ "sha256:c365fd1703040de1ec284b176d6af5abe21b427cb3a5ff68e0759e1e313a5e7e",
+ "sha256:c4225f5220f46b2fde568c74fca27ae9771536c2e29d7c04f4fb62c83275ac4e",
+ "sha256:c570f24be1e468e3f0ce7ef56a89a60f0e05b30a3669a459e419c6eac2c35364",
+ "sha256:c6dafac9e0f2b3c78df97e79af707cdc5ef8e88208d686a4847bab8266870023",
+ "sha256:c8de2789052ed501dd829e9cae8d3dcce7acb4777ea4a479c14521c942d395b1",
+ "sha256:cb28c753fd5eb3dd859b4ee95de66cc62af91bcff5db5f2571d32a520baf1f04",
+ "sha256:cb4c38abeef13c61d6916f264d4845fab99d7b711be96c326b84df9e3e0ff62d",
+ "sha256:d1b35bcd6c5543b9cb547dee3150c93008f8dd0f1fef78fc0cd2b141c5baf58a",
+ "sha256:d8e6aeb9201e655354b3ad049cb77d19813ad4ece0df1249d3c793de3774f8c7",
+ "sha256:d8ecd059fdaf60c1963c58ceb8997b32e9dc1b911f5da5307aab614f1ce5c2fb",
+ "sha256:da2b52b37dad6d9ec64e653637a096905b258d2fc2b984c41ae7d08b938a67e4",
+ "sha256:e87f0b2c78157e12d7686b27d63c070fd65d994e8ddae6f328e0dcf4a0cd007e",
+ "sha256:edca80cbfb2b68d7b56930b84a0e45ae1694aeba0541f798e908a49d66b837f1",
+ "sha256:f379abd2f1e3dddb2b61bc67977a6b5a0a3f7485538bcc6f39ec76163891ee48",
+ "sha256:fe4c15f6c9285dc54ce6553a3ce908ed37c8f3825b5a51a15c91442bb955b868"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.8'",
+ "version": "==10.2.0"
+ },
+ "plexapi": {
+ "hashes": [
+ "sha256:5af051b3e3967bb25d66ee492a7620e2ec28824df4efd647fecc1fadad009a67",
+ "sha256:d4436cd524260571a74a2056feabaa4c389f7b0956f927967d23424a9b7e4115"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.8'",
+ "version": "==4.15.10"
+ },
+ "prettytable": {
+ "hashes": [
+ "sha256:6536efaf0757fdaa7d22e78b3aac3b69ea1b7200538c2c6995d649365bddab92",
+ "sha256:9665594d137fb08a1117518c25551e0ede1687197cf353a4fdc78d27e1073568"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.8'",
+ "version": "==3.10.0"
+ },
+ "python-dateutil": {
+ "hashes": [
+ "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3",
+ "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
+ "version": "==2.9.0.post0"
+ },
+ "pytz": {
+ "hashes": [
+ "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812",
+ "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"
+ ],
+ "version": "==2024.1"
+ },
+ "pyyaml": {
+ "hashes": [
+ "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5",
+ "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc",
+ "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df",
+ "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741",
+ "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206",
+ "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27",
+ "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595",
+ "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62",
+ "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98",
+ "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696",
+ "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290",
+ "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9",
+ "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d",
+ "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6",
+ "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867",
+ "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47",
+ "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486",
+ "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6",
+ "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3",
+ "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007",
+ "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938",
+ "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0",
+ "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c",
+ "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735",
+ "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d",
+ "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28",
+ "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4",
+ "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba",
+ "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8",
+ "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef",
+ "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5",
+ "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd",
+ "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3",
+ "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0",
+ "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515",
+ "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c",
+ "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c",
+ "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924",
+ "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34",
+ "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43",
+ "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859",
+ "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673",
+ "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54",
+ "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a",
+ "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b",
+ "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab",
+ "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa",
+ "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c",
+ "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585",
+ "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d",
+ "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.6'",
+ "version": "==6.0.1"
+ },
+ "qbittorrent-api": {
+ "hashes": [
+ "sha256:57ec679a8ed612a3d9a275cbe540cf55467a6e3c1c298d76442339dc506c3a14",
+ "sha256:db6eef9ce26661cadb61df0c8d31bc73cdac7f76b034e17f6f101bcb426549f0"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.8'",
+ "version": "==2024.2.59"
+ },
+ "requests": {
+ "hashes": [
+ "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f",
+ "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.7'",
+ "version": "==2.31.0"
+ },
+ "ruamel.yaml": {
+ "hashes": [
+ "sha256:57b53ba33def16c4f3d807c0ccbc00f8a6081827e81ba2491691b76882d0c636",
+ "sha256:8b27e6a217e786c6fbe5634d8f3f11bc63e0f80f6a5890f28863d9c45aac311b"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==0.18.6"
+ },
+ "ruamel.yaml.clib": {
+ "hashes": [
+ "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d",
+ "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001",
+ "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462",
+ "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9",
+ "sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe",
+ "sha256:1758ce7d8e1a29d23de54a16ae867abd370f01b5a69e1a3ba75223eaa3ca1a1b",
+ "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b",
+ "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615",
+ "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62",
+ "sha256:25ac8c08322002b06fa1d49d1646181f0b2c72f5cbc15a85e80b4c30a544bb15",
+ "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b",
+ "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1",
+ "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9",
+ "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675",
+ "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899",
+ "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7",
+ "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7",
+ "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312",
+ "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa",
+ "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91",
+ "sha256:75e1ed13e1f9de23c5607fe6bd1aeaae21e523b32d83bb33918245361e9cc51b",
+ "sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6",
+ "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3",
+ "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334",
+ "sha256:84b554931e932c46f94ab306913ad7e11bba988104c5cff26d90d03f68258cd5",
+ "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3",
+ "sha256:955eae71ac26c1ab35924203fda6220f84dce57d6d7884f189743e2abe3a9fbe",
+ "sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c",
+ "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed",
+ "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337",
+ "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880",
+ "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f",
+ "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d",
+ "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248",
+ "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d",
+ "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf",
+ "sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512",
+ "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069",
+ "sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb",
+ "sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942",
+ "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d",
+ "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31",
+ "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92",
+ "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5",
+ "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28",
+ "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d",
+ "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1",
+ "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2",
+ "sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875",
+ "sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412"
+ ],
+ "markers": "python_version < '3.13' and platform_python_implementation == 'CPython'",
+ "version": "==0.2.8"
+ },
+ "six": {
+ "hashes": [
+ "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926",
+ "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"
+ ],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
+ "version": "==1.16.0"
+ },
+ "tqdm": {
+ "hashes": [
+ "sha256:1ee4f8a893eb9bef51c6e35730cebf234d5d0b6bd112b0271e10ed7c24a02bd9",
+ "sha256:6cd52cdf0fef0e0f543299cfc96fec90d7b8a7e88745f411ec33eb44d5ed3531"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.7'",
+ "version": "==4.66.2"
+ },
+ "unidecode": {
+ "hashes": [
+ "sha256:cfdb349d46ed3873ece4586b96aa75258726e2fa8ec21d6f00a591d98806c2f4",
+ "sha256:d130a61ce6696f8148a3bd8fe779c99adeb4b870584eeb9526584e9aa091fd39"
+ ],
+ "index": "pypi",
+ "markers": "python_version >= '3.5'",
+ "version": "==1.3.8"
+ },
+ "urllib3": {
+ "hashes": [
+ "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d",
+ "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"
+ ],
+ "markers": "python_version >= '3.8'",
+ "version": "==2.2.1"
+ },
+ "wcwidth": {
+ "hashes": [
+ "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859",
+ "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"
+ ],
+ "version": "==0.2.13"
+ }
+ },
+ "develop": {}
+}
diff --git a/README.md b/README.md
index e81d782..91eaed4 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
-Collection of unRAID userscripts
+Collection of userScripts I've written or found useful
[![MIT License](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT)
[![GitHub issues](https://img.shields.io/github/issues/Drazzilb08/userScripts.svg)](https://github.com/Drazzilb08/userScripts/issues)
@@ -10,8 +10,9 @@ Collection of unRAID userscripts
[![Python](https://img.shields.io/badge/python-3.8-blue.svg)](https://www.python.org/downloads/release/python-380/)
[![Bash](https://img.shields.io/badge/bash-5.0-green.svg)](https://www.gnu.org/software/bash/)
-To help with documentation of the collections of these scripts, I've created a [WIKI](https://github.com/Drazzilb08/userScripts/wiki) If there are any questions or concerns please let me know with a [GitHub issue](https://github.com/Drazzilb08/userScripts/issues).
+[![Buy Me a Coffee](https://img.shields.io/badge/Buy%20Me%20a%20Coffee-Donate-orange?logo=buy-me-a-coffee)](https://www.buymeacoffee.com/drazzilb08)
+To help with documentation of this collection of scripts, I've created a [WIKI](https://github.com/Drazzilb08/userScripts/wiki). If there are any questions or concerns, please let me know with a [GitHub issue](https://github.com/Drazzilb08/userScripts/issues).
@@ -28,7 +29,9 @@ I do my best to keep up with everything
 I have no problem adding features or fixing issues with these scripts. However, if you're going to submit a PR to correct my code **PLEASE** add detailed notes as to why this change needs to be made (ELI5 or Explain Like I'm 5). This will help me grow and make better content in the future.
-**Please note that these scripts are tested by me and a few friends. They very well may work for you but they are not tested under all conditions.**
+**Please note that these scripts are tested by me and a few friends. They very well may work for you but they are not tested under all conditions.**
+
+
**Please test and use at your own risk.**
@@ -40,7 +43,7 @@ Thanks
2. You can't have a list of scripts to use without including [JBOPS](https://github.com/blacktwin/JBOPS). I specifically use the Inactive User script and the kill stream script.
3. [qBitManager](https://github.com/StuffAnThings/qbit_manage) Amazing bit of python scripting that makes managing qBittorrent much easier
4. [Upgradinatorr](https://github.com/angrycuban13/Just-A-Bunch-Of-Starr-Scripts/tree/main/Upgradinatorr) An amazing bit of powershell writing by AngryCuban meant to keep your movie library as up to date as possible
-5. [Homepage](https://github.com/benphelps/homepage) Probably one of the best homelab dashboard around IMO.
-6. [reTraktArr](https://github.com/zakkarry/reTraktarr) Here is a neat script that will help you keep your Radarr and Sonarr libraries in sync with your Trakt lists. The way I'm using this is to have my entire library visable on trakt and visable to anybody I want to show my collection to
+5. [Homepage](https://github.com/benphelps/homepage) Probably one of the best HomeLab dashboards around, IMO.
+6. [reTraktArr](https://github.com/zakkarry/reTraktarr) Here is a neat script that will help you keep your Radarr and Sonarr libraries in sync with your Trakt lists. The way I'm using this is to have my entire library visible on Trakt and visible to anybody I want to show my collection to.
If you think there are some other neat ones I should include or try out please let me know.
diff --git a/VERSION b/VERSION
new file mode 100755
index 0000000..341cf11
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+0.2.0
\ No newline at end of file
diff --git a/browser-scripts/tpdb.js b/browser-scripts/tpdb.js
deleted file mode 100644
index ccb1bff..0000000
--- a/browser-scripts/tpdb.js
+++ /dev/null
@@ -1,233 +0,0 @@
-// ==UserScript==
-// @name ThePosterDB Downloader
-// @version 1.1
-// @description Downloads all posters on the feed page on theposterdb.com's site in sequential order
-// @author Drazzilb | Modified by s0len
-// @match https://theposterdb.com/feed*
-// @grant GM_addStyle
-// ==/UserScript==
-
-(function () {
- "use strict";
-
- // Add custom CSS to the page
- const customStyles = `
-@import url('https://fonts.googleapis.com/css2?family=Poppins:wght@100;300;400;500;600;700;800;900&display=swap');
-
-* {
- margin: 0;
- padding: 0;
- box-sizing: border-box;
- font-family: 'Poppins', sans-serif;
-}
-
-.fa-download {
- /* width: 250px;
- min-height: 100vh;*/
- display: flex;
- justify-content: center;
- align-items: center;
- flex-wrap: wrap;
- gap: 120px;
- background: none;
-}
-
-.button {
- display: flex;
- position: fixed;
- padding: 16px 30px;
- font-size: 1.5rem;
- color: #6eff3e;
- border: 2px solid rgba(0, 0, 0, 0.5);
- border-radius: 4px;
- text-shadow: 0 0 15px #6eff3e;
- text-decoration: none;
- text-transform: uppercase;
- letter-spacing: 0.1rem;
- transition: 0.5s;
- z-index: 1;
- top: 50%;
- left: 35px;
-}
-
-.button:hover {
- color: #fff;
- border: 2px solid rgba(0, 0, 0, 0);
- box-shadow: 0 0 0px #6eff3e;
-}
-
-.button::before {
- content: '';
- position: absolute;
- top: 0;
- left: 0;
- /*width: 100%;
- height: 100%;*/
- background: #6eff3e;
- z-index: -1;
- transform: scale(0);
- transition: 0.5s;
-}
-
-.button:hover::before {
- transform: scale(1);
- transition-delay: 0.5s;
- box-shadow: 0 0 10px #6eff3e,
- 0 0 30px #6eff3e,
- 0 0 60px #6eff3e;
-}
-
-.button span {
- position: absolute;
- background: #6eff3e;
- pointer-events: none;
- border-radius: 2px;
- box-shadow: 0 0 10px #6eff3e,
- 0 0 20px #6eff3e,
- 0 0 30px #6eff3e,
- 0 0 50px #6eff3e,
- 0 0 100px #6eff3e;
- transition: 0.5s ease-in-out;
- transition-delay: 0.25s;
-}
-
-.button:hover span {
- opacity: 0;
- transition-delay: 0s;
-}
-
-.button span:nth-child(2),
-.button span:nth-child(4) {
- width: 20px;
- height: 4px;
-}
-
-.button:hover span:nth-child(2),
-.button:hover span:nth-child(4) {
- transform: translateX(0);
-}
-
-.button span:nth-child(3),
-.button span:nth-child(5) {
- width: 4px;
- height: 20px;
-}
-
-.button:hover span:nth-child(2),
-.button:hover span:nth-child(4) {
- transform: translateY(0);
-}
-
-.button span:nth-child(2) {
- top: calc(50% - 2px);
- left: -30px;
- transform-origin: left;
-}
-
-.button:hover span:nth-child(2) {
- left: 50%;
-}
-
-.button span:nth-child(4) {
- top: calc(50% - 2px);
- right: -30px;
- transform-origin: right;
-}
-
-.button:hover span:nth-child(4) {
- right: 50%;
-}
-
-.button span:nth-child(3) {
- left: calc(50% - 2px);
- top: -30px;
- transform-origin: top;
-}
-
-.button:hover span:nth-child(3) {
- top: 50%;
-}
-
-.button span:nth-child(5) {
- left: calc(50% - 2px);
- bottom: -30px;
- transform-origin: bottom;
-}
-
-.button:hover span:nth-child(5 ) {
- bottom: 50%;
-}`;
-
- // Inject custom styles into the page
- GM_addStyle(customStyles);
-
- // Create the button
- const button = document.createElement("div");
- button.innerHTML = '';
- button.className = 'button';
-
- // Append four spans to create the effect from the provided CSS
- for (let i = 0; i < 4; i++) {
- let span = document.createElement('span');
- button.appendChild(span);
- }
-
- // Add the button to the page
- document.body.appendChild(button);
-
- // Handle button hover
- button.addEventListener("mouseover", function () {
- button.title = downloading ? "Stop Downloads" : "Download All Posters";
- });
-
- // Handle button click
- let downloading = false;
- button.addEventListener("click", function () {
- downloading = !downloading;
- if (downloading) {
- button.style.backgroundColor = "#ff1867";
- button.style.color = "#111";
- button.innerHTML = '';
- downloadPosters(0);
- } else {
- button.style.backgroundColor = "#6eff3e";
- button.style.color = "#FFF";
- button.innerHTML = '';
- }
- });
-
- function downloadPosters(i) {
- const links = document.querySelectorAll(
- 'a[download][href^="https://theposterdb.com/api/assets/"]'
- );
-
- if (i === links.length || !downloading) {
- button.style.backgroundColor = "#6eff3e";
- button.innerHTML = '';
- downloading = false;
- return;
- }
-
- const link = links[i];
- const url = link.getAttribute("href");
- const name = link.getAttribute("download");
-
- const xhr = new XMLHttpRequest();
- xhr.open("GET", url, true);
- xhr.responseType = "blob";
- xhr.onload = function () {
- const a = document.createElement("a");
- a.href = window.URL.createObjectURL(xhr.response);
- a.download = name;
- a.style.display = "none";
- document.body.appendChild(a);
- a.click();
-
- setTimeout(function () {
- downloadPosters(i + 1);
- }, 500);
- };
-
- xhr.send();
- }
-})();
\ No newline at end of file
diff --git a/config/config.sample.yml b/config/config.sample.yml
new file mode 100755
index 0000000..acbc641
--- /dev/null
+++ b/config/config.sample.yml
@@ -0,0 +1,465 @@
+schedule:
+ # Options:
+ # run - will run a single time per run of main.py (mainly useful for testing with dry_run)
+ # hourly(XX)
+ # Examples: hourly(00) or hourly(18) - Will perform the action every hour at the specified time
+ # daily(XX:XX)
+ # Examples: daily(12:23) or daily(18:15) - Will perform the action every day at the specified time
+ # Examples: daily(10:18|12:23) - Will perform the action every day at the specified times
+ # weekly(day_of_week@XX:XX)
+ # Examples: weekly(monday@12:00) or weekly(monday@18:15) - Will perform the action on the specified day of the week at the specified time
+ # Examples: weekly(monday@12:23)
+ # monthly(day_of_month@XX:XX)
+ # Examples: monthly(15@12:00) or monthly(15@18:15) - Will perform the action on the specified day of the month at the specified time
+ # cron()
+ # Examples: cron(0 0 * * *) - Will perform the action every day at midnight
+ # Examples: cron(*/5 * * * *) - Will perform the action every 5 minutes
+ # Examples: cron(0 */3 * * *) - Will perform the action every 3rd hour
+ # Please visit https://crontab.guru/ for more information on cron expressions
+ # Note: You cannot use both cron and human readable expressions in the same schedule
+
+ # If using the scripts through crontab or another scheduler, I would not recommend using the schedule w/in the config.
+ # The schedule is intended for use with Docker. If you want to use the schedule w/in the config, you will need to run the main.py script in the background.
+ # Schedule only supports the following options: hourly, daily, weekly, monthly, all
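+  # Illustrative example of a filled-in schedule (placeholder values, using the syntax above):
+  #   upgradinatorr: daily(04:00)
+  #   jduparr: weekly(monday@05:30)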
+ border_replacerr:
+ health_checkarr:
+ labelarr:
+ nohl:
+ sync_gdrive:
+ poster_cleanarr:
+ poster_renamerr:
+ queinatorr:
+ renameinatorr:
+ unmatched_assets:
+ upgradinatorr:
+ nohl_bash:
+ jduparr:
+
+instances:
+ radarr:
+ # Instance name can be whatever you want, it just needs to match the name used in other sections
+ radarr_1:
+ url: http://localhost:7878
+ api: abcdefghijklmnopqrstuvwxyz1234567890
+ radarr_2:
+ url: http://localhost:7878
+ api: abcdefghijklmnopqrstuvwxyz1234567890
+ radarr_4K:
+ url: http://localhost:7878
+ api: abcdefghijklmnopqrstuvwxyz1234567890
+ anime_radarr:
+ url: http://localhost:7878
+ api: abcdefghijklmnopqrstuvwxyz1234567890
+ sonarr:
+ # Instance name can be whatever you want, it just needs to match the name used in other sections
+ sonarr_1:
+ url: http://localhost:8989
+ api: abcdefghijklmnopqrstuvwxyz1234567890
+ sonarr_anime:
+ url: http://localhost:8989
+ api: abcdefghijklmnopqrstuvwxyz1234567890
+ sonarr_3:
+ url: http://localhost:8989
+ api: abcdefghijklmnopqrstuvwxyz1234567890
+ 4K_sonarr:
+ url: http://localhost:8989
+ api: abcdefghijklmnopqrstuvwxyz1234567890
+ plex:
+ plex_1:
+ # API key can be found here: https://support.plex.tv/articles/204059436-finding-an-authentication-token-x-plex-token/
+ url: http://localhost:32400
+ api: abcdefghijklmnopqrstuvwxyz1234567890
+ qbittorrent:
+ # Instance name can be whatever you want, it just needs to match the name used in other sections
+ qbittorrent_1:
+ url: http://localhost:8080
+ username: admin
+ password: adminadmin
+ qbittorrent_2:
+ url:
+ username:
+ password:
+
+discord:
+ # Discord notifications:
+ # You can use either Notifiarr or Discord's webhook
+ # Notifiarr:
+ # Use Notifiarr Passthrough
+ # Use channel ID so notifiarr knows what channel to send the message to
+ # Discord Webhook:
+ # Use Discord's webhook
+ # Channel ID not used
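+  # Illustrative webhook value (placeholder): https://discord.com/api/webhooks/123456789/abcdefg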
+ notifiarr_webhook:
+ # Main is used for version notifications
+ main:
+ discord_webhook:
+ channel_id:
+ renameinatorr:
+ # Will notify items that have been renamed
+ discord_webhook:
+ channel_id:
+ upgradinatorr:
+ # Will notify if an upgrade has been found
+ discord_webhook:
+ channel_id:
+ poster_renamerr:
+ # Note: It is recommended for a fresh run to not use a notifications channel, as it will spam the channel with all the matched assets
+ discord_webhook:
+ channel_id:
+ nohl:
+ # Will notify if non-hardlinked files are found/searched for
+ discord_webhook:
+ channel_id:
+ queinatorr:
+ # Will notify if a torrent/queue item has been handled
+ discord_webhook:
+ channel_id:
+ labelarr:
+ # Will notify if a label has been added/removed to a movie or series
+ discord_webhook:
+ channel_id:
+ nohl_bash:
+ # Will notify if a non-hardlinked file has been found
+ discord_webhook:
+ channel_id:
+ jduparr:
+ # Will notify if a duplicate file has been found and relinked
+ discord_webhook:
+ channel_id:
+
+sync_gdrive:
+ # THIS SCRIPT SYNCS GDRIVE TO A LOCAL FOLDER. ANYTHING UNIQUE IN THE FOLDER WILL BE DELETED.
+ # -> You can use client_id, client_secret, and token to authenticate, or you can use gdrive_sa_location to use a service account.
+ # DRAZZILB GDRIVE ID: 1VeeQ_frBFpp6AZLimaJSSr0Qsrl6Tb7z
+ # Below are others that have been shared around Discord. If you have your own and would like to share it please contact me.
+ # This script is meant to be used in conjunction with Plex-Meta-Manager, however, it can be used on its own.
+  # This script is based on this one: https://discord.com/channels/492590071455940612/1124032073557086258/1126226814629576858
+ log_level: info # debug, info, warning, error, critical
+ # Client ID for rclone usually ends with .apps.googleusercontent.com
+ client_id: asdasds.apps.googleusercontent.com
+ # Client Secret for rclone, usually starts with GOCSPX-
+ client_secret: GOCSPX-asda123
+  # The token for rclone; this comes from the output of rclone config dump, which needs to be run manually
+ # Token looks like this: { "access_token": "value", "token_type": "Bearer", "refresh_token": "value", "expiry": "value" }
+ # NOTE: Include the curly braces
+ token: { "access_token": "value", "token_type": "Bearer", "refresh_token": "value", "expiry": "value" }
+ # The location of your rclone service account file (JSON)
+ gdrive_sa_location: /config/rclone_sa.json
+  gdrive_sync: # Example of multiple gdrive IDs, each with its own sync location; gdrive_id and sync_location have been deprecated, so please use this instead
+ # id: The ID of the folder you want to sync from
+ # location: Where you want to sync the posters to
+ - id: 1VeeQ_frBFpp6AZLimaJSSr0Qsrl6Tb7z # drazzilb
+ location: /path/to/location # Where you want to sync the posters to
+ - id: 1wrSru-46iIN1iqCl2Cjhj5ofdazPgbsz # dsaq
+ location: /path/to/location # Where you want to sync the posters to
+ - id: 1zWY-ORtJkOLcQChV--oHquxW3JCow1zm # solen
+ location: /path/to/location # Where you want to sync the posters to
+ - id: 1Xg9Huh7THDbmjeanW0KyRbEm6mGn_jm8 # BZ
+ location: /path/to/location # Where you want to sync the posters to
+ - id: 1oBzEOXXrTHGq6sUY_4RMtzMTt4VHyeJp # chrisdc
+ location: /path/to/location # Where you want to sync the posters to
+ - id: 1G77TLQvgs_R7HdMWkMcwHL6vd_96cMp7 # Quafley
+ location: /path/to/location # Where you want to sync the posters to
+ - id: 1bBbK_3JeXCy3ElqTwkFHaNoNxYgqtLug # Stupifier
+ location: /path/to/location # Where you want to sync the posters to
+ - id: 1KnwxzwBUQzQyKF1e24q_wlFqcER9xYHM # Sahara
+ location: /path/to/location # Where you want to sync the posters to
+ - id: 1alseEnUBjH6CjXh77b5L4R-ZDGdtOMFr # Lion City Gaming
+ location: /path/to/location # Where you want to sync the posters to
+
+poster_renamerr:
+  # This script will rename your posters from TPDB's naming scheme to match Plex-Meta-Manager's naming scheme, utilizing Radarr/Sonarr as a source.
+ #
+ # NOTE: If you're planning to run border_replacerr and/or sync_posters it will appear that this script has hung. It's simply waiting
+ # for the other scripts to finish. You can monitor progress and/or actions on their respective logs.
+  #       Logging for the poster_renamerr and border_replacerr scripts happens mostly at the end of the run, so you may not see anything until the end.
+ # sync_posters is a bit more verbose as it utilizes the rclone sync command.
+ log_level: info
+ dry_run: false
+ sync_posters: true # <- This will run sync_gdrive before renaming
+ action_type: copy # <- Options: copy, move
+  asset_folders: false # <- This will copy the folder structure of the source_dir to the destination_dir; this MUST match the setting you use in Plex-Meta-Manager
+ print_only_renames: false # <- This will print the renames to the log, but will not actually rename anything
+  # This will integrate border_replacerr with poster_renamerr; set this to true if you want to use them at the same time (no need to schedule border_replacerr separately)
+  # Note: This will create a tmp dir in your output directory for border_replacerr to use and for poster_renamerr to keep an accurate account of what has been added
+  # Note: All of border_replacerr's settings will be used, except for its source_dirs and destination_dir
+ border_replacerr: false
+ # Libraries are ONLY used for collections
+ library_names:
+ - Movies
+ - Anime Movies
+ # The order from lowest to highest priority
+ # The order is 100% personal preference
+ source_dirs:
+ - /path/to/stupifier/posters/ # <- This is the lowest priority
+ - /path/to/dsaq/posters/
+ - /path/to/solen/posters/
+ - /path/to/chrisdc/posters/
+ - /path/to/Quafley/posters/
+ - /path/to/drazzilb/posters/ # <- This is the highest priority
+ destination_dir: /path/to/output/
+ instances:
+ - plex_1
+ - radarr_1
+ - sonarr_1
+ # - sonarr_anime
+ # - radarr_2
+ # - sonarr_3
+
+border_replacerr:
+ # This script replaces the border on the posters or removes the border
+ # WARNING: If you have overlays that are carefully positioned and choose to remove the border around the poster, the overlay
+ # will not be in the correct position. This is because the overlay is positioned based on the border width.
+ # Colors: https://www.w3schools.com/colors/colors_picker.asp
+ log_level: info
+ dry_run: true
+ source_dirs: # <--- Not used when running with poster_renamerr; only used when running border_replacerr by itself
+ - /path/to/posters/
+ destination_dir: /path/to/output/ # <---- Not used when running with poster_renamerr; only used when running border_replacerr by itself
+ # If you want to adjust the border width (not all MM2K posters have 25px borders; some are 26-27px)
+ border_width: 26
+ # skip - This will skip border replacement until the next schedule/holiday
+ skip: false
+ # Setting border_colors to nothing will remove the border; otherwise the given color(s) will be added as the border
+ # Examples: Include everything from the opening double quote to the closing double quote, such as "#FF0000"
+ # "#FF0000" - Red
+ # "#00FF00" - Green
+ # "#0000FF" - Blue
+ # "#FFA500" - Orange
+ # "#FFFF00" - Yellow
+ # "#800080" - Purple
+ # "#FFC0CB" - Pink
+ # "#FFFFFF" - White
+ # "#000000" - Black
+ # "#808080" - Gray
+ # To remove borders set the border_colors to nothing.
+ # Example: border_colors:
+ border_colors:
+ schedule:
+ # You can make any holiday you want for any occasion. For the schedule, please use the range(month/day-month/day) option (an illustrative custom entry is shown at the end of this block)
+ christmas:
+ schedule: range(12/01-12/31)
+ color:
+ - "#FF0000"
+ - "#00FF00"
+ halloween:
+ schedule: range(10/01-10/31)
+ color: "#FFA500"
+ thanksgiving:
+ schedule: range(11/01-11/30)
+ color: "#FFA500"
+ valentine:
+ schedule: range(2/5-2/15)
+ color:
+ - "#D41F3A"
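+ # Illustrative sketch only (the holiday name and dates below are made-up examples, not defaults):
+ # any custom entry follows the same range() pattern, and color accepts either a single value or a list.
+ # st_patricks:
+ #   schedule: range(3/10-3/17)
+ #   color: "#00FF00"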
+
+unmatched_assets:
+ # There is a limitation to how this script matches series assets: the main series poster
+ # requires season posters to be present. If a series does not have a season poster,
+ # then the series poster will not be matched and the series will appear
+ # in the movies section.
+ log_level: info
+ instances:
+ - plex_1
+ - radarr_1
+ - sonarr_1
+ - sonarr_anime
+ # Supports both data path as well as base_name of folder path
+ # Note: If you use base_name, the name must match exactly, just without the "/"
+ ignore_root_folders:
+ - anime movies
+ - /data/Kids Shows
+ # Libraries are ONLY used for collections
+ library_names:
+ - Movies
+ - Anime Movies
+ ignore_collections:
+ - "Star Wars Universe"
+ - "DC Animated Universe"
+ - "Alien / Predator"
+ - "Rocky / Creed"
+ - "MonsterVerse"
+ - "Star Trek"
+ - "X-Men Universe"
+ - "In Association With Marvel"
+ - "View Askewniverse"
+ - "The Mummy Universe"
+ - "Middle Earth"
+ - "In Association With DC"
+ - "Wizarding World"
+ - "Fast & Furious"
+ - "The Spiderverse"
+ - "Marvel Cinematic Universe"
+ - "The Conjuring Universe"
+ - "The Batman Universe"
+ - "DC Extended Universe"
+ source_dirs:
+ - /path/to/assets
+
+poster_cleanarr:
+ # This script will remove any assets from your plex-meta-manager asset directory that are not being used by your media.
+ # Note: This script will remove items that poster_renamerr has put into the assets directory that do not have a matching folder in your
+ # media directory, which can cause a loop. I wouldn't recommend running this script very often (weekly at most; monthly is probably better)
+ log_level: info
+ dry_run: true
+ instances:
+ - plex_1
+ - radarr_1
+ - sonarr_1
+ - sonarr_anime
+ # Libraries are ONLY used for collections
+ library_names:
+ - Movies
+ - Anime Movies
+ ignore_collections:
+ - "Random Collection in assets you want to keep"
+ # If using poster_renamerr with border_replacerr, include the tmp folder that border_replacerr uses
+ source_dirs:
+ - /path/to/assets
+
+upgradinatorr:
+ # A script to upgrade Sonarr/Radarr libraries to keep them in line with trash-guides
+ log_level: info
+ dry_run: false
+ instances:
+ radarr_1:
+ count: 3
+ tag_name: checked
+ unattended: true
+ radarr_2:
+ count: 10
+ tag_name: checked
+ unattended: true
+ sonarr_1:
+ count: 1
+ tag_name: checked
+ unattended: true
+ sonarr_anime:
+ count: 1
+ tag_name: checked
+ unattended: true
+ sonarr_3:
+ count: 20
+ tag_name: checked
+ unattended: true
+
+renameinatorr:
+ # This script will rename all series in Sonarr/Radarr to match the naming scheme of the
+ # Naming Convention within Radarr/Sonarr. It will also add a tag to the series so that it can be easily
+ # identified as having been renamed.
+ #
+ # Warning: With a large library this script can take a while, as it waits for all API calls to finish before moving on to the next instance
+ # Note: If you see something along the lines of "Unable to move series from ''. Try moving files manually: Source and destination can't be the same"
+ # This is normal and you'll even see this error if you perform the same action the script does manually.
+ log_level: info
+ dry_run: false
+ rename_folders: true
+ # Tag name and count are optional; if you don't want to use them, leave them blank (see the commented example at the end of this section)
+ # The reason for count and tag_name is so you can perform renaming actions in batches or in bulk
+ count: 10
+ tag_name: renamed
+ instances:
+ - radarr_2
+ - sonarr_3
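+ # Illustrative sketch only, per the note on count/tag_name above: if you don't want batching
+ # or tagging, both keys can simply be left blank, e.g.
+ # count:
+ # tag_name: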
+
+nohl:
+ # This script will find all files that are not hardlinked and will process them in Radarr
+ # and Sonarr. This is useful for finding files that are not hardlinked when you wish
+ # to have 100% hardlinked seeding.
+ dry_run: true
+ log_level: info
+ maximum_searches: 10 # Please keep this number low to avoid bans
+ print_files: false # Simply prints the files; note that this does not respect the excludes
+ instances:
+ - radarr_1
+ - sonarr_1
+ paths:
+ - /path/to/media/movies
+ - /path/to/media/series
+ filters:
+ exclude_profiles: test3, test4
+ # Do not include year in the exclude list
+ # The title should be exactly as it appears in `Radarr` or `Sonarr`
+ exclude_movies:
+ - "The Matrix"
+ - "The Matrix Reloaded"
+ - "The Matrix Revolutions"
+ exclude_series:
+ - Diners, Drive-ins and Dives
+ - Marvel's Spider-Man
+ - M*A*S*H
+
+queinatorr:
+ # This script will move torrents from one category to another in qBittorrent based on
+ # the title of the torrent. This is useful for moving torrents that are stuck in a queue in one
+ # category due to a missing file or not being an upgrade for existing episode file(s).
+ log_level: info
+ dry_run: true
+ days_to_keep: 14 # The number of days to keep a torrent in the pre_import_category prior to moving it to the post_import_category
+ instances:
+ radarr_1:
+ qbit_instance: qbittorrent_1
+ pre_import_category: movies
+ post_import_category: completed-movies
+ sonarr_1:
+ qbit_instance: qbittorrent_2
+ pre_import_category: series
+ post_import_category: completed-series
+ sonarr_anime:
+ qbit_instance: qbittorrent_2
+ pre_import_category: anime
+ post_import_category: completed-anime
+
+labelarr:
+ # A script to sync labels between Plex and Radarr/Sonarr
+ # Warning: Due to the way that the Plex API works, this script can take a while to run with a large library
+ dry_run: true
+ log_level: info
+ instances:
+ radarr_2:
+ library_names:
+ - Movies
+ plex_instances:
+ - plex_1
+ # Labels/tags to sync between Radarr/Sonarr and Plex
+ labels:
+ - kids
+ sonarr_3:
+ library_names:
+ - Animated Series
+ - Childrens Series
+ plex_instances:
+ - plex_1
+ labels:
+ - kids
+
+health_checkarr:
+ # This script will delete movies and shows from Radarr and Sonarr based on whether they show
+ # up in the health check. This is useful for removing movies and shows that have been removed
+ # from TMDB or TVDB.
+ log_level: info
+ dry_run: true
+ instances:
+ - radarr_1
+ - sonarr_1
+ - sonarr_anime
+
+bash_scripts:
+ # Bash Scripts
+ # This is a collection of Bash scripts that can be run by the main script.
+ log_level: info
+ nohl_bash:
+ source:
+ include:
+ -
+ -
+ # Exclude certain movie/series folders from being searched
+ # NOTE: Each entry must match exactly how the folder is listed in your file system (see the illustrative example below)
+ exclude:
+ -
+ -
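+ # Illustrative sketch only (hypothetical folder names, not defaults): filled-in include/exclude
+ # lists would look like this, with each entry matching an on-disk folder name exactly.
+ # include:
+ # - movies
+ # - series
+ # exclude:
+ # - anime movies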
+ jduparr:
+ data_dir:
+
+main:
+ log_level: info
\ No newline at end of file
diff --git a/bash-scripts/backup-plex/backup-plex-example.conf b/extra-scripts/backup-plex-example.conf
old mode 100644
new mode 100755
similarity index 97%
rename from bash-scripts/backup-plex/backup-plex-example.conf
rename to extra-scripts/backup-plex-example.conf
index bfe5526..fbc97e6
--- a/bash-scripts/backup-plex/backup-plex-example.conf
+++ b/extra-scripts/backup-plex-example.conf
@@ -9,6 +9,8 @@
# Note: All these variables can be overridden by using the command line arguments simply type `bash .sh --help` for information on usage
+#!!!!!NOTE: DO NOT USE IF USING THE YAML SETTINGS WITH MAIN.PY!!!!!
+
#------------- DEFINE VARIABLES -------------#
source_dir='/path/to/plex' # Path to your plex appdata location
destination_dir='/path/to/plex/backup' # Path to your backup folder
diff --git a/bash-scripts/backup-appdata/backup-appdata-example.conf b/extra-scripts/backup_appdata.sample.conf
old mode 100644
new mode 100755
similarity index 95%
rename from bash-scripts/backup-appdata/backup-appdata-example.conf
rename to extra-scripts/backup_appdata.sample.conf
index 6b53944..0ea9db4
--- a/bash-scripts/backup-appdata/backup-appdata-example.conf
+++ b/extra-scripts/backup_appdata.sample.conf
@@ -10,7 +10,7 @@
destination_dir='/path/to/backup' # Path to your backup folder
keep_backup=2 # Number of days of backups you'd like to keep
compress=false # Due to the size of some backups, compress your backups down as much as possible use 7Zip
-exclude_file='/path/to/exclude-file.txt' # Location of your exclusion file.
+exclude_file='/app/scripts/exclude-file.txt' # Location of your exclusion file.
add_to_stop=false # Add any new containers to your stop and backup list
add_to_no_stop=false # Add any new container to your backup without stopping list
dry_run=true # Test the script out to make sure everythign is to your liking
@@ -22,12 +22,12 @@ quiet=false # Run script without echoing actions
# Each container should be on its own line as shown below
# The # symbol at the beginning is only for commenting and will cause the script to pass over it if used on real contianers
stop_list=(
- # "Conatiner 1"
+ # "Container 1"
# "Container 2"
# "Container 3"
)
no_stop_list=(
- # "Conatiner 4"
+ # "Container 4"
# "Container 5"
# "Container 6"
)
@@ -35,10 +35,11 @@ no_stop_list=(
# If you don't wish to be notified about them or have themn added to those list, define them here
# An example woudl be plex, as I have a backup solution for plex I don't need to be notified about it not being in the list
exclusion_list=(
- # "Conatiner 1"
+ # "Container 1"
# "Container 2"
# "Container 3"
"plex"
+ "userScripts"
)
# Define locations for appdata directories
diff --git a/bash-scripts/backup-appdata/backup-appdata.sh b/extra-scripts/backup_appdata.sh
old mode 100644
new mode 100755
similarity index 91%
rename from bash-scripts/backup-appdata/backup-appdata.sh
rename to extra-scripts/backup_appdata.sh
index 420b63f..9a2f7d6
--- a/bash-scripts/backup-appdata/backup-appdata.sh
+++ b/extra-scripts/backup_appdata.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# _ _ ____ _
# /\ | | | | | _ \ | |
# / \ _ __ _ __ __| | __ _| |_ __ _ | |_) | __ _ ___| | ___ _ _ __
@@ -14,27 +14,40 @@
# License: MIT License
# ====================================================
-# Path to config file such as "/path/to/config/config_file.txt"
-# Only use if config file is not in the same location as the script.
-config_file=""
-
# <----- Do not edit below this point ----->
+compress=None
+dry_run=None
+exclusion_list=()
+no_stop_list=()
+stop_list=()
+use_summary=None
+add_to_stop=None
+add_to_no_stop=None
+quiet=None
+bar_color=None
+unraid_notify=None
+keep_backup=None
+appdata_dir1=
+appdata_dir2=
+
config_file() {
- if [ -z "$config_file" ]; then
- script_dir=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)
- config_file="$script_dir/backup-appdata.conf"
- fi
+ script_path=$(dirname "$0")
+ config_file="${script_path}/backup_appdata.conf"
+
+ echo "Config File: $config_file"
# Check if config file exists
if [ -f "$config_file" ]; then
# Read config file
- # shellcheck source=backup-appdata.conf
- . "$config_file"
+ # shellcheck source=/dev/null
+ source "$config_file"
+ echo "Config file exists and is accessible."
else
# Use command line arguments
# handle_options "$@"
- verbose_output "no config file found"
+ echo "no config file found"
+ exit 0
fi
}
@@ -53,7 +66,7 @@ check_space() {
if [ "$backup_size" -gt "$available_space" ]; then
# Print error message and exit if not enough space available
echo "Error: Not enough disk space on $destination_dir. Available: $available_space_mb MB, Required: $backup_size_mb MB"
- exit 1
+ exit 0
fi
else
# Calculate backup size in bytes
@@ -61,7 +74,7 @@ check_space() {
if [ "$backup_size" -gt "$available_space" ]; then
# Print error message and exit if not enough space available
echo "Error: Not enough disk space on $destination_dir."
- exit 1
+ exit 0
fi
fi
# Print message that space check is complete
@@ -105,34 +118,34 @@ find_new_containers() {
if [ "$add_to_stop" == true ]; then
# Add new containers to stop_list in config file
for new_container in "${new_containers[@]}"; do
- awk -i inplace -v new_container="$new_container" '
- /^stop_list=\(/ {
- print;
- printf(" %s\n", new_container);
- next;
- }
- {
- print;
- }
- ' "$config_file"
+ awk -v new_container="$new_container" '
+ /^stop_list=\(/ {
+ print;
+ printf(" %s\n", new_container);
+ next;
+ }
+ {
+ print;
+ }
+ ' "$config_file" > temp && mv temp "$config_file"
done
for new_container in "${secondary_new_containers[@]}"; do
- awk -i inplace -v new_container="$new_container" '
- /^stop_list=\(/ {
- print;
- printf(" %s\n", new_container);
- next;
- }
- {
- print;
- }
- ' "$config_file"
+ awk -v new_container="$new_container" '
+ /^stop_list=\(/ {
+ print;
+ printf(" %s\n", new_container);
+ next;
+ }
+ {
+ print;
+ }
+ ' "$config_file" > temp && mv temp "$config_file"
done
fi
if [ "$add_to_no_stop" == true ]; then
# Add new containers to no_stop_list in config file
for new_container in "${new_containers[@]}"; do
- awk -i inplace -v new_container="$new_container" '
+ awk -v new_container="$new_container" '
/^no_stop_list=\(/ {
print;
printf(" %s\n", new_container);
@@ -141,10 +154,10 @@ find_new_containers() {
{
print;
}
- ' "$config_file"
+ ' "$config_file" > temp && mv temp "$config_file"
done
for new_container in "${secondary_new_containers[@]}"; do
- awk -i inplace -v new_container="$new_container" '
+ awk -v new_container="$new_container" '
/^no_stop_list=\(/ {
print;
printf(" %s\n", new_container);
@@ -153,7 +166,7 @@ find_new_containers() {
{
print;
}
- ' "$config_file"
+ ' "$config_file" > temp && mv temp "$config_file"
done
fi
fi
@@ -336,7 +349,15 @@ get_paths() {
# Get the container name
container_name="$1"
# Get the config path of the container
- config_paths=$(docker inspect -f '{{json .Mounts}}' "$container_name" | jq -r '.[] | select(.Destination | test("^/config")) | .Source')
+ output=$(docker inspect -f '{{json .Mounts}}' "$container_name" | jq -r '.[] | select(.Destination | test("^/config")) | .Source')
+ if [ -n "$output" ]; then
+ config_path_basename=$(echo $output | xargs basename)
+ fi
+ if [ -n "$DOCKER_ENV" ]; then
+ config_paths="${APPDATA_PATH}/${config_path_basename}"
+ else
+ config_paths=$(docker inspect -f '{{json .Mounts}}' "$container_name" | jq -r '.[] | select(.Destination | test("^/config")) | .Source')
+ fi
# if config paths has more than 1 entry itterate over them
if [ "$(echo "$config_paths" | wc -w)" -gt 1 ]; then
for config_path in $config_paths; do
@@ -351,12 +372,18 @@ get_paths() {
fi
done
else
- config_path="$(echo "$config_paths" | tr '\n' ' ' | sed 's/ *$//')"
+ # If no config_path_basename
+ if [ -z "$config_path_basename" ]; then
+ config_path=""
+ else
+ config_path="$(echo "$config_paths" | tr '\n' ' ' | sed 's/ *$//')"
+ fi
fi
+
# Check if config path is empty
if [ -z "$config_path" ]; then
# Get the appdata path of the container
- appdata_path=$(docker inspect -f '{{json .Mounts}}' "$container_name" | jq -r '.[] | select(.Source | test("^'"$appdata_dir1"'|^'"$appdata_dir2"'")) | .Source' | grep -o -e "^$appdata_dir1/[^/]*" -e "^$appdata_dir2/[^/]*" | head -n1)
+ appdata_path=$(docker inspect -f '{{json .Mounts}}' "$container_name" | jq -r '.[] | select(.Source | test("^'"$appdata_dir1"'|^'"$appdata_dir2"'")) | .Source' | head -n1)
# Check if appdata path is empty
if [ -z "$appdata_path" ]; then
# Skip over the container if it does not use appdata
@@ -364,7 +391,7 @@ get_paths() {
# Remove the container's entry from the config file
sed -i "/^[[:space:]]*$container_name$/d" "$config_file"
# Add the container's name to the exclusion list
- awk -i inplace -v new_container="$container_name" '
+ awk -v new_container="$container_name" '
/^exclusion_list=\(/ {
print;
printf(" %s # Container automatically added here due to no appdata dir\n", new_container);
@@ -373,12 +400,19 @@ get_paths() {
{
print;
}
- ' "$config_file"
+ ' "$config_file" > temp && mv temp "$config_file"
verbose_output "-----------------------------------"
return
+ else
+ # Set the source directory to the appdata path
+ if [ -n "$DOCKER_ENV" ]; then
+ # get mount path from appdata_1 or appdata 2
+ appdata_path_basename=$(echo $appdata_path | xargs basename)
+ source_dir="${APPDATA_PATH}/${appdata_path_basename}"
+ else
+ source_dir="$(echo "$appdata_path" | tr '\n' ' ' | sed 's/ *$//')"
+ fi
fi
- # Set the source directory to the appdata path
- source_dir="$appdata_path"
else
# Set the source directory to the config path
source_dir="$config_path"
@@ -829,7 +863,7 @@ hex_to_decimal() {
else
echo "Bar color: $bar_color"
echo -e "Invalid color format. Please provide a valid 6-digit hex color code (e.g. ff0000 for red)"
- exit 1
+ exit 0
fi
}
verbose_output() {
@@ -840,35 +874,40 @@ verbose_output() {
fi
}
check_config() {
+ # Check if docker is installed
+ if ! command -v docker &>/dev/null; then
+ echo "Docker is not installed. Please install docker and rerun."
+ exit 0
+ fi
# Check if destination directory exists
if [ ! -d "$destination_dir" ]; then
echo "ERROR: Your destination directory ($destination_dir) does not exist please check your configuration"
- exit 2
+ exit 0
fi
# Check if destination directory is set
if [ -z "$destination_dir" ]; then
echo "ERROR: Your source directory ($destination_dir) is not set please check your configuration"
- exit 2
+ exit 0
fi
# Check if 7zip command is available if compress is set to true
if [ "$compress" == "true" ]; then
command -v 7z >/dev/null 2>&1 || {
echo -e "7Zip is not installed.\nPlease install 7Zip and rerun.\nIf on unRaid 7Zip can be found through NerdPack/NerdTools in the UnRaid appstore" >&2
- exit 1
+ exit 0
}
fi
# Check if webhook is set and in the correct format
if [ -n "$webhook" ]; then
if [[ ! $webhook =~ ^https://discord\.com/api/webhooks/ ]] && [[ ! $webhook =~ ^https://notifiarr\.com/api/v1/notification/passthrough ]]; then
echo "ERROR: Invalid webhook provided please enter a valid webhook url in the format https://discord.com/api/webhooks/ or https://notifiarr.com/api/v1/notification/passthrough"
- exit 1
+ exit 0
fi
# Check if channel is set if using Notifiarr
if [[ $webhook =~ ^https://notifiarr\.com/api/v1/notification/passthrough ]] && [ -z "$channel" ]; then
echo "ERROR: It appears you're trying to use Notifiarr as your notification agent but haven't set a channel. How will the bot know where to send the notification?"
echo "Please use the -C or --channel argument to set the channel ID used for this notification"
echo "You can find the channel ID by going to the channel you want to use and clicking the settings icon and selecting 'Copy ID'"
- exit 1
+ exit 0
fi
# Check if webhook returns valid response code
if [[ $webhook =~ ^https://notifiarr\.com/api/v1/notification/passthrough ]]; then
@@ -887,7 +926,7 @@ check_config() {
}
cleanup() {
- find "$destination_dir" -mindepth 1 -maxdepth 1 -type d | sort -r | tail -n +"$(( $keep_backup + 1 ))" | xargs -I {} rm -rf {}
+ find "$destination_dir" -mindepth 1 -maxdepth 1 -type d | sort -r | tail -n +"$((keep_backup + 1))" | xargs -I {} rm -rf {}
}
main() {
@@ -896,13 +935,12 @@ main() {
container_no_stop_list=()
now="$(date +"%H.%M")"
start=$(date +%s)
- if ! pgrep dockerd >/dev/null; then
- echo "Docker daemon is not running. Please start it first." >&2
- exit 1
- fi
config_file
- echo "Config file: $config_file"
check_config
+ if ! docker --version >/dev/null 2>&1; then
+ echo "Docker is not installed. Please install Docker and rerun." >&2
+ exit 0
+ fi
hex_to_decimal "$bar_color"
find_new_containers
config_file
@@ -915,7 +953,6 @@ main() {
if [ "$unraid_notify" == "true" ]; then
unraid_notification
fi
- chmod -R 777 "$backup_path"
cleanup
if [ ${#new_containers[@]} -gt 0 ]; then
printf "New containers found:\n"
@@ -943,4 +980,4 @@ main() {
fi
}
-main
\ No newline at end of file
+main
\ No newline at end of file
diff --git a/bash-scripts/backup-folder/backup-folder.sh b/extra-scripts/backup_folder.sh
old mode 100644
new mode 100755
similarity index 75%
rename from bash-scripts/backup-folder/backup-folder.sh
rename to extra-scripts/backup_folder.sh
index 23e670a..28de487
--- a/bash-scripts/backup-folder/backup-folder.sh
+++ b/extra-scripts/backup_folder.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# ____ _ ______ _ _
# | _ \ | | | ____| | | | |
# | |_) | __ _ ___| | ___ _ _ __ | |__ ___ | | __| | ___ _ __
@@ -17,10 +17,10 @@
source_dir=''
destination_dir=''
webhook=''
-keep_backup=2
-compress=false
-unraid_notify=false
-quiet=false
+keep_backups=2
+compress=False
+unraid_notify=False
+quiet=False
bot_name='Notification Bot'
bar_color='FF00FF'
channel="0"
@@ -32,42 +32,43 @@ check_config() {
# Check if source directory exists
if [ ! -d "$source_dir" ]; then
echo "ERROR: Your source directory ($source_dir) does not exist please check your configuration"
- exit 2
+ exit 0
fi
# Check if source directory is set
if [ -z "$source_dir" ]; then
echo "ERROR: Your source directory ($source_dir) is not set please check your configuration"
- exit 2
+ exit 0
fi
# Check if destination directory exists
if [ ! -d "$destination_dir" ]; then
echo "ERROR: Your destination directory ($destination_dir) does not exist please check your configuration"
- exit 2
+ mkdir -p "$destination_dir"
+ echo "Destination directory created"
fi
# Check if destination directory is set
if [ -z "$destination_dir" ]; then
echo "ERROR: Your source directory ($destination_dir) is not set please check your configuration"
- exit 2
+ exit 0
fi
- # Check if 7zip command is available if compress is set to true
- if [ "$compress" == "true" ]; then
+ # Check if 7zip command is available if compress is set to True
+ if [ "$compress" == "True" ]; then
command -v 7z >/dev/null 2>&1 || {
echo -e "7Zip is not installed.\nPlease install 7Zip and rerun.\nIf on unRaid 7Zip can be found through NerdPack/NerdTools in the UnRaid appstore" >&2
- exit 1
+ exit 0
}
fi
# Check if webhook is set and in the correct format
if [ -n "$webhook" ]; then
if [[ ! $webhook =~ ^https://discord\.com/api/webhooks/ ]] && [[ ! $webhook =~ ^https://notifiarr\.com/api/v1/notification/passthrough ]]; then
echo "ERROR: Invalid webhook provided please enter a valid webhook url in the format https://discord.com/api/webhooks/ or https://notifiarr.com/api/v1/notification/passthrough"
- exit 1
+ exit 0
fi
# Check if channel is set if using Notifiarr
if [[ $webhook =~ ^https://notifiarr\.com/api/v1/notification/passthrough ]] && [ -z "$channel" ]; then
echo "ERROR: It appears you're trying to use Notifiarr as your notification agent but haven't set a channel. How will the bot know where to send the notification?"
echo "Please use the -C or --channel argument to set the channel ID used for this notification"
echo "You can find the channel ID by going to the channel you want to use and clicking the settings icon and selecting 'Copy ID'"
- exit 1
+ exit 0
fi
# Check if webhook returns valid response code
if [[ $webhook =~ ^https://notifiarr\.com/api/v1/notification/passthrough ]]; then
@@ -90,7 +91,7 @@ check_space() {
verbose_output "Checking space requirements... Please wait..."
# Get the available space in the destination directory
available_space=$(df -P "$destination_dir" | awk 'NR==2 {print $4}')
- if [ "$compress" = "true" ]; then
+ if [ "$compress" = "True" ]; then
# Calculate backup size in bytes
backup_size=$(du -s "$source_dir" | awk '{print $1}')
# Convert byte values to MB or GB
@@ -100,7 +101,7 @@ check_space() {
if [ "$backup_size" -gt "$available_space" ]; then
# Print error message and exit if not enough space available
echo "Error: Not enough disk space on $destination_dir. Available: $available_space_mb MB, Required: $backup_size_mb MB"
- exit 1
+ exit 0
fi
else
# Calculate backup size in bytes
@@ -108,7 +109,7 @@ check_space() {
if [ "$backup_size" -gt "$available_space" ]; then
# Print error message and exit if not enough space available
echo "Error: Not enough disk space on $destination_dir."
- exit 1
+ exit 0
fi
fi
# Print message that space check is complete
@@ -117,48 +118,45 @@ check_space() {
# Function to display help
display_help() {
- echo "Usage: $0 [ -s | --source ] [ -d | --destination ] [ -c | --compress ] [ -k | --keep-backup ] [ -u | --unraid-notify ] [ -w | --webhook ] [ -n | --bot-name ] [ -b | --bar-color ] [ -h | --help ]"
echo "This script will backup defined folders to a defined destination, you can either archive your files (using tar) or compress them (using 7Zip)"
echo "Options:"
- echo " -s --source : Set the source directory to backup"
- echo " -d --destination : Set the destination directory to save the backup"
- echo " -c --compress : Use compression on the backup file (default: false)"
- echo " -k --keep-backup : Number of daily backups to keep (default: 2)"
- echo " -u --unraid-notify : Use unRAID notifications for backup status (default: false)"
- echo " -q --quiet : Run script without displaying output"
- echo " -w --webhook : Use webhoo notifications (currently discord is the only one accepted) for backup status (default: false)"
- echo " -n --bot-name : Set the bot name for notifications (default: Notification Bot)"
- echo " -b --bar-color : Set the bar color for notifications supports Hex colors (default: ff00ff)"
- echo " -h --help : Show this help message"
+ echo " -s : Set the source directory to backup"
+ echo " -d : Set the destination directory to save the backup"
+ echo " -c : Use compression on the backup file (default: False)"
+ echo " -k : Number of daily backups to keep (default: 2)"
+ echo " -u : Use unRAID notifications for backup status (default: False)"
+ echo " -q : Run script without displaying output"
+ echo " -w : Use webhook notifications for backup status (default: False)"
+ echo " -n : Set the bot name for notifications (default: Notification Bot)"
+ echo " -b : Set the bar color for notifications supports Hex colors (default: ff00ff)"
+ echo " -h : Show this help message"
exit 0
}
# Function to create backups of desired directory
create_backup() {
# Print starting message
- if [ "$quiet" == false ]; then
+ if [ "$quiet" == False ]; then
echo -e "Creating backup..."
fi
# Get the current timestamp
start=$(date +%s)
- # Get the absolute path of the destination directory
- dest=$(realpath -s "$destination_dir")
# Create the backup directory in the destination directory with the name of the source directory and the current date
cd "$source_dir"/.. || exit
folder_name=$(basename "$source_dir")
- backup_path="$dest/$(date +%F)"
+ backup_path="$destination_dir/$(date +%F)"
backup_name="$(basename "$source_dir")"
mkdir -p "$backup_path"
now="$(date +"%H.%M")"
- # Check if the compress variable is true
- if [ "$compress" == "true" ]; then
+ # Check if the compress variable is True
+ if [ "$compress" == "True" ]; then
# Use tar and 7z to create a compressed archive of the source directory and save it to the backup directory
- tar --ignore-failed-read -cf - "$folder_name" | 7z a -si -t7z -m0=lzma2 -mx=1 -md=32m -mfb=64 -mmt=on -ms=off "$backup_path/$backup_name-$now.tar.7z"
+ tar -cf - "$folder_name" | 7z a -si -t7z -m0=lzma2 -mx=1 -md=32m -mfb=64 -mmt=on -ms=off "$backup_path/$backup_name-$now.tar.7z"
backup_size=$(du -sh "$backup_path/$backup_name-$now.tar.7z" | awk '{print $1}')
else
# Use tar to create an archive of the source directory and save it to the backup directory
- tar --ignore-failed-read -cf "$backup_path/$backup_name-$now.tar" "$folder_name"
+ tar -cf "$backup_path/$backup_name-$now.tar" "$folder_name"
backup_size=$(du -sh "$backup_path/$backup_name-$now.tar" | awk '{print $1}')
fi
# Get the total size of the backup folder
@@ -167,12 +165,11 @@ create_backup() {
end=$(date +%s)
# Calculate the runtime of the backup process
calculate_runtime
- # Print backup complete message, backup size and runtime if verbose is true
+ # Print backup complete message, backup size and runtime if verbose is True
verbose_output "Backup complete"
verbose_output "Backup size $total_size"
verbose_output "$run_output"
# Change permissions of the backup directory to 777
- chmod -R 777 "$dest"
}
# Function to calculate script runtime
@@ -230,7 +227,7 @@ notifiarr_payload() {
payload='{
"notification":
{
- "update": false,
+ "update": False,
"name": "'"$(basename "$source_dir") Backup"'",
"event": ""
},
@@ -326,13 +323,13 @@ hex_to_decimal() {
else
echo "Bar color: $bar_color"
echo -e "Invalid color format. Please provide a valid 6-digit hex color code (e.g. ff0000 for red)"
- exit 1
+ exit 0
fi
}
verbose_output() {
- # Check if "quiet" variable is false
- if [ "$quiet" == false ]; then
+ # Check if "quiet" variable is False
+ if [ "$quiet" == False ]; then
# Print the argument passed to the function
echo "$1"
fi
@@ -340,97 +337,45 @@ verbose_output() {
cleanup() {
# Remove oldest backups
- verbose_output "Keeping $keep_backup daily backups, removing the rest"
- find "$destination_dir" -mindepth 1 -maxdepth 1 -type d | sort -r | tail -n +"$(( $keep_backup + 1 ))" | xargs -I {} rm -rf {}
+ verbose_output "Keeping $keep_backups daily backups, removing the rest"
+ find "$destination_dir" -mindepth 1 -maxdepth 1 -type d | sort -r | tail -n +"$(( $keep_backups + 1 ))" | xargs -I {} rm -rf {}
}
# Main function
main() {
handle_options "$@"
- hex_to_decimal "$bar_color"
check_config
+ hex_to_decimal "$bar_color"
check_space
create_backup
if [ -n "$webhook" ]; then
send_notification
fi
- if [ $unraid_notify == true ]; then
+ if [ "$unraid_notify" == True ]; then
unraid_notify
fi
cleanup
}
# Define function to handle options
handle_options() {
-
- # Define valid options
- valid_long_options=("source:" "destination:" "keep-backup:" "compress" "unraid-notify" "quiet" "webhook:" "bar-color:" "bot-name:" "channel:" "help")
- valid_short_options=("s:" "d:" "k:" "c" "u" "q" "w:" "b:" "n:" "C:" "h")
-
- # Handle command-line options
- TEMP=$(getopt -o "${valid_short_options[*]}" --long "${valid_long_options[*]}" -n "$0" -- "$@")
- eval set -- "$TEMP"
- while true; do
- case "$1" in
- --source | -s)
- source_dir="$2"
- shift 2
- ;;
- --destination | -d)
- destination_dir="$2"
- shift 2
- ;;
- --keep-backup | -k)
- keep_backup="$2"
- shift 2
- ;;
- --compress | -c)
- compress=true
- shift
- ;;
- --unraid-notify | -u)
- unraid_notify=true
- shift
- ;;
- --quiet | -q)
- quiet=true
- shift
- ;;
- --webhook | -w)
- webhook="$2"
- shift 2
- ;;
- --bar-color | -b)
- hex_to_decimal "$2"
- shift 2
- ;;
- --bot-name | -n)
- bot_name="$2"
- shift 2
- ;;
- --channel | -C)
- channel="$2"
- shift 2
- ;;
- --help | -h)
- shift
- display_help
- ;;
- --)
- shift
- break
- ;;
- *)
- echo "Internal error!"
- exit 1
- ;;
+ while getopts ":s:d:c:k:u:q:w:n:b:h:C:" opt; do
+ case $opt in
+ s) source_dir="$OPTARG";;
+ d) destination_dir="$OPTARG";;
+ c) compress="$OPTARG";;
+ k) keep_backups="$OPTARG";;
+ u) unraid_notify="$OPTARG";;
+ q) quiet="$OPTARG";;
+ w) webhook="$OPTARG";;
+ n) bot_name="$OPTARG";;
+ b) bar_color="$OPTARG";;
+ C) channel="$OPTARG";;
+ h) display_help;;
+ \?) echo "Invalid option: -$OPTARG" >&2; display_help; exit 0;;
+ :) echo "Option -$OPTARG requires an argument." >&2; display_help; exit 0;;
esac
done
- # Check for any remaining arguments
- for arg in "$@"; do
- echo "Invalid argument: $arg" >&2
- display_help
- done
}
# Call main function
diff --git a/bash-scripts/backup-plex/backup-plex.sh b/extra-scripts/backup_plex.sh
old mode 100644
new mode 100755
similarity index 76%
rename from bash-scripts/backup-plex/backup-plex.sh
rename to extra-scripts/backup_plex.sh
index f34a75d..7bee485
--- a/bash-scripts/backup-plex/backup-plex.sh
+++ b/extra-scripts/backup_plex.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
#
# _____ _ ____ _ _____ _ _
# | __ \| | | _ \ | | / ____| (_) | |
@@ -14,24 +14,26 @@
# Author: Drazzilb
# License: MIT License
-# Please see the config file for more information, not required if config file is in the same directory as the script
-config_file=""
-
# <----- Do not edit below this point ----->
-config_file() {
- debug=false
- shutdown_plex=false
+quiet="False"
+unraid_notify="False"
- if [ -z "$config_file" ]; then
- script_dir=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)
- config_file="$script_dir/backup-plex.conf"
- fi
+config_dir_setup() {
+ config_dir=$(dirname "$(readlink -f "$0")")
+
+ script_path=$(dirname "$0")
+
+ config_file="$script_path/backup-plex.conf"
+}
+
+config_file() {
# Check if config file exists
if [ -f "$config_file" ]; then
# Read config file
# shellcheck source=/dev/null
source "$config_file"
+ echo "Config file exists and is accessible."
else
# Use command line arguments
# handle_options "$@"
@@ -44,59 +46,59 @@ check_config() {
# Check if source directory exists
if [ ! -d "$source_dir" ]; then
echo "ERROR: Your source directory ($source_dir) does not exist please check your configuration"
- exit 2
+ exit 0
fi
# Check if source directory is set
if [ -z "$source_dir" ]; then
echo "ERROR: Your source directory ($source_dir) is not set please check your configuration"
- exit 2
+ exit 0
fi
# Check if destination directory exists
if [ ! -d "$destination_dir" ]; then
echo "ERROR: Your destination directory ($destination_dir) does not exist please check your configuration"
- exit 2
+ exit 0
fi
# Check if destination directory is set
if [ -z "$destination_dir" ]; then
echo "ERROR: Your source directory ($destination_dir) is not set please check your configuration"
- exit 2
+ exit 0
fi
- # Check if 7zip command is available if compress is set to true
- if [ "$compress" == "true" ]; then
+ # Check if 7zip command is available if compress is set to True
+ if [[ "${compress,,}" == "true" ]]; then
command -v 7z >/dev/null 2>&1 || {
echo -e "7Zip is not installed.\nPlease install 7Zip and rerun.\nIf on unRaid 7Zip can be found through NerdPack/NerdTools in the UnRaid appstore" >&2
- exit 1
+ exit 0
}
fi
# Check if webhook is set and in the correct format
if [ -n "$webhook" ]; then
if [[ ! $webhook =~ ^https://discord\.com/api/webhooks/ ]] && [[ ! $webhook =~ ^https://notifiarr\.com/api/v1/notification/passthrough ]]; then
echo "ERROR: Invalid webhook provided please enter a valid webhook url in the format https://discord.com/api/webhooks/ or https://notifiarr.com/api/v1/notification/passthrough"
- exit 1
+ exit 0
fi
# Check if channel is set if using Notifiarr
if [[ $webhook =~ ^https://notifiarr\.com/api/v1/notification/passthrough ]] && [ -z "$channel" ]; then
echo "ERROR: It appears you're trying to use Notifiarr as your notification agent but haven't set a channel. How will the bot know where to send the notification?"
echo "Please use the -C or --channel argument to set the channel ID used for this notification"
echo "You can find the channel ID by going to the channel you want to use and clicking the settings icon and selecting 'Copy ID'"
- exit 1
+ exit 0
fi
# Check if webhook returns valid response code
if [[ $webhook =~ ^https://notifiarr\.com/api/v1/notification/passthrough ]]; then
apikey="${webhook##*/}"
- if [ "$debug" == "true" ]; then
+ if [[ "${full_backup,,}" == "true" ]]; then
echo "Checking webhook validity: $webhook"
echo "API Key: $apikey"
fi
response_code=$(curl --write-out "%{response_code}" --silent --output /dev/null -H "x-api-key: $apikey" "https://notifiarr.com/api/v1/user/validate")
else
- if [ "$debug" == "true" ]; then
+ if [[ "${full_backup,,}" == "true" ]]; then
echo "Checking webhook validity: $webhook"
fi
response_code=$(curl --write-out "%{response_code}" --silent --output /dev/null "$webhook")
fi
- if [ "$debug" == "true" ]; then
+ if [[ "${full_backup,,}" == "true" ]]; then
echo "Response: $response_code"
fi
@@ -123,13 +125,13 @@ hex_to_decimal() {
else
echo "Bar color: $bar_color"
echo -e "Invalid color format. Please provide a valid 6-digit hex color code (e.g. ff0000 for red)"
- exit 1
+ exit 0
fi
}
verbose_output() {
- # Check if "quiet" variable is false
- if [ "$quiet" == false ]; then
+ # Check if "quiet" variable is False
+ if [[ "${quiet,,}" == "false" ]]; then
# Print the argument passed to the function
echo -e "$1"
fi
@@ -188,8 +190,8 @@ unraid_notification() {
send_notification() {
if [[ -n "$webhook" ]]; then
- if [ $debug == "true" ]; then
- echo -e "\ncurl -s -H \"Content-Type: application/json\" -X POST -d \"$payload\" \"$webhook\""
+ if [[ "${full_backup,,}" == "true" ]]; then
+ echo -e "\ncurl -s -H \"Content-Type: application/json\" -X POST -d \'$payload\' \"$webhook\""
curl -s -H "Content-Type: application/json" -X POST -d "$payload" "$webhook"
else
curl -s -H "Content-Type: application/json" -X POST -d "$payload" "$webhook" /dev/null
@@ -202,7 +204,7 @@ field_builder() {
local title_text="$1"
local text_value="$2"
local reset="$3"
- if [ "$reset" == "true" ]; then
+ if [ "$reset" == "True" ]; then
fields=""
fi
field_builder='{
@@ -223,6 +225,7 @@ build_payload(){
# Get a random joke from the specified file
joke=$(curl -s https://raw.githubusercontent.com/Drazzilb08/userScripts/master/jokes.txt | shuf -n 1)
# ensure joke is valid for json
+ # shellcheck disable=SC2001
joke=$(echo "$joke" | sed 's/"/\\"/g')
# ensure jokes is valid for json
if [[ $webhook =~ ^https://discord\.com/api/webhooks/ ]]; then
@@ -251,7 +254,7 @@ build_payload(){
common_fields='{
"notification":
{
- "update": false,
+ "update": False,
"name": "Plex Backup",
"event": ""
},
@@ -287,7 +290,7 @@ check_space() {
verbose_output "Checking space requirements... Please wait..."
# Get the available space in the destination directory
available_space=$(df -P "$destination_dir" | awk 'NR==2 {print $4}')
- if [ "$compress" = "true" ]; then
+ if [ "$compress" = "True" ]; then
# Calculate backup size in bytes
backup_size=$(du -s "$source_dir" | awk '{print $1}')
# Convert byte values to MB or GB
@@ -296,7 +299,7 @@ check_space() {
if [ "$backup_size" -gt "$available_space" ]; then
# Print error message and exit if not enough space available
echo "Error: Not enough disk space on $destination_dir. Available: $available_space_mb MB, Required: $backup_size_mb MB"
- exit 1
+ exit 0
fi
else
# Calculate backup size in bytes
@@ -304,7 +307,7 @@ check_space() {
if [ "$backup_size" -gt "$available_space" ]; then
# Print error message and exit if not enough space available
echo "Error: Not enough disk space on $destination_dir."
- exit 1
+ exit 0
fi
fi
# Print message that space check is complete
@@ -333,15 +336,18 @@ create_backup() {
"$source_dir/Plug-in Support/Preferences"
"$source_dir/Preferences.xml"
)
- exclude=()
+ exclude=(
+ "--exclude=$source_dir/*.db-*-*-*"
+ )
else
backup_source=("$source_dir")
exclude=(
"--exclude=$source_dir/Cache"
"--exclude=$source_dir/Codecs"
+ "--exclude=$source_dir/*.db-*-*-*"
)
fi
- if [ "$debug" == "true" ]; then
+ if [[ "${full_backup,,}" == "true" ]]; then
echo "Source: $source_dir"
echo "Destination: $destination_dir"
echo "Dest: $dest"
@@ -355,8 +361,8 @@ create_backup() {
done
fi
# Check if the compress flag is set, and create the archive accordingly
- if [ "$compress" == "true" ]; then
- if [ "$dry_run" == true ]; then
+ if [ "$compress" == "True" ]; then
+ if [ "$dry_run" == "True" ]; then
extension="tar.7z.dry_run"
echo "Dry run: Would create $backup_path/$folder_type-plex_backup.tar.7z"
touch "$backup_path/$folder_type-plex_backup.tar.7z.dry_run"
@@ -366,7 +372,7 @@ create_backup() {
tar --ignore-failed-read -cf - "${exclude[@]}" "${backup_source[@]}" | 7z a -si -t7z -m0=lzma2 -mx=1 -md=32m -mfb=64 -mmt=on -ms=off "$backup_path/$folder_type-plex_backup.tar.7z"
fi
else
- if [ "$dry_run" == true ]; then
+ if [ "$dry_run" == "True" ]; then
extension="tar.dry_run"
echo "Dry run: Would create $backup_path/$folder_type-plex_backup.tar"
touch "$backup_path/$folder_type-plex_backup.tar.dry_run"
@@ -388,10 +394,9 @@ create_backup() {
# Set the end time
end=$(date +%s)
# Set permissions of the destination directory to 777
- chmod -R 777 "$dest"
verbose_output "\nBackup complete"
calculate_runtime
- if [ "$dry_run" == true ]; then
+ if [ "$dry_run" == "True" ]; then
# create made up sizes for dry run
essential_backup_size="1.0G"
full_backup_size="1.0G"
@@ -402,7 +407,7 @@ create_backup() {
}
stop_plex(){
- if [ "$shutdown_plex" == "true" ]; then
+ if [ "$shutdown_plex" == "True" ]; then
if [ "$backup_type" == "essential" ]; then
backup_notification="Essential Backup"
elif [ "$backup_type" == "full" ]; then
@@ -412,6 +417,7 @@ stop_plex(){
elif [ "$backup_type" == "essential_no_full" ]; then
backup_notification="Essential Backup"
fi
+ # shellcheck disable=SC2143
if [[ $(docker ps --format '{{.Names}}' 2>/dev/null | grep -w '^plex$') ]]; then
plex_type="docker"
# check if plex is running in systemctl
@@ -419,7 +425,7 @@ stop_plex(){
plex_type="systemctl"
fi
current_state="running"
- if [ "$debug" == "true" ]; then
+ if [[ "${full_backup,,}" == "true" ]]; then
echo "Current state: $current_state"
echo "Plex type: $plex_type"
fi
@@ -433,14 +439,14 @@ stop_plex(){
# none of the above stop commands work throw error
else
echo "ERROR: Plex is not running in a docker container and systemctl is not installed."
- exit 1
+ exit 0
fi
build_payload
- field_builder "Plex is being shut down for a/an" "$backup_notification backup" "true"
+ field_builder "Plex is being shut down for a/an" "$backup_notification backup" "True"
payload "Plex Status"
send_notification
current_state="stopped"
- if [ "$debug" == "true" ]; then
+ if [[ "${full_backup,,}" == "true" ]]; then
echo "Current state: $current_state"
echo "Plex type: $plex_type"
fi
@@ -448,8 +454,17 @@ stop_plex(){
}
start_plex() {
- if [ "$shutdown_plex" == "true" ]; then
- if [ "$debug" == "true" ]; then
+ if [ "$shutdown_plex" == "True" ]; then
+ if [ "$backup_type" == "essential" ]; then
+ backup_notification="Essential Backup"
+ elif [ "$backup_type" == "full" ]; then
+ backup_notification="Full Backup"
+ elif [ "$backup_type" == "both" ]; then
+ backup_notification="Essential & Full Backup"
+ elif [ "$backup_type" == "essential_no_full" ]; then
+ backup_notification="Essential Backup"
+ fi
+ if [[ "${full_backup,,}" == "true" ]]; then
echo "Current state: $current_state"
echo "Plex type: $plex_type"
fi
@@ -463,14 +478,14 @@ start_plex() {
# none of the above stop commands work throw error
else
echo "ERROR: Plex is not running in a docker container and systemctl is not installed."
- exit 1
+ exit 0
fi
build_payload
- field_builder "Plex is being started started after a/an" "$backup_notification backup" "true"
+ field_builder "Plex is being started after a/an" "$backup_notification backup" "True"
payload "Plex Status"
send_notification
current_state="stopped"
- if [ "$debug" == "true" ]; then
+ if [[ "${full_backup,,}" == "true" ]]; then
echo "Current state: $current_state"
echo "Plex type: $plex_type"
fi
@@ -479,41 +494,46 @@ start_plex() {
main() {
# Check if config file is defined in command line arguments
- config_file
handle_options "$@"
- # Check for --config= argument in command line options and assign the value to config_file variable
- for arg in "$@"; do
- if [[ $arg == --config=* ]]; then
- config_file="${arg#*=}"
- break
- fi
- done
+ config_dir_setup
+ if [ "$use_config_file" == "True" ]; then
+ config_file
+ fi
hex_to_decimal "$bar_color"
check_config "$@"
- # check for last_plex_backup.tmp file and if it exists, read the file to get the last backup date
- if [ -f "$(dirname "$0")/last_plex_backup.tmp" ]; then
- while IFS= read -r line; do
- lastbackup=$line
- done <"$(dirname "$0")/last_plex_backup.tmp"
+ last_plex_backup="$config_dir/.last_plex_backup.tmp"
+
+ # check for .last_plex_backup.tmp file and if it exists, read the file to get the last backup date
+ if [ -f "$last_plex_backup" ]; then
+ lastbackup=$(cat "$last_plex_backup")
else
lastbackup=0
fi
-
+
+ if [[ "${full_backup,,}" == "true" ]]; then
+ echo "Config Dir: $config_dir"
+ echo "Last Plex Backup: $last_plex_backup"
+ echo "Last backup: $lastbackup"
+ fi
# get current date
current_date=$(date +"%m/%d/%y")
# calculate the number of days since last backup
days=$((($(date --date="$current_date" +%s) - $(date --date="$lastbackup" +%s)) / (60 * 60 * 24)))
+ if [[ "${full_backup,,}" == "true" ]]; then
+ echo "Current Date: $current_date"
+ echo "Days since last backup: $days"
+ fi
start=$(date +%s)
- # check if full_backup is set to false
+ # check if full_backup is set to False
stop_plex
- if [ "$full_backup" == "false" ]; then
+ if [[ "${full_backup,,}" == "false" ]]; then
# create essential backup
backup_type="essential"
create_backup "Essential"
build_payload
- field_builder "Runtime" "$run_output" "true"
- field_builder "This Essential backup size" "$essential_backup_size" "false"
- field_builder "Total size of all Essential backups" "$essential_backup_total_size" "false"
+ field_builder "Runtime" "$run_output" "True"
+ field_builder "This Essential backup size" "$essential_backup_size" "False"
+ field_builder "Total size of all Essential backups" "$essential_backup_total_size" "False"
payload "Essential Backup"
send_notification
verbose_output ""
@@ -526,13 +546,13 @@ main() {
backup_type="both"
create_backup "Full"
build_payload
- field_builder "Runtime" "$run_output" "true"
- field_builder "This Full backup size" "$full_backup_size" "false"
- field_builder "Total size of all Full backups" "$full_backup_total_size" "false"
+ field_builder "Runtime" "$run_output" "True"
+ field_builder "This Full backup size" "$full_backup_size" "False"
+ field_builder "Total size of all Full backups" "$full_backup_total_size" "False"
payload "Full Backup"
send_notification
days="0"
- echo "$current_date" >"$(dirname "$0")"/last_plex_backup.tmp
+ echo "$current_date" > "$last_plex_backup"
verbose_output "Total size of this Essential backup: ${essential_backup_size}"
verbose_output "Total size of this Full backup: ${full_backup_size}"
else
@@ -545,15 +565,16 @@ main() {
backup_type="full"
create_backup "Full"
build_payload
- field_builder "Runtime" "$run_output" "true"
- field_builder "This Essential backup size" "$essential_backup_size" "false"
- field_builder "This Full backup size" "$full_backup_size" "false"
- field_builder "Total size of all Essential backups" "$essential_backup_total_size" "false"
- field_builder "Total size of all Full backups" "$full_backup_total_size" "false"
- field_builder "Days since last Full backup" "$days" "false"
+ field_builder "Runtime" "$run_output" "True"
+ field_builder "This Essential backup size" "$essential_backup_size" "False"
+ field_builder "This Full backup size" "$full_backup_size" "False"
+ field_builder "Total size of all Essential backups" "$essential_backup_total_size" "False"
+ field_builder "Total size of all Full backups" "$full_backup_total_size" "False"
+ field_builder "Days since last Full backup" "$days" "False"
payload "Full and Essential Backup"
send_notification
- echo "$current_date" >"$(dirname "$0")"/last_plex_backup.tmp
+
+ echo "$current_date" > "$last_plex_backup"
days="0"
verbose_output "Total size of this Full backup: ${full_backup_size}"
fi
@@ -569,12 +590,12 @@ main() {
if [ -d "$destination_dir/Full/" ]; then
verbose_output "Total size of all Full backups: $full_backup_total_size"
fi
- # check if unraid_notify is set to true and call unraid_notification function
- if [ "$unraid_notify" == "true" ]; then
+ # check if unraid_notify is set to True and call unraid_notification function
+ if [[ "${unraid_notify,,}" == "true" ]]; then
unraid_notification
fi
- # check if debug is set to true and call debug_output_function
- if [ $debug == "true" ]; then
+ # check if debug is set to True and call debug_output_function
+ if [[ "${full_backup,,}" == "true" ]]; then
debug_output_function
fi
verbose_output 'All Done!'
@@ -614,118 +635,54 @@ debug_output_function() {
# Function to display help
display_help() {
- echo "Usage: $0 [ -s | --source ] [ -d | --destination ] [ -F | --force-full ] [ -f | full-backup] [ -k | --keep-essential ] [ -K | --keep-full ] [ -c | --compress ] [ -u | --unraid-notify ] [ -q | --quiet ] [ -w | --webhook ] [ -n | --bot-name ] [ -b | --bar-color ] [ -h | --help ]"
+ echo "Usage: $0 [ -s ] [ -d ] [ -F ] [ -f ] [ -k ] [ -K ] [ -c ] [ -u ] [ -q ] [ -w ] [ -C ] [ -b ] [ -n ] [ -r ] [ -x ] [ -h ]"
echo "This script is for backing up and compressing Plex Media Server data"
echo "Options:"
- echo " -s --source : Source directory to backup"
- echo " -d --destination : Destination directory to store backups"
- echo " -F --force-full : Number of days to wait before forcing a full backup"
- echo " -f --full-backup : Perform full backup"
- echo " -k --keep-essential : Number of essential backups to keep"
- echo " -K --keep-full : Number of full backups to keep"
- echo " -c --compress : Compress backups using 7zip"
- echo " -u --unraid-notify : Send notification to Unraid webGui"
- echo " -q --quiet : Quiet mode"
- echo " -w --webhook : Webhook url (Notifarr and Discord Supported)"
- echo " -C --channel : Channel ID for discord noticiations (used with Notifiarr)"
- echo " -b --bar-color : Discord bar color"
- echo " -n --bot-name : Discord bot name"
- echo " -r --dry-run : Run script without backing up any fils (for testing)"
- echo " -x --config-file : Set config file location, with config file if command arguments are used they will take precedence"
- echo " -h --help : Display this help and exit"
+ echo " -s : Source directory to backup"
+ echo " -d : Destination directory to store backups"
+ echo " -F : Number of days to wait before forcing a full backup"
+ echo " -f : Perform full backup"
+ echo " -k : Number of essential backups to keep"
+ echo " -K : Number of full backups to keep"
+ echo " -c : Compress backups using 7zip"
+ echo " -u : Send notification to Unraid webGui"
+ echo " -q : Quiet mode"
+ echo " -w : Webhook url (Notifiarr and Discord supported)"
+ echo " -C : Channel ID for Discord notifications (used with Notifiarr)"
+ echo " -b : Discord bar color"
+ echo " -n : Discord bot name"
+ echo " -r : Run script without backing up any files (for testing)"
+ echo " -h : Display this help and exit"
exit 0
}
handle_options() {
+ bar_color='e5a00d' # default bar color
+ # Set default values for variables
+ use_config_file="True"
+ shutdown_plex="False"
+ dry_run="False"
- # Define valid options
- valid_long_options=("source:" "destination:" "force-full:" "full-backup" "keep-essential:" "keep-full:" "compress" "unraid-notify" "quiet" "webhook:" "channel:" "bar-color:" "bot-name:" "dry-run" "config-file:" "help")
- valid_short_options=("s:" "d:" "F:" "f" "k:" "K:" "c" "u" "q" "w:" "C:" "b:" "n:" "r" "x:" "h")
-
- # Handle command-line options
- TEMP=$(getopt -o "${valid_short_options[*]}" --long "${valid_long_options[*]}" -n "$0" -- "$@")
- eval set -- "$TEMP"
- while true; do
- case "$1" in
- --source | -s)
- source_dir="$2"
- shift 2
- ;;
- --destination | -d)
- destination_dir="$2"
- shift 2
- ;;
- --force-full | -F)
- force_full_backup="$2"
- shift 2
- ;;
- --full-backup | -f)
- full_backup=true
- shift
- ;;
- --keep-essential | -k)
- keep_essential="$2"
- shift 2
- ;;
- --keep-full | -K)
- keep_full="$2"
- shift 2
- ;;
- --compress | -c)
- compress=true
- shift
- ;;
- --unraid-notify | -u)
- unraid_notify=true
- shift
- ;;
- --quiet | -q)
- quiet=true
- shift
- ;;
- --webhook | -w)
- webhook="$2"
- shift 2
- ;;
- --channel | -C)
- channel="$2"
- shift 2
- ;;
- --bar-color | -b)
- bar_color="$2"
- shift 2
- ;;
- --bot-name | -n)
- bot_name="$2"
- shift 2
- ;;
- --dry-run | -r)
- dry_run=true
- shift
- ;;
- --config-file | -x)
- config_file="$2"
- shift 2
- ;;
- --help | -h)
- shift
- display_help
- ;;
- --)
- shift
- break
- ;;
- *)
- echo "Internal error!"
- exit 1
- ;;
+ # Function to handle options
+ while getopts ":s:d:k:c:w:C:K:F:f:r:S:D:x:" opt; do
+ case $opt in
+ s) source_dir=$OPTARG ;;
+ d) destination_dir=$OPTARG ;;
+ k) keep_essential=$OPTARG ;;
+ c) compress=$OPTARG ;;
+ w) webhook=$OPTARG ;;
+ C) channel=$OPTARG ;;
+ K) keep_full=$OPTARG ;;
+ F) force_full_backup=$OPTARG ;;
+ f) full_backup=$OPTARG ;;
+ r) dry_run=$OPTARG ;;
+ S) shutdown_plex=$OPTARG ;;
+ D) debug=$OPTARG ;;
+ x) use_config_file=$OPTARG ;;
+ \?) echo "Invalid option: -$OPTARG" >&2 ;;
+ :) echo "Option -$OPTARG requires an argument." >&2 ;;
esac
done
- # Check for any remaining arguments
- for arg in "$@"; do
- echo "Invalid argument: $arg" >&2
- display_help
- done
}
# Call main function
main "$@"
diff --git a/bash-scripts/backup-appdata/exclude-file.txt b/extra-scripts/exclude-file.txt
old mode 100644
new mode 100755
similarity index 100%
rename from bash-scripts/backup-appdata/exclude-file.txt
rename to extra-scripts/exclude-file.txt
diff --git a/jokes.txt b/jokes.txt
index f0df8f7..1552144 100644
--- a/jokes.txt
+++ b/jokes.txt
@@ -45,18 +45,15 @@ Want to hear a joke about a piece of paper? Never mind... it's tearable.
I just watched a documentary about beavers. It was the best dam show I ever saw!
If you see a robbery at an Apple Store what re you? An iWitness?
Spring is here! I got so excited I wet my plants!
-What’s Forrest Gump’s password? 1forrest1
Why did the Clydesdale give the pony a glass of water? Because he was a little horse!
CASHIER: "Would you like the milk in a bag, sir?" DAD: "No, just leave it in the carton!’”
Did you hear about the guy who invented Lifesavers? They say he made a mint.
I bought some shoes from a drug dealer. I don't know what he laced them with, but I was tripping all day!
Why do chicken coops only have two doors? Because if they had four, they would be chicken sedans!
How do you make a Kleenex dance? Put a little boogie in it!
-A termite walks into a bar and asks "Is the bar tender here?"
Why did the invisible man turn down the job offer? He couldn't see himself doing it.
I used to have a job at a calendar factory but I got the sack because I took a couple of days off.
A woman is on trial for beating her husband to death with his guitar collection. Judge says, "First offender?" She says, "No, first a Gibson! Then a Fender!”
-How do you make holy water? You boil the hell out of it.
I had a dream that I was a muffler last night. I woke up exhausted!
Did you hear about the circus fire? It was in tents!
Don't trust atoms. They make up everything!
@@ -96,8 +93,6 @@ I could tell a joke about pizza, but it's a little cheesy.
When does a joke become a dad joke? When it becomes apparent.
What’s an astronaut’s favorite part of a computer? The space bar.
What did the shy pebble wish for? That she was a little boulder.
-I'm tired of following my dreams. I'm just going to ask them where they are going and meet up with them later.
-Did you hear about the guy whose whole left side was cut off? He's all right now.
Why didn’t the skeleton cross the road? Because he had no guts.
What did one nut say as he chased another nut? I'm a cashew!
Chances are if you' ve seen one shopping center... you've seen a mall.
@@ -114,16 +109,11 @@ What is a centipedes's favorite Beatle song? I want to hold your hand, hand, ha
My first time using an elevator was an uplifting experience. The second time let me down.
To be Frank... I'd have to change my name.
Slept like a log last night … woke up in the fireplace.
-Why does a Moon-rock taste better than an Earth-rock? Because it's a little meteor.
How many South Americans does it take to change a lightbulb? A Brazilian
-I don't trust stairs. They're always up to something.
-A police officer caught two kids playing with a firework and a car battery. He charged one and let the other one off.
What is the difference between ignorance and apathy? I don't know and I don't care.
I went to a Foo Fighters Concert once... It was Everlong...
Some people eat light bulbs. They say it's a nice light snack.
What do you get hanging from Apple trees? Sore arms.
-Last night me and my girlfriend watched three DVDs back to back. Luckily I was the one facing the TV.
-I got a reversible jacket for Christmas, I can't wait to see how it turns out.
What did Romans use to cut pizza before the rolling cutter was invented? Lil Caesars
My pet mouse 'Elvis' died last night. He was caught in a trap..
Never take advice from electrons. They are always negative.
@@ -137,21 +127,22 @@ When is a door not a door? When it's ajar.
I made a belt out of watches once... It was a waist of time.
This furniture store keeps emailing me, all I wanted was one night stand!
How do you find Will Smith in the snow? Look for fresh prints.
-I just read a book about Stockholm syndrome. It was pretty bad at first, but by the end I liked it.
-Why do trees seem suspicious on sunny days? Dunno, they're just a bit shady.
If at first you don't succeed sky diving is not for you!
What kind of music do mummy's like? Rap
A book just fell on my head. I only have my shelf to blame.
What did the dog say to the two trees? Bark bark.
If a child refuses to sleep during nap time... are they guilty of resisting a rest?
-Have you ever heard of a music group called Cellophane? They mostly wrap.
-What did the mountain climber name his son? Cliff.
Why should you never trust a pig with a secret? Because it's bound to squeal.
Why are mummys scared of vacation? They're afraid to unwind.
Whiteboards ... are remarkable.
What kind of dinosaur loves to sleep? A stega-snore-us.
+Why don't scientists trust atoms? Because they make up everything.
+What do you call a dinosaur that is sleeping? A dino-snore.
+What do you call a dinosaur that never gives up? Try and try and try and try-ceratops.
What kind of tree fits in your hand? A palm tree!
I used to be addicted to the hokey pokey but I turned myself around.
+What do you call a fake noodle? An impasta.
+What do you call a cow with two legs? Lean beef.
How many tickles does it take to tickle an octopus? Ten-tickles!
What musical instrument is found in the bathroom? A tuba toothpaste.
My boss told me to attach two pieces of wood together... I totally nailed it!
@@ -159,7 +150,6 @@ What was the pumpkin’s favorite sport? Squash.
What do you call corn that joins the army? Kernel.
I've been trying to come up with a dad joke about momentum but I just can't seem to get it going.
Why don't sharks eat clowns? Because they taste funny.
-Just read a few facts about frogs. They were ribbiting.
Why didn’t the melons get married? Because they cantaloupe.
What’s a computer’s favorite snack? Microchips!
Why was the robot so tired after his road trip? He had a hard drive.
@@ -183,20 +173,16 @@ What's the difference between a TV and a newspaper? Ever tried swatting a fly wi
What did one elevator say to the other elevator? I think I'm coming down with something!
Why was the belt arrested? Because it held up some pants!
What makes the calendar seem so popular? Because it has a lot of dates!
-Why did Mickey Mouse take a trip into space?He wanted to find Pluto!
Why do you go to bed every night? Because the bed won't come to you!
What has four wheels and flies? A garbage truck!
Why did the robber take a bath before he stole from the bank? He wanted to make a clean get away!
Just watched a documentary about beavers. It was the best damn program I’ve ever seen.
Slept like a log last night woke up in the fireplace.
-Why did the scarecrow win an award? Because he was outstanding in his field.
-Why does a chicken coop only have two doors? Because if it had four doors it would be a chicken sedan.
What’s the difference between an African elephant and an Indian elephant? About 5000 miles
Why did the coffee file a police report? It got mugged.
What did the grape do when he got stepped on? He let out a little wine.
How many apples grow on a tree? All of them.
What name do you give a person with a rubber toe? Roberto
-Did you hear about the kidnapping at school? It’s fine, he woke up.
Why do scuba divers fall backwards into the water? Because if they fell forwards they’d still be in the boat.
How does a penguin build it’s house? Igloos it together.
What do you call a man with a rubber toe? Roberto
@@ -351,4 +337,680 @@ My teachers told me I'd never amount to much because I procrastinate so much. I
Will glass coffins be a success? Remains to be seen.
Did you hear about the guy whose whole left side got amputated? He’s all right now.
The man who survived both mustard gas and pepper spray is a seasoned veteran now.
-Have you heard about the new restaurant called ‘Karma?’ There’s no menu—you get what you deserve.
\ No newline at end of file
+Have you heard about the new restaurant called ‘Karma?’ There’s no menu—you get what you deserve.
+What did one pirate say to the other when he beat him at chess?<>Checkmatey.
+I burned 2000 calories today<>I left my food in the oven for too long.
+I startled my next-door neighbor with my new electric power tool. <>I had to calm him down by saying “Don’t worry, this is just a drill!”
+I broke my arm in two places. <>My doctor told me to stop going to those places.
+I quit my job at the coffee shop the other day. <>It was just the same old grind over and over.
+I never buy anything that has Velcro with it...<>it’s a total rip-off.
+I used to work at a soft drink can crushing company...<>it was soda pressing.
+I wondered why the frisbee kept on getting bigger. <>Then it hit me.
+I was going to tell you a fighting joke...<>but I forgot the punch line.
+What is the most groundbreaking invention of all time? <>The shovel.
+I’m starting my new job at a restaurant next week. <>I can’t wait.
+I visited a weight loss website...<>they told me I have to have cookies disabled.
+Did you hear about the famous Italian chef that recently died? <>He pasta way.
+Broken guitar for sale<>no strings attached.
+I could never be a plumber<>it’s too hard watching your life’s work go down the drain.
+I cut my finger slicing cheese the other day...<>but I think I may have grater problems than that.
+What time did you go to the dentist yesterday?<>Tooth-hurty.
+What kind of music do astronauts listen to?<>Neptunes.
+Rest in peace, boiled water. <>You will be mist.
+What is the only concert in the world that costs 45 cents? <>50 Cent, featuring Nickelback.
+It’s not a dad bod<> it’s a father figure.
+My wife recently went on a tropical food diet and now our house is full of this stuff. <>It’s enough to make a mango crazy.
+What do you call Santa’s little helpers? <>Subordinate clauses.
+Want to hear a construction joke? <>Sorry, I’m still working on it.
+What’s the difference between a hippo and a zippo? <>One is extremely big and heavy, and the other is a little lighter.
+I burnt my Hawaiian pizza today in the oven, <>I should have cooked it on aloha temperature.
+Anyone can be buried when they die<>but if you want to be cremated then you have to urn it.
+Where did Captain Hook get his hook? <>From the second-hand store.
+I am such a good singer that people always ask me to sing solo<>solo that they can’t hear me.
+I am such a good singer that people ask me to sing tenor<>tenor twelve miles away.
+Occasionally to relax I just like to tuck my knees into my chest and lean forward.<> That’s just how I roll.
+What did the glass of wine say to the glass of beer? Nothing. <>They barley knew each other.
+I’ve never trusted stairs. <>They are always up to something.
+Why did Shakespeare’s wife leave him? <>She got sick of all the drama.
+I just bought a dictionary but all of the pages are blank. <>I have no words to describe how mad I am.
+If you want to get a job at the moisturizer factory... <>you’re going to have to apply daily.
+I don’t know what’s going to happen next year. <>It’s probably because I don’t have 2020 vision.
+Want to hear a joke about going to the bathroom? <>Urine for a treat.
+I couldn’t figure out how to use the seat belt. <>Then it just clicked.
+I got an email the other day teaching me how to read maps backwards<>turns out it was just spam.
+I'm reading a book about anti-gravity.<> It's impossible to put down!
+You're American when you go into the bathroom, and you're American when you come out, but do you know what you are while you're in there?<> European.
+Did you know the first French fries weren't actually cooked in France?<> They were cooked in Greece.
+Want to hear a joke about a piece of paper? Never mind... <>it's tearable.
+I just watched a documentary about beavers. <>It was the best dam show I ever saw!
+If you see a robbery at an Apple Store what re you?<> An iWitness?
+Spring is here! <>I got so excited I wet my plants!
+What’s Forrest Gump’s password?<> 1forrest1
+Why did the Clydesdale give the pony a glass of water? <>Because he was a little horse!
+CASHIER: "Would you like the milk in a bag, sir?" <>DAD: "No, just leave it in the carton!’”
+Did you hear about the guy who invented Lifesavers? <>They say he made a mint.
+I bought some shoes from a drug dealer.<> I don't know what he laced them with, but I was tripping all day!
+Why do chicken coops only have two doors?<> Because if they had four, they would be chicken sedans!
+How do you make a Kleenex dance? <>Put a little boogie in it!
+A termite walks into a bar and asks<>"Is the bar tender here?"
+Why did the invisible man turn down the job offer?<> He couldn't see himself doing it.
+I used to have a job at a calendar factory <>but I got the sack because I took a couple of days off.
+A woman is on trial for beating her husband to death with his guitar collection. Judge says, "First offender?" <>She says, "No, first a Gibson! Then a Fender!”
+How do you make holy water?<> You boil the hell out of it.
+I had a dream that I was a muffler last night.<> I woke up exhausted!
+Did you hear about the circus fire?<> It was in tents!
+Don't trust atoms.<> They make up everything!
+How many tickles does it take to make an octopus laugh? <>Ten-tickles.
+I’m only familiar with 25 letters in the English language.<> I don’t know why.
+Why did the cow in the pasture get promoted at work?<> Because he is OUT-STANDING in his field!
+What do prisoners use to call each other?<> Cell phones.
+Why couldn't the bike standup by itself? <>It was two tired.
+Who was the fattest knight at King Arthur’s round table?<> Sir Cumference.
+Did you see they made round bails of hay illegal in Wisconsin? <>It’s because the cows weren’t getting a square meal.
+You know what the loudest pet you can get is?<> A trumpet.
+What do you get when you cross a snowman with a vampire?<> Frostbite.
+What do you call a deer with no eyes?<> No idea!
+Can February March? <>No, but April May!
+What do you call a lonely cheese? <>Provolone.
+Why can't you hear a pterodactyl go to the bathroom?<> Because the pee is silent.
+What did the buffalo say to his son when he dropped him off at school?<> Bison.
+What do you call someone with no body and no nose? <>Nobody knows.
+You heard of that new band 1023MB? <>They're good but they haven't got a gig yet.
+Why did the crab never share?<> Because he's shellfish.
+How do you get a squirrel to like you? <>Act like a nut.
+Why don't eggs tell jokes? <>They'd crack each other up.
+Why can't a nose be 12 inches long? <>Because then it would be a foot.
+Did you hear the rumor about butter? <>Well, I'm not going to spread it!
+I made a pencil with two erasers. <>It was pointless.
+I used to hate facial hair...<>but then it grew on me.
+I decided to sell my vacuum cleaner—<>it was just gathering dust!
+I had a neck brace fitted years ago<> and I've never looked back since.
+You know, people say they pick their nose,<> but I feel like I was just born with mine.
+What do you call an elephant that doesn't matter?<> An irrelephant.
+What do you get from a pampered cow? <>Spoiled milk.
+It's inappropriate to make a 'dad joke' if you're not a dad.<> It's a faux pa.
+How do lawyers say goodbye? <>Sue ya later!
+Wanna hear a joke about paper? <>Never mind—it's tearable.
+What's the best way to watch a fly fishing tournament? <>Live stream.
+I could tell a joke about pizza,<> but it's a little cheesy.
+When does a joke become a dad joke?<> When it becomes apparent.
+What’s an astronaut’s favorite part of a computer? <>The space bar.
+What did the shy pebble wish for?<>That she was a little boulder.
+I'm tired of following my dreams. <>I'm just going to ask them where they are going and meet up with them later.
+Did you hear about the guy whose whole left side was cut off? <>He's all right now.
+Why didn’t the skeleton cross the road? <>Because he had no guts.
+What did one nut say as he chased another nut? <> I'm a cashew!
+Chances are if you' ve seen one shopping center...<> you've seen a mall.
+I knew I shouldn't steal a mixer from work...<>but it was a whisk I was willing to take.
+How come the stadium got hot after the game? <>Because all of the fans left.
+Why was it called the dark ages? <>Because of all the knights.
+Why did the tomato blush? <>Because it saw the salad dressing.
+Did you hear the joke about the wandering nun? <>She was a roman catholic.
+What creature is smarter than a talking parrot? <>A spelling bee.
+I'll tell you what often gets over looked...<> garden fences.
+Why did the kid cross the playground? <>To get to the other slide.
+Why do birds fly south for the winter?<> Because it's too far to walk.
+What is a centipedes's favorite Beatle song? <> I want to hold your hand, hand, hand, hand...
+My first time using an elevator was an uplifting experience. <>The second time let me down.
+To be Frank...<> I'd have to change my name.
+Slept like a log last night … <>woke up in the fireplace.
+Why does a Moon-rock taste better than an Earth-rock? <>Because it's a little meteor.
+How many South Americans does it take to change a lightbulb?<> A Brazilian
+I don't trust stairs.<> They're always up to something.
+A police officer caught two kids playing with a firework and a car battery.<> He charged one and let the other one off.
+What is the difference between ignorance and apathy?<>I don't know and I don't care.
+I went to a Foo Fighters Concert once... <>It was Everlong...
+Some people eat light bulbs. <>They say it's a nice light snack.
+What do you get hanging from Apple trees? <> Sore arms.
+Last night me and my girlfriend watched three DVDs back to back.<> Luckily I was the one facing the TV.
+I got a reversible jacket for Christmas,<> I can't wait to see how it turns out.
+What did Romans use to cut pizza before the rolling cutter was invented? <>Lil Caesars
+My pet mouse 'Elvis' died last night. <>He was caught in a trap..
+Never take advice from electrons. <>They are always negative.
+Why are oranges the smartest fruit? <>Because they are made to concentrate.
+What did the beaver say to the tree? <>It's been nice gnawing you.
+How do you fix a damaged jack-o-lantern?<> You use a pumpkin patch.
+What did the late tomato say to the early tomato? <>I’ll ketch up
+I have kleptomania...<>when it gets bad, I take something for it.
+I used to be addicted to soap...<> but I'm clean now.
+When is a door not a door?<> When it's ajar.
+I made a belt out of watches once...<> It was a waist of time.
+This furniture store keeps emailing me,<> all I wanted was one night stand!
+How do you find Will Smith in the snow?<> Look for fresh prints.
+I just read a book about Stockholm syndrome.<> It was pretty bad at first, but by the end I liked it.
+Why do trees seem suspicious on sunny days? <>Dunno, they're just a bit shady.
+If at first you don't succeed<> sky diving is not for you!
+What kind of music do mummy's like?<>Rap
+A book just fell on my head. <>I only have my shelf to blame.
+What did the dog say to the two trees? <>Bark bark.
+If a child refuses to sleep during nap time...<> are they guilty of resisting a rest?
+Have you ever heard of a music group called Cellophane?<> They mostly wrap.
+What did the mountain climber name his son?<>Cliff.
+Why should you never trust a pig with a secret?<> Because it's bound to squeal.
+Why are mummys scared of vacation?<> They're afraid to unwind.
+Whiteboards ...<> are remarkable.
+What kind of dinosaur loves to sleep?<>A stega-snore-us.
+What kind of tree fits in your hand?<> A palm tree!
+I used to be addicted to the hokey pokey<> but I turned myself around.
+How many tickles does it take to tickle an octopus?<> Ten-tickles!
+What musical instrument is found in the bathroom?<> A tuba toothpaste.
+My boss told me to attach two pieces of wood together... <>I totally nailed it!
+What was the pumpkin’s favorite sport?<>Squash.
+What do you call corn that joins the army?<> Kernel.
+I've been trying to come up with a dad joke about momentum <>but I just can't seem to get it going.
+Why don't sharks eat clowns? <> Because they taste funny.
+Just read a few facts about frogs.<> They were ribbiting.
+Why didn’t the melons get married?<>Because they cantaloupe.
+What’s a computer’s favorite snack?<>Microchips!
+Why was the robot so tired after his road trip?<>He had a hard drive.
+Why did the computer have no money left?<>Someone cleaned out its cache!
+I'm not anti-social. <>I'm just not user friendly.
+Why did the computer get cold?<>Because it forgot to close windows.
+What is an astronaut's favorite key on a keyboard?<>The space bar!
+What's the difference between a computer salesman and a used-car salesman?<>The used-car salesman KNOWS when he's lying.
+If at first you don't succeed...<> call it version 1.0
+Why did Microsoft PowerPoint cross the road?<>To get to the other slide!
+What did the computer do at lunchtime?<>Had a byte!
+Why did the computer keep sneezing?<>It had a virus!
+What did one toilet say to the other?<>You look a bit flushed.
+Why did the picture go to jail?<>Because it was framed.
+What did one wall say to the other wall?<>I'll meet you at the corner.
+What do you call a boy named Lee that no one talks to?<>Lonely
+Why do bicycles fall over?<>Because they are two-tired!
+Why was the broom late?<>It over swept!
+What part of the car is the laziest?<>The wheels, because they are always tired!
+What's the difference between a TV and a newspaper?<>Ever tried swatting a fly with a TV?
+What did one elevator say to the other elevator?<>I think I'm coming down with something!
+Why was the belt arrested?<>Because it held up some pants!
+What makes the calendar seem so popular?<>Because it has a lot of dates!
+Why did Mickey Mouse take a trip into space?<>He wanted to find Pluto!
+Why do you go to bed every night?<>Because the bed won't come to you!
+What has four wheels and flies?<>A garbage truck!
+Why did the robber take a bath before he stole from the bank?<>He wanted to make a clean get away!
+Just watched a documentary about beavers.<>It was the best damn program I’ve ever seen.
+Slept like a log last night<>woke up in the fireplace.
+Why did the scarecrow win an award?<>Because he was outstanding in his field.
+Why does a chicken coop only have two doors? <>Because if it had four doors it would be a chicken sedan.
+What’s the difference between an African elephant and an Indian elephant? <>About 5000 miles
+Why did the coffee file a police report? <>It got mugged.
+What did the grape do when he got stepped on? <>He let out a little wine.
+How many apples grow on a tree? <>All of them.
+What name do you give a person with a rubber toe? <>Roberto
+Did you hear about the kidnapping at school? <>It’s fine, he woke up.
+Why do scuba divers fall backwards into the water? <>Because if they fell forwards they’d still be in the boat.
+How does a penguin build it’s house? <>Igloos it together.
+What do you call a man with a rubber toe?<>Roberto
+Did you hear about the restaurant on the moon?<>Great food, no atmosphere.
+Why was the belt sent to jail?<>For holding up a pair of pants!
+Did you hear about the scientist who was lab partners with a pot of boiling water?<>He had a very esteemed colleague.
+What happens when a frogs car dies?<>He needs a jump. If that doesn't work he has to get it toad.
+What did the flowers do when the bride walked down the aisle?<>They rose.
+Why did the man fall down the well?<>Because he couldn’t see that well.
+My boss told me to have a good day...<>...so I went home.
+How can you tell it’s a dogwood tree?<>By the bark.
+Did you hear about the kidnapping at school?<>It’s fine, he woke up.
+Why is Peter Pan always flying?<>Because he Neverlands.
+Which state has the most streets?<>Rhode Island.
+What do you call 26 letters that went for a swim?<>Alphawetical.
+Why was the color green notoriously single?<>It was always so jaded.
+Why did the coach go to the bank?<>To get his quarterback.
+How do celebrities stay cool?<>They have many fans.
+What's the most depressing day of the week?<>sadder day.
+Dogs can’t operate MRI machines<>But catscan.
+I was going to tell a time-traveling joke<>but you guys didn’t like it.
+Stop looking for the perfect match<>instead look for a lighter.
+I told my doctor I heard buzzing<>but he said it’s just a bug going around.
+What kind of car does a sheep like to drive?<>A lamborghini.
+What did the accountant say while auditing a document?<>This is taxing.
+What did the two pieces of bread say on their wedding day?<>It was loaf at first sight.
+Why do melons have weddings?<>Because they cantaloupe.
+What did the drummer call his twin daughters?<>Anna One, Anna Two!
+What do you call a toothless bear?<> A gummy bear!
+Two goldfish are in a tank. <>One says to the other, “Do you know how to drive this thing?”
+What’s Forrest Gump’s password?<>1forrest1
+What is a child guilty of if they refuse to nap?<> Resisting a rest.
+I know a lot of jokes about retired people<>but none of them work.
+Why are spiders so smart?<>They can find everything on the web.
+What has one head, one foot, and four legs?<> A bed.
+What does a house wear?<> Address.
+What’s red and smells like blue paint?<>Red paint.
+My son asked me to put his shoes on<> but I don’t think they’ll fit me.
+I’ve been bored recently, so I decided to take up fencing.<> The neighbors keep demanding that I put it back.
+What do you call an unpredictable camera?<>A loose Canon.
+Which U.S. state is known for its especially small soft drinks?<>Minnesota.
+What do sprinters eat before a race?<> Nothing—they fast.
+I’m so good at sleeping...<>I can do it with my eyes closed.
+People are usually shocked that I have a Police record.<>But I love their greatest hits!
+I told my girlfriend she drew on her eyebrows too high.<> She seemed surprised.
+What do you call a fibbing cat?<> A lion.
+Why shouldn’t you write with a broken pencil?<> Because it’s pointless.
+I like telling Dad jokes…<>sometimes he laughs.
+How do you weigh a millennial?<> In Instagrams.
+The wedding was so beautiful<>even the cake was in tiers.
+What’s the most patriotic sport?<> Flag football.
+How do you know when you are going to drown in milk? When its past your eyes!
+Milk is also the fastest liquid on earth – its pasteurized before you even see it
+A steak pun is a rare medium well done.
+Did you hear that the police have a warrant out on a midget psychic ripping people off? It reads "Small medium at large."
+A panda walks into a bar and says to the bartender "I'll have a Scotch and . . . . . . . . . . . . . . Coke thank you".
+"Sure thing" the bartender replies and asks "but what's with the big pause?"
+The panda holds up his hands and says "I was born with them"
+A man was caught stealing in a supermarket today while balanced on the shoulders of a couple of vampires. He was charged with shoplifting on two counts.
+I heard there was a new store called Moderation. They have everything there
+Our wedding was so beautiful, even the cake was in tiers.
+Did you hear about the new restaurant on the moon? The food is great, but there's just no atmosphere.
+I went to a book store and asked the saleswoman where the Self Help section was, she said if she told me it would defeat the purpose.
+What did the mountain climber name his son? Cliff.
+"What's ET short for? Because he's only got little legs."
+What do you call an Argentinian with a rubber toe? Roberto
+What do you call a Mexican man leaving the hospital? Manuel
+Today a girl said she recognized me from vegetarian club, but I'm sure I've never met herbivore.
+I dreamed about drowning in an ocean made out of orange soda last night. It took me a while to work out it was just a Fanta sea.
+I needed a password eight characters long so I picked Snow White and the Seven Dwarfs.
+Last night me and my girlfriend watched three DVDs back to back. Luckily I was the one facing the TV.
+How do you organize a space party? You planet.
+Breaking news! Energizer Bunny arrested – charged with battery.
+Conjunctivitis.com – now that's a site for sore eyes.
+A Sandwich walks into a bar, the bartender says "Sorry, we don't serve food here"
+They laughed when I said I wanted to be a comedian – they're not laughing now.
+I'm reading a book on the history of glue – can't put it down.
+Where does Napoleon keep his armies? In his sleevies.
+I went to the zoo the other day, there was only one dog in it. It was a shitzu.
+Why can't you hear a pterodactyl go to the bathroom? The p is silent.
+Q: What's 50 Cent's name in Zimbabwe? A: 400 Million Dollars.
+"My Dog has no nose." "How does he smell?" "Awful"
+What do you call a cow with no legs? Ground beef.
+What did the Buffalo say to his little boy when he dropped him off at school? Bison.
+So a duck walks into a pharmacy and says "Give me some chap-stick... and put it on my bill"
+Why did the scarecrow win an award? Because he was outstanding in his field.
+Why did the girl smear peanut butter on the road? To go with the traffic jam.
+Why does a chicken coop only have two doors? Because if it had four doors it would be a chicken sedan.
+Why don't seagulls fly over the bay? Because then they'd be bay-gulls!
+What do you call a fly without wings? A walk.
+What do you do when a blonde throws a grenade at you? Pull the pin and throw it back.
+What's brown and sounds like a bell? Dung!
+How do you make a hankie dance? Put a little boogie in it.
+Where does batman go to the bathroom? The batroom.
+What's the difference between an African elephant and an Indian elephant? About 5000 miles.
+Two muffins were sitting in an oven, and the first looks over to the second, and says, "man, it's really hot in here". The second looks over at the first with a surprised look, and answers, "WHOA, a talking muffin!"
+A man walks into a bar and orders helicopter flavor chips. The barman replies "sorry mate we only do plain"
+ Sgt.: Commissar! Commissar! The troops are revolting! Commissar: Well, you're pretty repulsive yourself.
+What do you call a sheep with no legs? A cloud.
+I knew i shouldn't have ate that seafood. Because now i'm feeling a little... Eel
+What did the late tomato say to the early tomato? I'll ketch up
+What did the 0 say to the 8? Nice belt.
+Why didn't the skeleton cross the road? Because he had no guts.
+Why don't skeletons ever go trick or treating? Because they have nobody to go with.
+Why do scuba divers fall backwards into the water? Because if they fell forwards they'd still be in the boat.
+Have you ever heard of a music group called Cellophane? They mostly wrap.
+What kind of magic do cows believe in? MOODOO.
+Wife: Honey I'm pregnant. Me: Well.... what do we do now? Wife: Well, I guess we should go to a baby doctor. Me: Hm.. I think I'd be a lot more comfortable going to an adult doctor.
+At what time does the soldier go to the dentist? 1430.
+"Hold on, I have something in my shoe" "I'm pretty sure it's a foot"
+Why does it take longer to get from 1st to 2nd base, than it does to get from 2nd to 3rd base? Because there's a Shortstop in between!
+Dad I'm hungry' ... Hi hungry I'm dad
+When phone ringing Dad says 'If it's for me don't answer it.'
+Put the cat out ... I didn't realize it was on fire
+Where's the bin? Dad: I haven't been anywhere!
+Can I watch the TV? Dad: Yes, but don't turn it on.
+When Dad drops a pea off of his plate 'oh dear I've pee'd on the table!'
+I've been addicted to cold turkey for 2 years. I keep telling people I'm trying to quit cold turkey but nobody is taking me seriously.
+Old yachtsmen don't die... They just keel over.
+3.14% of sailors are pi-rates.
+Bad at golf? Join the club.
+I just ate a frozen apple. Hardcore.
+Have you met my friend Annette? She's married to a fisherman.
+Why is Irish whiskey triple distilled? To be sure, to be sure, to be sure.
+I just read a book about Stockholm syndrome. It was pretty bad at first, but by the end I liked it.
+RIP boiled water. You will be mist.
+Archaeology really is a career in ruins...
+I don't trust stairs. They're always up to something.
+If you want a job in the moisturiser industry, the best advice I can give is to apply daily.
+A big cat escaped it's cage at the zoo yesterday. If I saw that I'd puma pants.
+My Czech mate is surprisingly bad at chess.
+Why are Lada's so bad? Because the keep Stalin.
+What do you get hanging off banana trees? Sore arms.
+I made my wife a cocktail with fairy liquid in it.... She was foaming at the mouth when she tasted it.
+What do you call a fat psychic? A four-chin teller.
+Found out I was colour blind the other day... That one came right out the purple.
+I hate perforated lines, they're tearable.
+A man tried to sell me a coffin today... I told him that's the last thing I need.
+Whenever I want to start eating healthy, a chocolate bar looks at me and Snickers.
+Don't kiss your wife with a runny nose. You might think it's funny, but it's snot.
+My friend keeps telling me I'm in the closet. I just say it's Narnia business. @WillFerreI
+I burnt my Hawaiian pizza last night... I should've put it on aloha setting.
+Dad: Where can I get a potato clock? Son: Why a potato clock?!? Dad: I've got a new job and my boss said I need to get-a-potato-clock
+My son asked me to stop singing oasis songs in public. I said maybe.
+When my wife told me to stop impersonating a flamingo I had to put my foot down.
+What's the difference between a hippo and a zippo? One is really heavy, the other is a little lighter.
+To the man in the wheelchair that stole my camouflage jacket... You can hide but you can't run.
+They don't watch the flintstones in Dubai. But Abu Dhabi do.
+Lone Ranger sees Tonto riding with a dustbin. LR: "Where are you going Tonto?" T: "to-the-dump-to-the dump-to-the-dump-dump dump..."
+Why can't you hear a pterodactyl using the bathroom? Because the P is silent
+Happy Father's Day! Did you hear about the crazy Mexican train thief? He had loco motives
+Singing in the shower is all fun and games until you get shampoo in your mouth.... Then it's a soap opera
+The rotation of earth really makes my day.
+You can't run through a camp site. You can only ran, because it's past tents.
+"Does this uniform make me look fat" - insecurity guard
+How do you tell the difference between a crocodile and an alligator? You will see one later and one in a while.
+I told my wife she drew her eyebrows too high. She seemed surprised.
+Why do trees seem suspicious on sunny days? Dunno, they're just a bit shady.
+You know what they say about cliffhangers...
+Want to hear a joke about construction? Nah, I'm still working on it.
+Ever noticed that glass tastes like blood?
+A classic from who's line is it anyway.
+You heard the rumor going around about butter? Nevermind, I shouldn't spread it.
+I have the heart of a lion and a lifetime ban from London zoo. @zsllondonzoo
+What did the Buddhist ask the hot dog vendor? "Make me one with everything."
+How does the moon cut his hair? Eclipse it!
+What do you call an elephant that doesn't matter? An irrelephant
+What happened to the cow that jumped over the barbed wire fence? Udder destruction.
+I thought about going on an all-almond diet..... But that's just nuts
+What's a duck's favourite dip? Quackamole
+What do you call a fake noodle? An Impasta
+I hate it when people ask me what I will be doing in 5 years time. Come on, I don't have 2020 vision.
+Steak puns... They're a rare medium, well done
+The shovel was a ground-breaking invention.
+Past, present, and future walked into a bar.... It was tense.
+Comedians who tell one too many lightbulb jokes soon burn out.
+Look! I'm wearing a Thai.
+What do you call a Mexican who has lost his car? Carlos.
+How does a penguin build it's house? Igloos it together.
+Knock knock. Who's there? To. To Who? To whom.
+Why do you never see elephants hiding in trees? Because they're so good at it.
+I went out with a girl called Simile, I don't know what I metaphor.
+I went on a two week holiday to the south of France. It was Toulon.
+A pirate walks into a bar with a ship's wheel on his belt buckle. Bartender: What's that on your belt? Pirate: Arrr, It's drivin' me nuts!
+Plateaus are the highest form of flattery.
+Me: Doctor you've got to help me, I'm addicted to Twitter. Doctor: I don't follow you.
+There's no I in denial.
+My computer sings, it's a Dell.
+It's time to rock around the Christmas tree.
+I got a reversible jacket for Christmas, I can't wait to see how it turns out.
+I ate a clock yesterday, it was so time consuming.
+I'm tired of following my dreams. I'm just going to ask them where they are going and meet up with them later.
+What's brown and sticky? A stick.
+How do you find Will Smith in the snow? You look for the fresh prints.
+Did you hear about the kidnapping at school? Its ok, he woke up.
+What's the best thing about elevator jokes? They work on so many levels.
+How do you make antifreeze? Steal her blanket.
+What's the difference between beer nuts and deer nuts? Beer nuts are about 49cents and deer nuts are just under a buck.
+Did you hear about the guy who jumped off a bridge in Paris? He was in Seine.
+There are only two types of people in the world, those who can extrapolate from incomplete data...
+What did the buffalo say to his son as he left for college? Bison
+A truck of Terrapins crashed into a truck of tortoises. It was a turtle disaster.
+What does a house wear? A dress.
+I asked a Frenchman if he played video games. He said "wii".
+Full Meal Jacket
+A furniture store keeps calling me. But all I wanted was one night stand.
+What did the dog say after a long day at work? "Today was Ruff"
+Where are average things built? In the satisfactory.
+I've eaten too much Middle Eastern food. Now I falafel.
+A pet store had a bird contest. No perches necessary.
+What's the worst thing about ancient history class? The teachers tend to Babylon.
+Yesterday a clown held a door open for me. I thought it was a nice jester.
+How many optometrists does it take to change a light bulb?... 1 or 2? 1... or 2?
+My son asked me to take him to the hospital because he had a large red mark on his face. I said "Let's not make any rash decisions."
+Just read a few facts about frogs. They were ribbiting.
+Sean Connery famously said he would leave The Bahamas and return to Scotland, if it ever gained independence. He must be shitting himself.
+I used to work in a shoe recycling shop. It was sole destroying.
+The universe implodes. No matter.
+I can give you the cause of an anaphylactic shock in a nutshell.
+I just swapped our bed for a trampoline. My wife hit the roof.
+I heard a rumour that Cadbury is bringing out an oriental chocolate bar. Could be a Chinese Wispa.
+My dog Minton ate a shuttlecock... Bad Minton.
+Astronomers got tired of watching the moon go round the earth for 24 hours. So the decided to call it a day.
+I've got an addiction to water, I think I'm an aquaholic.
+What did the hungry clock do? Went back four seconds!
+My sea sickness comes in waves.
+I play triangle for a reggae band. It's pretty casual. I just stand at the back and ting.
+I'm afraid I've caught poetry. Don't worry, I used to suffer from short stories. Really?When? Once upon a time
+I asked the checkout girl for a date. She said "They're in the fruit aisle next to the bananas."
+What did the chicken say about the scrambled egg? There goes my crazy, mixed up kid.
+Why do so many people with laser hair want to get it removed?
+What's the difference between a well dressed man on a a bicycle and a poorly dressed man on a tricycle? Attire!
+Why does Peter pan always fly?Because he neverlands!
+For all American Dads, this is all you need today.
+What did the pirate say on his 80th birthday? Aye matey
+I jumped into the sea today. My friends pier pressured me into it.
+What do you call a sketchy Italian neighbourhood? The Spaghetto.
+I have kleptomania, but when it gets bad, I take something for it.
+Why can't you have a nose 12 inches long? Because then it would be a foot.
+Why do bears have hairy coats? Fur protection.
+Someone said my clothes were gay. I said "Yeah, they came out of the closet this morning."
+I just misspelt Armageddon, it's not the end of the world.
+Volunteering in America is absurd, it just makes no cents.
+Jonny Wilkinson is announcing his retirement from rugby. You can't say he didn't try.
+Why don't you want to taco bout it? 'Cause i'm nacho friend anymore.
+Doorbells, don't knock 'em.
+I'm back from holiday in the South Pacific. I wish I had Samoa time off.
+"I'm on a whiskey diet, I've lost 4 days already." Tommy Cooper What's your favorite Cooperism?
+My wife is on a tropical food diet, the house is full of the stuff. It's enough to make a mango crazy.
+Whiteboards are remarkable.
+Sweet dreams are made of cheese, who am I to dis a Brie.
+Happy Easter! What's your best egg yolk? Mine is: A boiled egg is hard to beat.
+What do you call an Alligator wearing a vest? An investigator.
+Did you hear about the magic tractor? It turned into a field.
+Can February march? No, but April May.
+Full credit to the whoever made this for Putin in the effort.
+What does a grape say when it is stepped on? Nothing, it just lets out a little wine.
+What do you call a dinosaur with an extensive vocabulary? A thesaurus.
+I swallowed some Tippex last night. I woke up this morning with a massive correction.
+Just got a text from Snoop Dogg. No biggy.
+What do you get when you cross a rhetorical question with a joke?
+Pink Panthers to do list: To do To do To do, to do, to do To do, to doooo
+What did the bra say to the hat? You go on ahead, I'll give these two a lift.
+What did one eye say to the other? Something smells between us.
+Two elephants fall off a cliff... Boom boom!
+How many tickles does it take to make an octopus laugh? Tentacles.
+I don't like atoms, they're liars. They make up everything.
+If you want to set up a company and run it, then that's your own business.
+My friend is going on holiday to the Middle East. Oman, that sounds fun...
+Whoever invented the door knocker deserves a no-bell prize!
+Why did the elf push his bed into the fireplace? He wanted to sleep like a log.
+I can't stand Russian dolls.... They're so full of themselves.
+I remember the first time I saw a universal remote controller. I thought to myself "well, this changes everything..."
+I got this extra electron I didn't want. My friend said "don't be so negative."
+A boat builder is showing his son one of his forests. He turns to him and says, "Son, one day this will all be oars"
+Molestation is a touchy subject.
+I’ve decided to put up a marquee in my garden with some funky music and flashing lights. Now is the winter of my disco tent.
+I was thinking about moving to Moscow but there is no point Russian into things.
+I met a Dutch girl with inflatable shoes last week, I phoned her up for a date but she'd popped her clogs.
+My New Years resolution is to stop leaving things so late.
+Did you hear about the man who gave up making haggis? He didn't have the guts for it anymore.
+Retrospective baddadjoke: Why are there no pain killers in the jungle? Because parrots-eat-em-all
+Sometimes I squat on the floor, put my arms around my legs and lean forward. That's how I roll.
+Got lost in a corn field today, it was a-maize-ing.
+I needed a password eight characters long so I picked Snow White and the Seven Dwarves.
+Just out buying some new chairs for the house, sofa so good.
+My wife told me I was average, I think she's mean.
+I was going to tell a dairy joke, but it was too cheesy.
+Just had my first round of golf. I'm not very good, in fact I've got a fairway to go.
+My daughter just lost her mood ring, really don't know how she feels about it.
+I told a friend I was off to California this summer. He told me to be more pacific... so I went to Hawaii instead...
+I gave all my dead batteries away today... Free of charge.
+Why is there a long line at the cemetery? Because people are dying to get in.
+Why did the can crusher quit his job? Because it was soda pressing.
+I'm starting a band called 1023mb We'll never get a gig.
+What's Forest Gump's Facebook password? 1forest1
+A photon checks into a hotel. Receptionist: "May I take your bags sir?" Photon: "I don't have any bags, I'm travelling light."
+Melon 1: "Let's run away and get married." Melon 2: "Sorry but I Cantaloupe."
+Did you hear about the Italian chef who died? He pasta way.
+I lost my job last week. Unemployment is not working for me.
+A termite walks into a bar and asks "Is the bar tender here?"
+Hitler was surprised by the Invasion of Normandy. He did nazi that coming.
+A Freudian slip is when you say one thing but mean your mother.
+So, I asked my North Korean mate how his life was going? He said "can't complain"
+Just quit my job at Starbucks because day after day it was the same old grind.
+I went to the zoo the other day, there was only one dog in it, it was a shitzu.
+Why is Saudi Arabia free of mental illness? Because No-mad people live there.
+Without geometry life is pointless.
+I broke my guitar string last night. Don't fret, I had another.
+Had a new beaver curry last night. It's like a normal curry, just a bit 'otter.
+Went to the corner shop today... Bought four corners.
+Have you heard the conspiracy about Russian allotments. It's all just a communist plot.
+My uncle works with Digital radios. You could say he’s a DAB hand.
+I dreamt about drowning in an ocean made out of orange soda last night. It took me a while to work out it was just a Fanta sea.
+My cat was just sick on the carpet, I don't think it's feline well.
+Why do the French only put one egg in an omelette? Because one egg is un oeuf.
+The other day someone left plasticine in my house. I didn't know what to make of it.
+What happens when you tell an egg a joke? It cracks up.
+How do you make holy water? Boil the hell out of it.
+Sorry I've been away for a while, I was at the fabric shop looking for new material.
+I've just been to a very emotional wedding. Even the cake was in tiers.
+When you have a bladder infection, urine trouble.
+I stayed up all night to find out where the sun went, then it dawned on me...
+I went to the doctor today and he told me I had type A blood but it was a type O.
+Today a girl said she recognised me from vegetarian club, but I'm sure I've never met herbivore.
+Jokes about German sausages are the wurst.
+I tried to throw a ball at a cloud. I mist.
+I woke up with a face full of rice. I must've fallen asleep as soon as my head hit the pilau.
+I couldn't pay for my coffee because my wallet was in my other pair of moccachinos. I got it for free. Thanks a latté @lashingsbristol!
+I cut my finger chopping cheese, but I think that I may have grater problems.
+First rule of Thesaurus Club. You don't talk, converse, discuss, speak, chat, deliberate, confer, gab, gossip or natter about Thesaurus Club
+Don't have a Findus lasagne before bed. You'll have a nightMARE.
+How does a muppet die? Apparently, it kermits suicide.
+What did the Mexican say to his chicken? Oh-lay!
+A pet shop was ransacked last week... ...there are currently no leads.
+How do you drown a hipster? In the mainstream.
+Sleeping comes naturally to me. I can do it with my eyes closed.
+I ate some rotten chicken last night. Now I feel fowl.
+There is a new disease found in margarine... Apparently it spreading very easily.
+What do you call an Italian with a rubber toe? Roberto
+Why do crabs never give to charity? Because they're shellfish.
+What is Santa's favourite pizza? One that's deep pan, crisp and even.
+People are making apocalypse jokes like there's no tomorrow.
+Someone called me pretentious the other day... I almost choked on my latte.
+My mate dug a hole in the garden and filled it with water....I think he meant well.
+What's your favourite Christmas Cracker Joke? Here's one of mine: "What's ET short for? Because he's only got little legs."
+If you're struggling to think of what to get someone for Christmas. Get them a fridge and watch their face light up when they open it.
+A mate of mine has admitted to being addicted to break fluid. I'm worried but he says he can stop whenever he wants.
+Start a new job in Seoul next week. I thought it was a good Korea move.
+Soya Milk. Looked in your fridge.
+A book just fell on my head. I've only got myshelf to blame.
+Bloody thespians, always making a scene.
+My dad fought in the war and survived mustard gas and pepper spray. He is now classed as a seasoned veteran.
+Tea is for mugs.
+This thesaurus isn't just terrible, it is also terrible.
+I am terrified of elevators. I'm going to start taking steps to avoid them.
+Need an ark to save two of every animal? I Noah guy.
+What did the father say to the son who was going fishing? Let minnow when you get there.
+I am delighted with the corn crop this year. It's A-maize-ing.
+How does Moses make his tea? Hebrews it.
+I think rowing is oarsome.
+What's the advantage of living in Switzerland? Well, the flag is a big plus.
+Nostalgia isn't what it used to be.
+Why do accountants look so good in heels? Because they never lose their balance.
+I'll stop at nothing to avoid using negative numbers.
+Wind turbines. I'm a big fan!
+What's the definition o a good farmer? A man outstanding in his field.
+Why did the octopus beat the shark in a fight? Because it was well armed.
+Why does a Moon-rock taste better than an Earth-rock? Because it's a little meteor.
+I fired my masseuse today. She rubbed me up the wrong way.
+A red and a blue ship have just collided in the Caribbean. Apparently the survivors are marooned.
+Breaking news! A hurricane has just hit the the main cheese factory in France. All that's left is de-Brie.
+I'm glad I know sign language, it's pretty handy.
+I like sea food. I often just have it for the halibut.
+A girl walks into a bar and asked for a double entendre. So the barman gave her one.
+I took the shell off of my racing snail to see if it went any faster. If anything though, it just made it more sluggish.
+I've deleted the phone numbers of all the Germans I know from my mobile phone. Now it's Hans free.
+Was kept awake last night by someone flashing a light in my face. It was torch-ure.
+My wife said to me "Your lack of originality is pathetic."I said "Yeah, well your lack of originality is pathetic."
+It was really hard overcoming my addiction to the hokey cokey. But I turned myself around and that’s what it’s all about.
+"I saw a documentary on how ships are kept together. Riveting!" Stewart Francis
+Last night me and my girlfriend watched three DVDs back to back. Luckily I was the one facing the telly.
+My wife just split up with me because I've got a pasta fetish. I'm feeling cannelloni right now.
+I'm thinking about getting a new haircut... I'm going to mullet over.
+Had a bowl of scotch broth for lunch today... It was souper hot.
+I got really quick service at the fish and chip shop. It was very e-fish-ent
+How do you organise a space party? You planet.
+What did one bird say to the other cheating parrot? Toucan play at that game.
+What's wrong with the Southern French's trousers? They're Toulouse.
+How much does a hipster weigh? An instagram.
+A photon enters a hotel. Porter: 'Need any help with your luggage?' Photon: 'No thanks, I'm travelling light'
+Give me ambiguity or give me something else.
+A banker came home from work today worried about his job. He said its in the balance.
+sorry "a *pod* of killer whales"
+What do you call a group of killer whales playing instruments? An Orca-stra.
+The only thing that can survive a
+A man has taken @British_Airways to court after they misplaced his luggage. He lost his case.
+Did you hear about the guy whose whole left side was cut off? He's all right now.
+Why was the big cat disqualified from the race? Because it was a cheetah.
+What do you call a man with rabbits living in his bum? Warren
+Just been fishing... It was reely good.
+A man walked in to a bar with some asphalt on his arm. He said "Two beers please, one for me and one for the road."
+It's so hard to think of another chemistry joke... All the good ones Argon.
+Why do people dislike mushrooms? Because they're made from Toads Stools...
+There was so much fighting on our Easter camping trip... it was in-tents.
+It's easter already?!
+I'm off to Nairobi in the Summer. Kenya believe it?
+My first girlfriend's name was Ivy... she was all over me.
+I've just voted for Charlie's odyssey by Charlie Denholm as the funniest film
+Helvetica walks into a bar. The barman says "We don't serve your type around here."
+Argon walks into a bar. The barman says "Get the hell out!" Argon doesn't react.
+Just watched a documentary about beavers... It was the best damn program I've ever seen.
+Last night it was raining cats and dogs... I stepped in a poodle.
+I thought about being a juggler, but I didn't have the balls.
+My mate got a job as a lion's hairdresser at the zoo today. He is literally the mane man.
+I'm not as think as you drunk I am.
+I'm thinking about moving to France... I've got nothing Toulouse.
+Went surfing the other day, it was swell.
+Watershed joke: A baker was caught bonking his bread loaves. They say he was inbread.
+The only thing that can survive a double dip is a hobnob. Osborne, call McVities.
+I enjoy using the comedy technique of self-deprecation – but I’m not very good at it.
+My wife... its difficult to say what she does... She sells seashells on the seashore.
+A poker player loses his arm in a nasty accident. He's now got a prosthetic replacement. He just can't deal with it.
+A girl invited me back to her place last night for champagne... It turned out it was real pain.
+Theres a new type of pillow made from corduroy... Its making headlines.
+What did the father say to his crying son at his Indian themed birthday party? It's chapatti and you can cry if you want to.
+Breaking news! Energizer Bunny arrested - charged with battery.
+Wow who saw that coming? Harry Potter and News of the World two of the Biggest selling modern fiction publications ending in the same week.
+I went in to a pet shop. I said 'Can I buy a goldfish?' The guy said, 'Do you want an aquarium?' I said 'I don't care what star sign it is.'
+A man was found today vacuum cleaning the top of nelsons column without any safety equipment. Police say he was Dyson with death.
+A man went to A&E at the weekend who swallowed 12 plastic horses. Don't worry the doctors describe his condition as stable.
+Conjunctivitis.com - now that's a site for sore eyes.
+A guy walks into the psychiatrist wearing only clingfilm for shorts. The shrink says, "Well, I can clearly see you're nuts."
+WIMBLEDON SPECIAL Why should you never fall in love with a tennis player? To them, "Love" means nothing.
+I went to the doctor the other day I said 'have you got anything for wind' so he gave me a kite.
+A sandwich walks into a bar. The barman says "we don't serve food here."
+The recruitment consultant asked me "What do you think of voluntary work?" I said "I wouldn't do it if you paid me."
+An ice cream man was found lying on the floor of his van covered with hundreds and thousands. Police say that he topped himself.
+"Doctor, I've broken my arm in several places" Doctor "Well don't go to those places."
+A boiled egg in the morning is hard to beat.
+I'm on a whiskey diet. I've lost three days already.
+What do you do with chemists when they die? We barium.
+Pretty appropriate. Seven days without a pun makes one weak.
+Hand me my Mondeo, my semidetached house, my unloved wife, my unfulfilling job, my xbox kids. Twitter, I am your dad and I tell bad jokes.
+What type of onion is the best painkiller? A-sprin' onion...
+Just passed a manicurist and a dentist quarreling in the street- they were fighting tooth and nail.
+I fear for the calendar, it's days are numbered.
+What's the definition of 'A Will'? (I'll give you a clue, it's a dead giveaway.)
+I buy a different brand of cling flim every time I go to the shops. Just to keep things fresh.
+The advantages of origami are twofold.
+There's a new type of broom out, it's sweeping the nation.
+Atheism is a non-prophet organisation.
+I went to a seafood disco last night and pulled a muscle.
+My friend drowned in a bowl of muesli. A strong currant pulled him in.
+Sometimes I drink my whiskey neat. Other times I take off my tie and untuck my shirt.
+I don't want to sound big headed but I wear extra large hats.
+My friend said "You remind me of a ketchup bottle", I said "I'll take that as a condiment".
+Slept like a log last night ... woke up in the fireplace.
+Exit signs - they're on the way out aren't they.
+What did the fish say when it swam into a wall? Damn!
+A cat hijacked a plane, stuck a pistol to the pilots ribs and said "TAKE ME TO THE CANARIES!"
+They laughed when I said I wanted to be a comedian - they're not laughing now.
+One arm butlers - they can take it, but they can't dish it out.
+A shark will only attack you if you're wet
+Beware of alphabet grenades, they might spell disaster.
+What cheese can never be yours? Nacho cheese.
+Last night I saw this guy chatting up a cheetah at the bar. I thought 'he's trying to pull a fast one.'
+My housemate opened the fridge last night and threw a block of cheese at me. I said "That's mature."
+A police officer caught two kids playing with a firework and a car battery. He charged one and let the other one off.
+I used to be indecisive, but now I'm not quite sure.
+Why are there no pain killers in the jungle? Because parrots-eat-em-all
+I'm reading a book on the history of glue - can't put it down.
+Albinos - can't say fairer than that.
+Velcro... What a rip-off.
+A man walks into a butcher. The butcher bets him £5 he can't guess a shelf of meat's weight. Man replies "I cant, the steaks are too high."
+Two aerials meet on a roof, fall in love and get married... The ceremony was rubbish but the reception was excellent.
+Black beauty... He's a dark horse.
\ No newline at end of file
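Note on the jokes.txt change above: the rewritten entries now use "<>" as a delimiter between a joke's setup and its punchline, while the entries appended near the end of the file remain plain one-liners. As a minimal sketch of how a consumer could read that mixed format (the function name, file path, and fallback behaviour for lines without "<>" are assumptions for illustration, not code from this repository):

    # Hypothetical reader for the delimited jokes file; load_jokes and the
    # one-liner fallback are assumptions, not part of this diff.
    def load_jokes(path="jokes.txt"):
        jokes = []
        with open(path, encoding="utf-8") as fh:
            for raw in fh:
                line = raw.strip()
                if not line:
                    continue
                # New-format entries carry "setup<>punchline"; lines without the
                # delimiter are kept as single-string jokes.
                setup, sep, punchline = line.partition("<>")
                jokes.append((setup.strip(), punchline.strip()) if sep else (line, None))
        return jokes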
diff --git a/main.py b/main.py
new file mode 100755
index 0000000..9bd1134
--- /dev/null
+++ b/main.py
@@ -0,0 +1,287 @@
+import json
+import sys
+import os
+from util.config import Config
+from util.scheduler import check_schedule
+from util.logger import setup_logger
+from util.utility import *
+from prettytable import PrettyTable
+import importlib
+import multiprocessing
+import time
+import datetime
+
+# Set the current time
+current_time = datetime.datetime.now().strftime("%H:%M")
+
+already_run = {}
+
+list_of_python_scripts = [
+ "border_replacerr",
+ "health_checkarr",
+ "labelarr",
+ "nohl",
+ "poster_cleanarr",
+ "poster_renamerr",
+ "queinatorr",
+ "renameinatorr",
+ "sync_gdrive",
+ "upgradinatorr",
+ "unmatched_assets",
+ "backup_appdata",
+]
+
+list_of_bash_scripts = [
+ "jduparr",
+ "nohl_bash",
+]
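+# These two lists drive dispatch in run_module() below: python entries are
+# imported as modules.<name>, while bash entries are routed through the shared
+# modules.bash_scripts wrapper and receive the script name as an argument.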
+
+ran_modules = {}
+
+branch = get_current_git_branch()
+
+def get_logger(config, script_name):
+ """
+ Get the logger for the script
+
+    Args:
+        config (Config): The config object for the script (provides log_level)
+        script_name (str): The name of the script
+
+ Returns:
+ logger: The logger for the script
+ """
+ # Get loglevel from script config
+ log_level = config.log_level
+ logger = setup_logger(log_level, script_name)
+
+ return logger
+
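+# Illustrative example (not called like this anywhere in this file), using one
+# of the module names listed above:
+#   config = get_config("poster_renamerr")
+#   logger = get_logger(config, "poster_renamerr")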
+def get_config(script_to_run):
+ """
+ Get the config for the script
+
+ Args:
+        script_to_run (str): The name of the script to load the config for
+
+ Returns:
+ dict: The config for the script
+ """
+
+ # Load the config file
+ config = Config(script_to_run)
+
+ return config
+
+def run_module(script_to_run, logger):
+ process = None
+ if script_to_run in list_of_python_scripts:
+ try:
+ config = get_config(script_to_run)
+ module = importlib.import_module(f"modules.{script_to_run}")
+ process = multiprocessing.Process(target=module.main, args=(config,))
+ if process:
+ if script_to_run == "poster_renamerr":
+ config = Config(script_to_run)
+ script_config = config.script_config
+ sync_posters = script_config.get("sync_posters", False)
+ border_replacerr = script_config.get("border_replacerr", False)
+ posters = ", also running gdrive_sync" if sync_posters else ""
+ border = ", also running border_replacerr" if border_replacerr else ""
+ additional_scripts = f"{posters}{border}"
+ if logger: logger.info(f"Running script: {script_to_run}{additional_scripts}.")
+ else: print(f"Running script: {script_to_run}{additional_scripts}.")
+ else:
+ if logger: logger.info(f"Running script: {script_to_run}.")
+ else: print(f"Running script: {script_to_run}.")
+ process.start()
+ return process
+ except ModuleNotFoundError:
+ if logger: logger.error(f"Script: {script_to_run} does not exist")
+ else: print(f"Script: {script_to_run} does not exist")
+ return
+ except Exception as e:
+ if logger: logger.error(f"An error occurred while running the script: {script_to_run}.", exc_info=True)
+ else: print(f"An error occurred while running the script: {script_to_run}.\n{e}")
+ return
+ elif script_to_run and any(script in script_to_run for script in list_of_bash_scripts):
+ module = "bash_scripts"
+ try:
+ config = get_config(module)
+ module = importlib.import_module(f"modules.{module}")
+ process = multiprocessing.Process(target=module.main, args=(script_to_run, config))
+ if process:
+ if logger: logger.info(f"Running script: {script_to_run}")
+ else: print(f"Running script: {script_to_run}")
+ process.start()
+ return process
+ except ModuleNotFoundError:
+ if logger: logger.error(f"Script: {script_to_run} does not exist in the list of bash scripts.")
+ else: print(f"Script: {script_to_run} does not exist in the list of bash scripts.")
+ return
+ except Exception as e:
+ if logger: logger.error(f"An error occurred while running the script: {script_to_run}.", exc_info=True)
+ else: print(f"An error occurred while running the script: {script_to_run}.\n{e}")
+ return
+ else:
+ if logger: logger.error(f"Script: {script_to_run} does not exist in either bash or python scripts")
+ else: print(f"Script: {script_to_run} does not exist in either bash or python scripts")
+ return
+
+def load_schedule():
+ """
+ Load the schedule from the config file
+
+ Returns:
+ dict: The schedule from the config file
+ """
+
+ # Load the config file
+ config = Config("main")
+
+ # Get the schedule from the config
+ schedule = config.scheduler
+
+ return schedule
+
+
+def main():
+ """
+ Main function
+ """
+    # Initialize scheduler state
+ initial_run = True
+ last_check = None
+ old_schedule = None
+ running_scripts = {}
+ waiting_message_shown = False
+    scripts_schedules = load_schedule()
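+    # If module names are passed on the command line, run just those modules; otherwise start the scheduler loop.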
+ if len(sys.argv) > 1:
+ for input_name in sys.argv[1:]:
+ if input_name and any(script in input_name for script in list_of_bash_scripts):
+ run_module(input_name, None)
+ elif input_name in list_of_python_scripts:
+ run_module(input_name, None)
+ elif input_name not in list_of_python_scripts or (input_name and not any(script in input_name for script in list_of_bash_scripts)):
+ print(f"Script: {input_name} does not exist")
+ return
+ else:
+ try:
+ main_config = Config("main")
+ log_level = main_config.log_level
+ logger = setup_logger(log_level, "main")
+ logger.info("Starting the script...")
+ # If config file is not found
+ while True:
+            scripts_schedules = load_schedule()
+
+ # Check for new version
+ if last_check is None or last_check.date() < datetime.datetime.now().date():
+ from util.version import version_check
+ version_check(logger, branch)
+ last_check = datetime.datetime.now()
+ next_check = (last_check + datetime.timedelta(days=1)).strftime("%A %I:%M %p")
+ logger.info(f"Next version check: {next_check}")
+ # Print the start message on the first run
+ if initial_run or old_schedule != scripts_schedules:
+ if initial_run:
+ logger.info(create_bar("START"))
+
+ # Print the schedule
+ logger.info(create_bar("SCHEDULE"))
+ table = PrettyTable(["Script", "Schedule"])
+ table.align = "l"
+ table.padding_width = 1
+ for script_name, schedule_time in scripts_schedules.items():
+ if isinstance(schedule_time, dict):
+ for instance, schedule_time in schedule_time.items():
+ table.add_row([instance, schedule_time])
+ else:
+ table.add_row([script_name, schedule_time])
+ logger.info(f"{table}")
+ logger.info(create_bar("SCHEDULE"))
+ initial_run = False
+ waiting_message_shown = False
+
+ if not waiting_message_shown:
+ logger.info("Waiting for scheduled scripts...")
+ waiting_message_shown = True
+
+
+ # Check for scheduled scripts
+ for script_name, schedule_time in scripts_schedules.items():
+ if isinstance(schedule_time, dict):
+ for instance, instance_schedule_time in schedule_time.items():
+ script_name = instance
+ schedule_time = instance_schedule_time
+
+ if script_name in running_scripts or not schedule_time:
+ continue
+
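+                            # Run the script when it is set to "run" and has not run yet, or when its schedule matches the current time.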
+ if (script_name in list_of_python_scripts or any(script in script_name for script in list_of_bash_scripts)) and (schedule_time == "run" and script_name not in already_run) or (schedule_time != "run" and check_schedule(script_name, schedule_time, logger)):
+ if schedule_time == "run":
+ already_run[script_name] = True
+ process = run_module(script_name, logger)
+ running_scripts[script_name] = process
+ else:
+ if script_name in running_scripts or not schedule_time:
+ continue
+
+ if (script_name in list_of_python_scripts or any(script in script_name for script in list_of_bash_scripts)) and (schedule_time == "run" and script_name not in already_run) or (schedule_time != "run" and check_schedule(script_name, schedule_time, logger)):
+ if schedule_time == "run":
+ already_run[script_name] = True
+ process = run_module(script_name, logger)
+ running_scripts[script_name] = process
+
+            # Remove finished scripts from running_scripts
+ processes_to_remove = []
+ for script_name, process in running_scripts.items():
+ if process and not process.is_alive():
+ processes_to_remove.append(script_name)
+
+ for script_name in processes_to_remove:
+ if script_name in processes_to_remove:
+ logger.info(f"Script: {script_name.capitalize()} has finished")
+ del running_scripts[script_name]
+ if script_name in already_run:
+ # Check script_schedule to see if it's set to run still
+ if script_name in scripts_schedules:
+ schedule_time = scripts_schedules[script_name]
+ if isinstance(schedule_time, dict):
+ for instance, instance_schedule_time in schedule_time.items():
+ if instance_schedule_time == "run":
+ break
+ else:
+ del already_run[script_name]
+ else:
+ if schedule_time != "run":
+ del already_run[script_name]
+ else:
+ del already_run[script_name]
+ waiting_message_shown = False
+
+ old_schedule = scripts_schedules
+ time.sleep(60)
+
+ # If the script is interrupted
+ except KeyboardInterrupt:
+ print("Keyboard Interrupt detected. Exiting...")
+ sys.exit()
+
+ # If an error occurs
+ except Exception:
+ logger.error(f"\n\nAn error occurred:\n", exc_info=True)
+ logger.error(f"\n\n")
+
+ # If the script is stopped
+ finally:
+ logger.info(create_bar("END"))
+
+
+if __name__ == '__main__':
+ """
+ Main function
+ """
+ main()
diff --git a/modules/__init__.py b/modules/__init__.py
new file mode 100755
index 0000000..e69de29
diff --git a/modules/bash_scripts.py b/modules/bash_scripts.py
new file mode 100755
index 0000000..00db68b
--- /dev/null
+++ b/modules/bash_scripts.py
@@ -0,0 +1,206 @@
+import shlex
+import json
+import sys
+from util.config import Config
+from util.call_script import *
+from util.discord import get_discord_data, discord_check
+from util.utility import create_bar
+from util.logger import setup_logger
+import pathlib
+
+def set_cmd_args(settings, bash_script_file, logger, script_name):
+ """
+ Set the command line arguments for the bash script.
+
+ Args:
+ settings (dict): The settings for the bash script.
+ bash_script_file (str): The bash script file.
+ logger (obj): The logger object.
+ script_name (str): The name of the bash script.
+
+ Returns:
+ list: A list of commands to run.
+
+ """
+ cmds = []
+ cmd = [bash_script_file]
+ channel = None
+ webhook_url = None
+ if discord_check(script_name):
+ webhook_url, channel = get_discord_data(script_name, logger)
+ if settings:
+ script_debug = str(settings.get('debug')) if 'debug' in settings else None
+
+ source = str(settings.get('source')) if 'source' in settings else None
+ destination = str(settings.get('destination')) if 'destination' in settings else None
+ keep_backups = str(settings.get('keep_backups')) if 'keep_backups' in settings else None
+ compress = str(settings.get('compress')) if 'compress' in settings else None
+ data_dir = str(settings.get('data_dir')) if 'data_dir' in settings else None
+ include = list(settings.get('include')) if 'include' in settings else None
+ exclude = list(settings.get('exclude')) if 'exclude' in settings else None
+
+ keep_essential = str(settings.get('keep_essential')) if 'keep_essential' in settings else None
+ keep_full = str(settings.get('keep_full')) if 'keep_full' in settings else None
+ force_full_Backup = str(settings.get('force_full_backup')) if 'force_full_backup' in settings else None
+ script_dry_run = str(settings.get('dry_run')) if 'dry_run' in settings else None
+ shutdown_plex = str(settings.get('shutdown_plex')) if 'shutdown_plex' in settings else None
+ full_backup = str(settings.get('full_backup')) if 'full_backup' in settings else None
+
+ logger.debug(f"channel: {channel}")
+ logger.debug(f"webhook_url: {webhook_url}")
+ logger.debug(f"source: {source}")
+ logger.debug(f"destination: {destination}")
+ logger.debug(f"keep_backups: {keep_backups}")
+ logger.debug(f"compress: {compress}")
+ logger.debug(f"keep_essential: {keep_essential}")
+ logger.debug(f"keep_full: {keep_full}")
+ logger.debug(f"force_full_Backup: {force_full_Backup}")
+ logger.debug(f"script_dry_run: {script_dry_run}")
+ logger.debug(f"shutdown_plex: {shutdown_plex}")
+ logger.debug(f"script_debug: {script_debug}")
+ logger.debug(f"full_backup: {full_backup}")
+ logger.debug(f"webhook_url: {webhook_url}")
+ logger.debug(f"channel: {channel}")
+ logger.debug(f"script_name: {script_name}")
+ logger.debug(f"settings: {settings}")
+ logger.debug(f"bash_script_file: {bash_script_file}")
+ logger.debug(f"include: {include}")
+ logger.debug(f"exclude: {exclude}")
+
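+        # Each configured setting below is translated into the single-letter flag expected by the target bash script.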
+ if source:
+ cmd.append('-s')
+ cmd.append(shlex.quote(str(source)))
+ if destination:
+ cmd.append('-d')
+ cmd.append(shlex.quote(str(destination)))
+ if keep_backups:
+ cmd.append('-k')
+ cmd.append(shlex.quote(str(keep_backups)))
+
+ if compress:
+ cmd.append('-c')
+ cmd.append(shlex.quote(str(compress)))
+
+ if webhook_url:
+ cmd.append('-w')
+ cmd.append(shlex.quote(str(webhook_url)))
+
+ if channel:
+ cmd.append('-C')
+ cmd.append(shlex.quote(str(channel)))
+
+ if keep_essential:
+ cmd.append('-k')
+ cmd.append(shlex.quote(str(keep_essential)))
+
+ if keep_full:
+ cmd.append('-K')
+ cmd.append(shlex.quote(str(keep_full)))
+
+ if force_full_Backup:
+ cmd.append('-F')
+ cmd.append(shlex.quote(str(force_full_Backup)))
+
+ if full_backup:
+ cmd.append('-f')
+ cmd.append(shlex.quote(str(full_backup)))
+
+ if script_dry_run:
+ cmd.append('-r')
+ cmd.append(shlex.quote(str(script_dry_run)))
+
+ if shutdown_plex:
+ cmd.append('-S')
+ cmd.append(shlex.quote(str(shutdown_plex)))
+
+ if script_debug:
+ cmd.append('-D')
+ cmd.append(shlex.quote(str(script_debug)))
+
+ if data_dir:
+ cmd.append('-D')
+ cmd.append(shlex.quote(str(data_dir)))
+
+ if include:
+ include = ",".join([f"{i}" for i in include])
+ cmd.append('-i')
+ cmd.append(include)
+
+ if exclude:
+ exclude = ",".join([f"{i}" for i in exclude])
+ cmd.append('-e')
+ cmd.append(exclude)
+
+ if script_name in ['backup_appdata', 'backup_plex']:
+ use_config_file = None
+ cmd.append('-x')
+ cmd.append(shlex.quote(str(use_config_file)))
+ cmds.append(cmd)
+ logger.debug(json.dumps(cmds, indent=4))
+ return cmds
+
+def run_script(cmds, logger):
+ """
+ Run the bash script.
+
+ Args:
+ cmds (list): A list of commands to run.
+ logger (obj): The logger object.
+ """
+ for cmd in cmds:
+ try:
+ logger.debug(f"Running command: {cmd}")
+ call_script(cmd, logger)
+ except Exception as e:
+ logger.error(f"Error running command: {cmd}")
+ logger.error(e)
+ return
+
+def main(script_name, config):
+ """
+ Run the bash script.
+
+ Args:
+        script_name (str): The name of the bash script.
+        config (obj): The Config object for the script.
+ """
+ name = script_name.replace("_", " ").upper()
+ log_level = config.log_level
+ logger = setup_logger(log_level, script_name)
+ settings = None
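+    # Walk the bash_scripts config to find this script's settings; nested dictionaries group sub-scripts under a parent script key.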
+ try:
+ for script_setting_key, script_setting_value in config.bash_config.items():
+ # If value is a dictionary
+ if isinstance(script_setting_value, dict):
+ for sub_script_key, v in script_setting_value.items():
+ if script_name == sub_script_key:
+ settings = config.bash_config.get(script_setting_key, {}).get(script_name, {})
+ script_name = script_setting_key
+ else:
+ settings = config.bash_config.get(script_name, {})
+ logger.info(create_bar(f" START OF {name} "))
+ root_dir = pathlib.Path(__file__).parents[1]
+ bash_script_file = f'{root_dir}/scripts/{script_name}.sh'
+ if settings:
+ logger.debug(f"Running: {script_name.capitalize()}")
+ cmds = set_cmd_args(settings, bash_script_file, logger, script_name)
+ run_script(cmds, logger)
+ logger.debug(f"{script_name.capitalize()} complete.")
+ elif script_name in ['backup_appdata']:
+ settings = {}
+ logger.debug(f"Running: {script_name.capitalize()}")
+ cmds = set_cmd_args(settings, bash_script_file, logger, script_name)
+ run_script(cmds, logger)
+ logger.debug(f"{script_name.capitalize()} complete.")
+ else:
+ logger.error(f"Script: {script_name} does not have a valid configuration. Exiting...")
+ return
+ except KeyboardInterrupt:
+ print("Keyboard Interrupt detected. Exiting...")
+ sys.exit()
+ except Exception:
+ logger.error(f"\n\nAn error occurred:\n", exc_info=True)
+ logger.error(f"\n\n")
+ finally:
+ logger.info(create_bar(f" END OF {name} "))
+
diff --git a/modules/border_replacerr.py b/modules/border_replacerr.py
new file mode 100755
index 0000000..7a3e036
--- /dev/null
+++ b/modules/border_replacerr.py
@@ -0,0 +1,549 @@
+# ____ _ _____ _
+# | _ \ | | | __ \ | |
+# | |_) | ___ _ __ __| | ___ _ __ | |__) |___ _ __ | | __ _ ___ ___ _ __ _ __
+# | _ < / _ \| '__/ _` |/ _ \ '__| | _ // _ \ '_ \| |/ _` |/ __/ _ \ '__| '__|
+# | |_) | (_) | | | (_| | __/ | | | \ \ __/ |_) | | (_| | (_| __/ | | |
+# |____/ \___/|_| \__,_|\___|_| |_| \_\___| .__/|_|\__,_|\___\___|_| |_|
+# | |
+# |_|
+# =================================================================================
+# Author: Drazzilb
+# Description: This script replaces the border on the posters or removes the border
+# Usage: python border_replacerr.py
+# Requirements: Pillow, tqdm
+# License: MIT License
+# =================================================================================
+
+import os
+import json
+import re
+import logging
+import filecmp
+import shutil
+import sys
+
+from util.utility import *
+from util.scheduler import check_schedule
+from util.logger import setup_logger
+
+try:
+ from tqdm import tqdm
+ from PIL import Image, UnidentifiedImageError
+except ImportError as e:
+ print(f"ImportError: {e}")
+ print("Please install the required modules with 'pip install -r requirements.txt'")
+ exit(1)
+
+script_name = "border_replacerr"
+# dry_run = config.dry_run
+logging.getLogger("PIL").setLevel(logging.WARNING)
+
+# Set regex patterns
+illegal_chars_regex = re.compile(r'[<>:"/\\|?*\x00-\x1f]+')
+year_regex = re.compile(r"\((\d{4})\).*")
+remove_special_chars = re.compile(r'[^a-zA-Z0-9\s]+')
+
+def check_holiday(data, border_colors, logger):
+ """
+ Checks if the schedule is a range schedule and if so, runs the range schedule.
+
+ Args:
+        data (dict): The schedule data.
+        border_colors (list): The list of border colors.
+        logger (obj): The logger object.
+
+    Returns:
+        tuple: (border_colors, holiday_active, holiday_name)
+ """
+
+ # Regular expression pattern to match a range schedule format
+ pattern = r"^range(\((\d{1,2}/\d{1,2}-\d{1,2}/\d{1,2}\|?)+\))$"
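+    # e.g. "range(12/01-12/31)" or "range(10/01-10/31|12/01-12/26)" would match this pattern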
+
+ # Iterate through each holiday and its corresponding schedule and color in the data
+ for holiday, schedule_color in data.items():
+ schedule = schedule_color.get('schedule', None)
+
+ # If schedule exists for the holiday
+ if schedule:
+ # Check if the schedule matches the range pattern
+ if re.match(pattern, schedule):
+
+ # If 'check_schedule' returns True (indicating successful execution)
+ if check_schedule(script_name, schedule, logger):
+ # Retrieve the color for the holiday from schedule_color or use default border_colors
+ holiday_colors = schedule_color.get('color', border_colors)
+
+ # If holiday_colors exist, log the schedule execution and colors being used
+ if holiday_colors:
+ table = [
+ [f"Running {holiday.capitalize()} Schedule"],
+ ]
+ logger.info(create_table(table))
+ logger.info(f"Schedule: {holiday.capitalize()} | Using {', '.join(holiday_colors)} border colors.")
+
+ return holiday_colors, True, holiday # Return the colors for the holiday
+
+ else:
+ # Log an error if the schedule doesn't match the expected pattern
+ logger.error(f"Error: {schedule} is not a valid range schedule.")
+
+ # Return the original border colors if no range schedule was found or executed
+ return border_colors, False, None
+
+
+def convert_to_rgb(hex_color, logger):
+ """
+ Converts a hexadecimal color code to an RGB tuple.
+
+ Args:
+ hex_color (str): The hexadecimal color code.
+
+ Returns:
+ tuple: The RGB tuple.
+ """
+
+ hex_color = hex_color.strip("#") # Remove the leading hash if present
+ if len(hex_color) == 3: # Expand shorthand notation if necessary
+        hex_color = "".join(c * 2 for c in hex_color)  # e.g. #ABC becomes #AABBCC
+ try:
+ color_code = tuple(int(hex_color[i : i + 2], 16) for i in (0, 2, 4)) # Convert each pair of hex digits to an integer
+ except ValueError:
+ logger.error(f"Error: {hex_color} is not a valid hexadecimal color code.\nDefaulting to white.")
+ return (255, 255, 255)
+ return color_code
+
+def fix_borders(assets_dict, script_config, border_colors, destination_dir, dry_run, logger):
+ """
+ Replaces the border on the posters.
+
+ Args:
+ assets_dict (dict): The dictionary of assets.
+ script_config (dict): The script config.
+ border_colors (list): The list of border colors.
+ destination_dir (str): The output directory.
+
+ Returns:
+ list: The list of messages.
+ """
+
+ # Extracting necessary parameters from the script config
+ border_width = script_config['border_width']
+ rgb_border_colors = []
+
+ # Convert border colors to RGB format if available
+ if border_colors:
+ for color in border_colors:
+ rgb_color = convert_to_rgb(color, logger) # Convert color to RGB
+ rgb_border_colors.append(rgb_color)
+
+ # Determining the action based on configuration
+ action = None
+ if not border_colors:
+ action = "Removed border on"
+ banner = "Removing Borders"
+ else:
+ action = "Replacing border on"
+ banner = "Replacing Borders"
+
+ # Initialize asset types to process
+ asset_types = ["movies", "series", "collections"]
+
+ # Logging the action if it's determined
+ if action:
+ table = [
+ [f"{banner}"],
+ ]
+ logger.info(create_table(table))
+
+ messages = [] # List to hold progress messages
+
+ # Loop through each asset type
+ for asset_type in asset_types:
+ if asset_type in assets_dict:
+ current_index = 0 # Index for cycling through border colors
+ items = assets_dict[asset_type]
+ # Loop through each item in the asset type
+ for data in tqdm(items, desc=f"Processing {asset_type.capitalize()}", total=len(items), unit="items", disable=None, leave=True):
+ files = data.get('files', None)
+ path = data.get('path', None)
+ year = data.get('year', None)
+ if year:
+ year = f"({year})"
+ else:
+ year = ""
+
+ # Prepare output directory for saving processed files
+ if path:
+ path_basename = os.path.basename(path)
+ output_path = f"{destination_dir}/{path_basename}"
+ if not os.path.exists(output_path):
+ os.makedirs(output_path)
+ else:
+ output_path = destination_dir
+
+ # Process each input file within the asset
+ for input_file in files:
+ file_name, extension = os.path.splitext(input_file)
+ if extension not in [".jpg", ".png", ".jpeg", ".JPG", ".PNG", ".JPEG"]:
+ logger.warning(f"Skipping {input_file} as it is not a jpg or png file.")
+ continue
+ file_name = os.path.basename(input_file)
+ if rgb_border_colors:
+ rgb_border_color = rgb_border_colors[current_index]
+ else:
+ rgb_border_color = None
+
+ # Actual processing or dry run action
+ if not dry_run:
+
+ if rgb_border_color:
+ results = replace_border(input_file, output_path, rgb_border_color, border_width, logger)
+ else:
+ results = remove_border(input_file, output_path, border_width, logger)
+ if results:
+ if path:
+ messages.append(f"{action} {data['title']}{year} - {file_name}")
+ else:
+ messages.append(f"{action} {file_name}")
+ else:
+ messages.append(f"Would have {action} {file_name}")
+
+ if rgb_border_colors:
+ current_index = (current_index + 1) % len(rgb_border_colors)
+ else:
+ logger.info(f"No {asset_type} found.")
+ return messages
+
+
+def replace_border(input_file, output_path, border_colors, border_width, logger):
+ """
+    Crops off the existing border, adds a new border of the configured width and color, and saves the result.
+
+ Args:
+ input_file (str): The input file.
+ output_path (str): The output path.
+ border_colors (list): The list of border colors.
+ border_width (int): The border width.
+
+ Returns:
+ bool: True if the file was saved, False otherwise.
+ """
+
+ # Open the image
+ try:
+ with Image.open(input_file) as image:
+ # Set the border width
+ width, height = image.size # Get the width and height of the image
+
+ # Remove border
+ cropped_image = image.crop((border_width, border_width, width - border_width, height - border_width)) # Crop the image to remove the border
+
+ # Add border
+ new_width = cropped_image.width + 2 * border_width # Add 2 * border_width to the width and height
+ new_height = cropped_image.height + 2 * border_width # to account for the new border
+ final_image = Image.new("RGB", (new_width, new_height), border_colors) # Create a new image with the new border color
+ final_image.paste(cropped_image, (border_width, border_width)) # Paste the cropped image onto the new image
+
+ file_name = os.path.basename(input_file)
+ final_path = f"{output_path}/{file_name}" # Set the output path to the parent directory
+
+            # Resize the image to 1000x1500 (standard poster dimensions)
+ final_image = final_image.resize((1000, 1500)).convert("RGB")
+
+ if os.path.isfile(final_path):
+ # Save file to /tmp/ and compare to existing file
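+                # Only overwrite the existing poster when the newly rendered image differs from the file already on disk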
+ tmp_path = f"/tmp/{file_name}"
+ final_image.save(tmp_path)
+ if not filecmp.cmp(final_path, tmp_path):
+ final_image.save(final_path)
+ # Remove tmp file
+ os.remove(tmp_path)
+ return True
+ else:
+ # Remove tmp file
+ os.remove(tmp_path)
+ return False
+ else:
+ final_image.save(final_path)
+ return True
+
+ # Log an error if the image can't be opened
+ except UnidentifiedImageError as e:
+ logger.error(f"Error: {e}")
+ logger.error(f"Error processing {input_file}")
+ return False
+
+ except Exception as e:
+ logger.error(f"Error: {e}")
+ logger.error(f"Error processing {input_file}")
+ return False
+
+def remove_border(input_file, output_path, border_width, logger):
+ """
+    Removes the top, left, and right borders and replaces the bottom border with black.
+
+ Args:
+ input_file (str): The input file.
+ output_path (str): The output path.
+ border_width (int): The border width.
+ Returns:
+ bool: True if the file was saved, False otherwise.
+ """
+
+ # Open the image
+ try:
+ with Image.open(input_file) as image: # Open the image
+ # Set the border width
+ width, height = image.size # Get the width and height of the image
+
+ # Remove top, left, and right borders, and replace bottom border with black
+ final_image = image.crop((border_width, border_width, width - border_width, height)) # Crop the image to remove the borders
+ bottom_border = Image.new("RGB", (width - 2 * border_width, border_width), color='black') # Create a black image for the bottom border
+            bottom_border_position = (0, height - border_width - border_width) # Position the replacement border flush with the bottom edge of the cropped image
+ final_image.paste(bottom_border, bottom_border_position) # Paste the black bottom border at the specified position
+
+            # Resize the image to 1000x1500 (standard poster dimensions)
+ final_image = final_image.resize((1000, 1500)).convert("RGB")
+
+ file_name = os.path.basename(input_file)
+ final_path = f"{output_path}/{file_name}" # Set the output path to the parent directory
+
+ if os.path.isfile(final_path):
+ # Save file to /tmp/ and compare to existing file
+ tmp_path = f"/tmp/{file_name}"
+ final_image.save(tmp_path)
+ if not filecmp.cmp(final_path, tmp_path):
+ final_image.save(final_path)
+ # Remove tmp file
+ os.remove(tmp_path)
+ return True
+ else:
+ # Remove tmp file
+ os.remove(tmp_path)
+ return False
+ else:
+ final_image.save(final_path)
+ return True
+
+ # Log an error if the image can't be opened
+ except UnidentifiedImageError as e:
+ logger.error(f"Error: {e}")
+ logger.error(f"Error processing {input_file}")
+ return False
+ except Exception as e:
+ logger.error(f"Error: {e}")
+ logger.error(f"Error processing {input_file}")
+ return False
+
+def copy_files(assets_dict, destination_dir, dry_run, logger):
+ """
+ Copies the files in the input directory to the output directory.
+
+ Args:
+ assets_dict (dict): The dictionary of assets.
+ destination_dir (str): The output directory.
+ dry_run (bool): Whether to perform a dry run.
+ Returns:
+        list: The list of messages.
+ """
+ messages = []
+ # Remove trailing slash
+ if destination_dir.endswith('/'):
+ destination_dir = destination_dir.rstrip('/')
+
+ # Initialize asset types to process
+ asset_types = ["movies", "series", "collections"]
+ for asset_type in asset_types:
+ if asset_type in assets_dict:
+ items = assets_dict[asset_type]
+ for data in tqdm(items, desc=f"Processing {asset_type.capitalize()}", total=len(items), unit="items", disable=None, leave=True):
+ files = data.get('files', None)
+ path = data.get('path', None)
+ year = data.get('year', None)
+ if year:
+ year = f"({year})"
+ else:
+ year = ""
+
+ # Prepare output directory for saving processed files
+ if path:
+ path_basename = os.path.basename(path)
+ output_path = f"{destination_dir}/{path_basename}"
+ if not dry_run:
+ if not os.path.exists(output_path):
+ os.makedirs(output_path)
+ else:
+ logger.debug(f"Would have created {output_path}")
+ else:
+ output_path = destination_dir
+
+ # Process each input file within the asset
+ for input_file in files:
+ file_name, extension = os.path.splitext(input_file)
+ if extension not in [".jpg", ".png", ".jpeg", ".JPG", ".PNG", ".JPEG"]:
+ logger.warning(f"Skipping {input_file} as it is not a jpg or png file.")
+ continue
+ file_name = os.path.basename(input_file)
+ final_path = f"{output_path}/{file_name}"
+ output_basename = os.path.basename(output_path)
+ if not dry_run:
+ if os.path.isfile(final_path):
+ if not filecmp.cmp(final_path, input_file):
+ try:
+ shutil.copy(input_file, final_path)
+ except shutil.SameFileError:
+ logger.debug(f"Input file {input_file} is the same as {final_path}, skipping")
+ logger.debug(f"Input file {input_file} is different from {final_path}, copying to {output_basename}")
+ messages.append(f"Copied {data['title']}{year} - {file_name} to {output_basename}")
+ else:
+ try:
+ shutil.copy(input_file, final_path)
+ except shutil.SameFileError:
+ logger.debug(f"Input file {input_file} is the same as {final_path}, skipping")
+ logger.debug(f"Input file {input_file} does not exist in {output_path}, copying to {output_basename}")
+ messages.append(f"Copied {data['title']}{year} - {file_name} to {output_basename}")
+ else:
+ messages.append(f"Would have copied {data['title']}{year} - {file_name} to {output_basename}")
+ return messages
+
+def process_files(source_dirs, destination_dir, dry_run, log_level, script_config, logger):
+ """
+ Processes the files in the input directory.
+
+ Args:
+        source_dirs (str|list): The input directory or directories.
+        destination_dir (str): The output directory.
+        dry_run (bool): Whether to perform a dry run.
+        log_level (str): The log level.
+        script_config (dict): The script config.
+        logger (obj): The logger object.
+
+ Returns:
+ None
+ """
+
+ # Obtain script configuration details
+ schedule = script_config.get('schedule', None)
+ border_colors = script_config.get('border_colors', None)
+ skip = script_config.get('skip', False)
+
+ # Convert single string border color to a list if necessary
+ border_colors = [border_colors] if isinstance(border_colors, str) else border_colors
+ source_dirs = [source_dirs] if isinstance(source_dirs, str) else source_dirs
+
+ table = [
+ ["Script Settings"],
+ ]
+ logger.debug(create_table(table))
+ logger.debug(f'{"Dry_run:":<20}{dry_run}')
+ logger.debug(f'{"Log Level:":<20}{log_level}')
+ logger.debug(f'{"Input Dir:":<20}{source_dirs}')
+ logger.debug(f'{"Output Dir:":<20}{destination_dir}')
+ logger.debug(f'{"Border Colors:":<20}{border_colors}')
+ logger.debug(f'{"Skip:":<20}{skip}')
+ logger.debug(f'{"Schedule:":<20}{schedule}')
+ logger.debug(create_bar("-"))
+
+ run_holiday = False
+
+ # Check for a scheduled event to update border colors if provided
+ if schedule:
+ border_colors, run_holiday, holiday = check_holiday(schedule, border_colors, logger)
+
+ if not os.path.exists(destination_dir):
+ logger.error(f"Output directory {destination_dir} does not exist.")
+ return
+
+ assets_list = []
+ # Categorize files in the input directory into assets
+ for path in source_dirs:
+ results = categorize_files(path)
+ if results:
+ assets_list.extend(results)
+ else:
+ logger.error(f"No assets found in {path}.")
+
+ if assets_list:
+ assets_dict = sort_assets(assets_list)
+ logger.debug(f"Asset Files:\n{json.dumps(assets_dict, indent=4)}")
+ else:
+ logger.error(f"No assets found in {(', '.join(source_dirs))}, if running Poster Renamerr in dry_run, this is expected")
+ return
+
+ # If Run holiday is False and Skip is set to True, return
+ if not run_holiday and skip:
+ messages = copy_files(assets_dict, destination_dir, dry_run, logger)
+ logger.info(f"Skipping {script_name} as it is not scheduled to run today.")
+ if messages:
+ table = [
+ ["Processed Files", f"{len(messages)}"],
+ ]
+ logger.info(create_table(table))
+ for message in messages:
+ logger.info(message)
+ return
+
+ # If no border colors are available, log a message
+ if not border_colors:
+ logger.info(f"No border colors set, removing border instead.")
+ else:
+ logger.info(f"Using {', '.join(border_colors)} border color(s).")
+
+ # if trailing slash on destination_dir, remove it
+ if destination_dir.endswith("/"):
+ destination_dir = destination_dir[:-1]
+
+ # If assets are found in the input directory
+ if any(assets_dict['movies']) or any(assets_dict['series']) or any(assets_dict['collections']):
+ logger.debug(f"assets_dict:\n{json.dumps(assets_dict, indent=4)}")
+
+ # Fix borders for assets using specified configurations
+ messages = fix_borders(assets_dict, script_config, border_colors, destination_dir, dry_run, logger)
+ logger.debug(f"messages:\n{json.dumps(messages, indent=4)}")
+
+ # If there are messages (indicating processed files), log each message
+ if messages:
+ table = [
+ ["Processed Files", f"{len(messages)}"],
+ ]
+ logger.info(create_table(table))
+ for message in messages:
+ logger.info(message)
+ else:
+ # Log a message if no files were processed
+ logger.info(f"\nNo files processed")
+ else:
+ logger.error(f"No assets found in {source_dirs}, if running Poster Renamerr in dry_run, this is expected.")
+ return
+
+
+def main(config):
+ """
+ Main function.
+ """
+ global dry_run
+ dry_run = config.dry_run
+ log_level = config.log_level
+ logger = setup_logger(log_level, script_name)
+ script_config = config.script_config
+ name = script_name.replace("_", " ").upper()
+
+ try:
+ logger.info(create_bar(f"START {name}"))
+ # Obtain script configuration details
+
+ source_dirs = script_config['source_dirs']
+ destination_dir = script_config['destination_dir']
+ border_colors = script_config['border_colors']
+
+ # Convert single string border color to a list if necessary
+ if isinstance(border_colors, str):
+ border_colors = [border_colors]
+
+
+ # Process files in the input directory with specified settings
+ process_files(source_dirs, destination_dir, dry_run, log_level=log_level, script_config=script_config, logger=logger)
+
+ except KeyboardInterrupt:
+ print("Keyboard Interrupt detected. Exiting...")
+ sys.exit()
+ except Exception:
+ logger.error(f"\n\nAn error occurred:\n", exc_info=True)
+ logger.error(f"\n\n")
+ finally:
+ logger.info(create_bar(f"END {name}"))
\ No newline at end of file
diff --git a/modules/health_checkarr.py b/modules/health_checkarr.py
new file mode 100644
index 0000000..46184ea
--- /dev/null
+++ b/modules/health_checkarr.py
@@ -0,0 +1,112 @@
+# _ _ _ _ _ _____ _ _
+# | | | | | | | | | / ____| | | |
+# | |__| | ___ __ _| | |_| |__ | | | |__ ___ ___| | ____ _ _ __ _ __
+# | __ |/ _ \/ _` | | __| '_ \ | | | '_ \ / _ \/ __| |/ / _` | '__| '__|
+# | | | | __/ (_| | | |_| | | | | |____| | | | __/ (__| < (_| | | | |
+# |_| |_|\___|\__,_|_|\__|_| |_| \_____|_| |_|\___|\___|_|\_\__,_|_| |_|
+# ===================================================================================================
+# Author: Drazzilb
+# Description: This script will delete movies and shows from Radarr and Sonarr based on whether they show
+# up in the health check. This is useful for removing movies and shows that have been removed
+# from TMDB or TVDB.
+# Usage: python3 health_checkarr.py
+# Requirements: requests
+# License: MIT License
+# ===================================================================================================
+
+import json
+import re
+import sys
+
+from util.arrpy import StARR
+from util.utility import *
+from util.discord import discord
+from util.logger import setup_logger
+
+try:
+ from tqdm import tqdm
+except ImportError as e:
+ print(f"ImportError: {e}")
+ print("Please install the required modules with 'pip install -r requirements.txt'")
+ exit(1)
+
+script_name = "health_checkarr"
+
+tmdb_id_extractor = re.compile(r"tmdbid (\d+)")
+tvdb_id_extractor = re.compile(r"tvdbid (\d+)")
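+# Radarr/Sonarr health check messages reference removed items by id (e.g. "... tmdbid 12345 ..."); these patterns extract those ids.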
+
+def main(config):
+ """
+ Main function.
+ """
+ global dry_run
+ dry_run = config.dry_run
+ log_level = config.log_level
+ logger = setup_logger(log_level, script_name)
+ script_config = config.script_config
+ name = script_name.replace("_", " ").upper()
+ try:
+ logger.info(create_bar(f"START {name}"))
+ health = None
+ script_config = config.script_config
+ instances = script_config.get('instances', None)
+ valid = validate(config, script_config, logger)
+ # Log script settings
+ table = [
+ ["Script Settings"]
+ ]
+ logger.debug(create_table(table))
+ logger.debug(f'{"Dry_run:":<20}{dry_run if dry_run else "False"}')
+ logger.debug(f'{"Log level:":<20}{log_level if log_level else "INFO"}')
+ logger.debug(f'{"Instances:":<20}{instances if instances else "Not Set"}')
+ logger.debug(create_bar("-"))
+
+ if dry_run:
+ table = [
+ ["Dry Run"],
+ ["NO CHANGES WILL BE MADE"]
+ ]
+ logger.info(create_table(table))
+ logger.info('')
+
+ for instance_type, instance_data in config.instances_config.items():
+ for instance in instances:
+ if instance in instance_data:
+ app = StARR(instance_data[instance]['url'], instance_data[instance]['api'], logger)
+ server_name = app.get_instance_name()
+ health = app.get_health()
+ media_dict = handle_starr_data(app, server_name, instance_type, include_episode=False)
+ id_list = []
+ if health:
+ for health_item in health:
+ if health_item['source'] == "RemovedMovieCheck" or health_item['source'] == "RemoveSeriesCheck":
+ if instance_type == "Radarr":
+ for m in re.finditer(tmdb_id_extractor, health_item['message']):
+ id_list.append(int(m.group(1)))
+ if instance_type == "Sonarr":
+ for m in re.finditer(tvdb_id_extractor, health_item['message']):
+ id_list.append(int(m.group(1)))
+ logger.debug(f"id_list:\n{json.dumps(id_list, indent=4)}")
+ output = []
+ for item in tqdm(media_dict, desc=f"Processing {instance_type}", unit="items", disable=None, total=len(media_dict)):
+ if item['db_id'] in id_list:
+ logger.debug(f"Found {item['title']} with: {item['db_id']}")
+ output.append(item)
+ logger.debug(f"output:\n{json.dumps(output, indent=4)}")
+
+ if output:
+ logger.info(f"Deleting {len(output)} {instance_type} items from {server_name}")
+ for item in tqdm(output, desc=f"Deleting {instance_type} items", unit="items", disable=None, total=len(output)):
+ if not dry_run:
+ logger.info(f"{item['title']} deleted with id: {item['db_id']}")
+ app.delete_media(item['db_id'], instance_type)
+ else:
+ logger.info(f"{item['title']} would have been deleted with id: {item['db_id']}")
+ except KeyboardInterrupt:
+ print("Keyboard Interrupt detected. Exiting...")
+ sys.exit()
+ except Exception:
+ logger.error(f"\n\nAn error occurred:\n", exc_info=True)
+ logger.error(f"\n\n")
+ finally:
+ logger.info(create_bar(f"END {name}"))
\ No newline at end of file
diff --git a/modules/labelarr.py b/modules/labelarr.py
new file mode 100755
index 0000000..953d07a
--- /dev/null
+++ b/modules/labelarr.py
@@ -0,0 +1,361 @@
+# _ _ _
+# | | | | | |
+# | | __ _| |__ ___| | __ _ _ __ _ __
+# | | / _` | '_ \ / _ \ |/ _` | '__| '__|
+# | |___| (_| | |_) | __/ | (_| | | | |
+# |______\__,_|_.__/ \___|_|\__,_|_| |_|
+# ======================================================================================
+# Author: Drazzilb
+# Description: A script to sync labels between Plex and Radarr/Sonarr
+# Usage: python3 labelarr.py
+# Requirements: requests, pyyaml, plexapi
+# License: MIT License
+# ======================================================================================
+
+import json
+import time
+import sys
+
+from util.discord import discord, discord_check
+from util.arrpy import StARR
+from util.utility import *
+from util.logger import setup_logger
+
+try:
+ from plexapi.server import PlexServer
+ from plexapi.exceptions import BadRequest
+except ImportError as e:
+ print(f"ImportError: {e}")
+ print("Please install the required modules with 'pip install -r requirements.txt'")
+ exit(1)
+
+script_name = "labelarr"
+
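+# Tags in Radarr/Sonarr act as the source of truth: labels are added to or removed from Plex items so they match the configured tags.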
+def process_data(plex_dict, media_dict, labels):
+ """
+ Process the data to be synced to Plex.
+
+ Args:
+ plex_dict (dict): The Plex data.
+ media_dict (dict): The Radarr/Sonarr data.
+ labels (list): The list of labels to sync.
+
+ Returns:
+ data_dict (dict): The data to be synced to Plex.
+ """
+
+
+ # Initialize the list to store data to be synced to Plex
+ data_dict = []
+
+ # Iterate through each media item in the Radarr/Sonarr data
+ for media_item in media_dict:
+ # Iterate through each Plex item in the Plex data
+ for plex_item in plex_dict:
+ # Check if the normalized title and year match between Plex and media data
+ if (
+ media_item['normalized_title'] == plex_item['normalized_title']
+ and media_item['year'] == plex_item['year']
+ ):
+ # Get labels from Plex and media tags from Radarr/Sonarr
+ plex_labels = plex_item.get('labels', [])
+ media_tags = media_item.get('tag_data', {}).keys()
+
+ # Dictionary to store labels to add or remove
+ add_remove = {}
+
+ # Check each label in the provided list
+ for label in labels:
+ # Determine labels to add or remove based on comparison between Plex labels and media tags
+ if label in plex_labels and label not in media_tags:
+ add_remove[label] = "remove"
+ elif label not in plex_labels and label in media_tags:
+ add_remove[label] = "add"
+
+ # If there are labels to add or remove, append data to data_dict
+ if add_remove:
+ data_dict.append({
+ "title": media_item['title'],
+ "year": media_item['year'],
+ "add_remove": add_remove
+ })
+
+ # Return the data to be synced to Plex
+ return data_dict
+
+
+def sync_to_plex(plex, data_dict, instance_type, logger):
+ """
+ Sync the data to Plex.
+
+ Args:
+ plex (obj): The Plex server object.
+ data_dict (dict): The data to be synced to Plex.
+        instance_type (str): The type of instance (radarr/sonarr).
+        logger (obj): The logger object.
+
+ Returns:
+ None
+ """
+
+ logger.info(f"Syncing labels to Plex")
+
+ # Loop through each item in the data_dict
+ for item in data_dict:
+ if instance_type == "sonarr":
+ type = "show"
+ elif instance_type == "radarr":
+ type = "movie"
+
+ # Search for the item in the Plex library based on title and year
+ try:
+ plex_item = plex.library.search(item['title'], libtype=type, year=item['year'])[0]
+ except IndexError:
+ # Log an error if the title is not found in Plex and continue to the next item
+ logger.error(f"Title: {item['title']} ({item['year']}) | Title not found in Plex")
+ continue
+
+ # If the Plex item is found
+ if plex_item:
+ # Iterate through each label and corresponding action (add/remove)
+ for label, action in item['add_remove'].items():
+ # Perform add or remove action based on the label and action type
+ if action == "add":
+ plex_item.addLabel(label)
+ elif action == "remove":
+ plex_item.removeLabel(label)
+
+ # No explicit return value, as it's modifying Plex items directly
+ return
+
+
+def handle_messages(data_dict, logger):
+ """
+ Handle the messages to be sent to Discord.
+
+ Args:
+ data_dict (dict): The data to be synced to Plex.
+
+ Returns:
+ None
+ """
+ # Loop through each item in the data_dict
+ for item in data_dict:
+ # Log the title and year of the item
+ logger.info(f"Title: {item['title']} ({item['year']})")
+
+ # Iterate through each label and corresponding action (add/remove) in the item
+ for label, action in item['add_remove'].items():
+ # Log information about label addition or removal
+ if action == "add":
+ logger.info(f"\tLabel: {label} added.")
+ elif action == "remove":
+ logger.info(f"\tLabel: {label} removed.")
+
+
+def notification(data_dict, logger):
+ """
+ Send the notification to Discord.
+
+ Args:
+ data_dict (dict): The data to be synced to Plex.
+
+ Returns:
+ None
+ """
+
+ fields = []
+ built_fields = {}
+ count = 0
+ message_count = 0
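+    # Discord embeds allow at most 25 fields, so results are chunked into batches of 25 per message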
+
+ # Iterate through each item in the data_dict
+ for item in data_dict:
+ actions = []
+ # Extract actions (add or remove labels) for each item
+ for label, action in item['add_remove'].items():
+ if action == "add":
+ action = f"{label} {action}ed"
+ elif action == "remove":
+ action = f"{label} {action}d"
+ actions.append(action)
+ actions = "\n".join(actions)
+ # Create a field for the Discord message
+ field = {
+ "name": f"{item['title']} ({item['year']})",
+ "value": f"```{actions}```",
+ "inline": False
+ }
+ count += 1
+ fields.append(field)
+ # If the count of fields reaches 25, store the current fields and reset the count
+ if count >= 25:
+ count = 0
+ message_count += 1
+ built_fields[message_count] = fields
+ fields = []
+
+ # If there are remaining fields, store them
+ if fields:
+ message_count += 1
+ built_fields[message_count] = fields
+
+ # If there are built_fields, send the notifications
+ if built_fields:
+ for message_number, fields in built_fields.items():
+ print(f"Sending message {message_number} of {message_count}...")
+ # Discord function call (may require specific parameters to function)
+ discord(fields, logger, script_name, description=f"{'__**Dry Run**__' if dry_run else ''}", color=0x00ff00, content=None)
+
+
+def handle_tags(app, media_dict, tag_names):
+ """
+ Handle the tags for the media.
+
+ Args:
+ app (obj): The StARR object.
+ media_dict (dict): The media data.
+ tag_names (list): The list of tag names.
+
+ Returns:
+ media_dict (dict): The media data with the tag data added.
+ """
+
+ tag_dict = {}
+
+ # If tag_names list is not empty
+ if tag_names:
+ # Convert tag names to lowercase and store in 'tags'
+ tags = [tag.lower() for tag in tag_names]
+
+ # Iterate through each tag in the lowercase 'tags' list
+ for tag in tags:
+ # Get the tag ID from StARR object for each tag
+ tag_id = app.get_tag_id_from_name(tag)
+
+ # If tag ID exists, add it to the tag dictionary
+ if tag_id:
+ tag_dict[tag] = tag_id
+
+ # If tag_dict is not empty
+ if tag_dict:
+ # Iterate through each item in the media dictionary
+ for item in media_dict:
+ tag_data = {}
+ # Check each tag and its ID against the item's tags
+ for tag, tag_id in tag_dict.items():
+ # If the tag ID exists in the item's tags, add it to tag_data
+ if tag_id in item['tags']:
+ tag_data[tag] = tag_id
+ # Assign the collected tag_data to the item
+ item['tag_data'] = tag_data
+
+ return media_dict
+
+
+def main(config):
+ """
+ Main function.
+ """
+ global dry_run
+ dry_run = config.dry_run
+ log_level = config.log_level
+ logger = setup_logger(log_level, script_name)
+ script_config = config.script_config
+ name = script_name.replace("_", " ").upper()
+
+ try:
+ logger.info(create_bar(f"START {name}"))
+ # If in dry run mode, create a table indicating no changes will be made
+ if dry_run:
+ table = [
+ ["Dry Run"],
+ ["NO CHANGES WILL BE MADE"]
+ ]
+ logger.info(create_table(table))
+
+ # Fetch script configurations
+ script_config = config.script_config
+ instances = script_config.get('instances', None)
+ valid = validate(config, script_config, logger)
+
+ # Iterate through instance types and their respective configurations
+ for instance_type, instance_data in config.instances_config.items():
+ for instance, instance_settings in instances.items():
+ if instance in instance_data:
+ # Extract various settings for the instance
+ plex_instances = instance_settings.get('plex_instances', None)
+ labels = instance_settings.get('labels', None)
+ library_names = instance_settings.get('library_names', None)
+
+ # Create StARR object and get instance name
+ app = StARR(instance_data[instance]['url'], instance_data[instance]['api'], logger)
+ starr_server_name = app.get_instance_name()
+
+ # Fetch and process media data from the StARR instance
+ media_dict = handle_starr_data(app, starr_server_name, instance_type, include_episode=False)
+ media_dict = handle_tags(app, media_dict, labels)
+
+ # If media data is found
+ if media_dict:
+ # Logging settings and instance information
+ logger.debug(f"Media Data:\n{json.dumps(media_dict, indent=4)}")
+ # (Additional logging and table creation omitted for brevity)
+
+ # Iterate through Plex instances associated with the current StARR instance
+ for plex_instance in plex_instances:
+ if plex_instance in config.plex_config:
+ # Connect to the Plex server
+ try:
+ logger.info("Connecting to Plex...")
+ plex = PlexServer(config.plex_config[plex_instance]['url'], config.plex_config[plex_instance]['api'], timeout=120)
+ except BadRequest:
+ logger.error(f"Error connecting to Plex instance: {plex_instance}")
+ continue
+ server_name = plex.friendlyName
+
+ # Fetch Plex data and process it
+ if library_names:
+ library_names_str = ", ".join(library_names)
+ logger.info(f"Gathering plex data on {server_name} for {library_names_str}... Please wait...")
+ plex_dict = get_plex_data(plex, library_names, logger, include_smart=False, collections_only=False)
+ logger.info(f"Completed gathering plex data...")
+ else:
+ logger.error(f"No library names provided for {starr_server_name}, against {server_name}. Skipping...")
+ continue
+ # If Plex data is found
+ if plex_dict:
+ # Logging Plex data
+ logger.debug(f"Plex Data:\n{json.dumps(plex_dict, indent=4)}")
+
+ # Process data for syncing to Plex
+ logger.info("Syncing labels to Plex")
+ data_dict = process_data(plex_dict, media_dict, labels)
+
+ # If items to sync are found
+ if data_dict:
+ logger.debug(f"Items to sync:\n{json.dumps(data_dict, indent=4)}")
+ # Perform actual syncing to Plex if not in dry run mode
+ if not dry_run:
+ sync_to_plex(plex, data_dict, instance_type, logger)
+
+ # Handle messages related to syncing actions
+ handle_messages(data_dict, logger)
+
+ # Send notifications related to syncing actions
+ if discord_check(script_name):
+ notification(data_dict, logger)
+ else:
+ logger.info(f"No items to sync from {starr_server_name} to {server_name}.\n")
+ else:
+ logger.error(f"No Plex Data found for {server_name}. Skipping...")
+ continue
+ else:
+ continue
+ except KeyboardInterrupt:
+ print("Keyboard Interrupt detected. Exiting...")
+ sys.exit()
+ except Exception:
+ logger.error(f"\n\nAn error occurred:\n", exc_info=True)
+ logger.error(f"\n\n")
+ finally:
+ logger.info(create_bar(f"END {name}"))
\ No newline at end of file
diff --git a/modules/nohl.py b/modules/nohl.py
new file mode 100755
index 0000000..101e008
--- /dev/null
+++ b/modules/nohl.py
@@ -0,0 +1,716 @@
+# _ _ _
+# | | | | |
+# _ __ ___ | |__| | | _ __ _ _
+# | '_ \ / _ \| __ | | | '_ \| | | |
+# | | | | (_) | | | | |____ _| |_) | |_| |
+# |_| |_|\___/|_| |_|______(_) .__/ \__, |
+# | | __/ |
+# |_| |___/
+# ===================================================================================================
+# Author: Drazzilb
+# Description: This script will find all files that are not hardlinked and process them in Radarr
+#              and Sonarr. This is useful for anyone who wants their seeding files to be 100%
+#              hardlinked.
+# Usage: python3 nohl.py
+# Requirements: Python 3.8+, requests
+# License: MIT License
+# ===================================================================================================
+
+import os
+import re
+import sys
+import json
+
+from util.arrpy import StARR
+from util.discord import discord, discord_check
+from util.utility import *
+from util.logger import setup_logger
+
+try:
+ from tqdm import tqdm
+except ImportError as e:
+ print(f"ImportError: {e}")
+ print("Please install the required modules with 'pip install -r requirements.txt'")
+ exit(1)
+
+script_name = "nohl"
+
+# Regular expressions for file parsing
+season_regex = r"Season (\d{1,2})"
+episode_regex = r"(?:E|e)(\d{1,2})"
+title_regex = r".*\/([^/]+)\s\((\d{4})\).*"
+year_regex = re.compile(r"\s?\((\d{4})\).*")
+
+def find_no_hl_files(path, logger):
+ """
+ Finds all files that are not hardlinked in a given path.
+
+ Args:
+        path (str): Path to search for files.
+        logger (obj): The logger object.
+
+ Returns:
+ dict: Dictionary of files that are not hardlinked.
+ """
+ path_basename = os.path.basename(path.rstrip('/'))
+ nohl_data = {'movies':[], 'series':[]} # Initialize an empty list to store non-hardlinked file information
+ # Iterating through items in the specified path
+ for item in tqdm([i for i in os.listdir(path) if os.path.isdir(os.path.join(path, i))], desc=f"Searching '{path_basename}'", unit="item", total=len(os.listdir(path)), disable=None, leave=True):
+ if item.startswith('.'): # Skip hidden files or directories
+ continue
+
+ # Extracting title and year information using regex
+ title = re.sub(year_regex, '', item)
+ try:
+ year = int(year_regex.search(item).group(1))
+ except AttributeError as e:
+ year = 0
+ # Creating an asset dictionary to store file information
+ asset_list = {
+ 'title': title,
+ 'year': year,
+ 'normalized_title': normalize_titles(title),
+ 'root_path': path,
+ 'path': os.path.join(path, item)
+ }
+
+ if os.path.isdir(os.path.join(path, item)) and any(os.path.isdir(os.path.join(path, item, sub_folder)) for sub_folder in os.listdir(os.path.join(path, item))):
+ # If the item is a directory and contains sub folders
+ sub_folders = [sub_folder for sub_folder in os.listdir(os.path.join(path, item)) if os.path.isdir(os.path.join(path, item, sub_folder)) and not sub_folder.startswith('.')]
+ sub_folders.sort()
+ asset_list['season_info'] = [] # Initialize list to store season information
+
+ # Processing sub folders
+ for sub_folder in sub_folders:
+ sub_folder_files = [file for file in os.listdir(os.path.join(path, item, sub_folder)) if os.path.isfile(os.path.join(path, item, sub_folder, file)) and not file.startswith('.')]
+ season = re.search(season_regex, sub_folder)
+ try:
+ season_number = int(season.group(1))
+ except AttributeError as e:
+ season_number = 0
+ sub_folder_files.sort()
+ nohl_files = []
+
+ # Finding non-hardlinked files within sub folders
+ for file in sub_folder_files:
+ # If file extension is not mkv or mp4 skip
+ if not file.endswith('.mkv') and not file.endswith('.mp4'):
+ continue
+ file_path = os.path.join(path, item, sub_folder, file)
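+                        # A link count (st_nlink) of 1 means the file has no other hardlinks pointing at it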
+ if (os.path.isfile(file_path) and os.stat(file_path).st_nlink == 1):
+ nohl_files.append(file_path)
+ # Extracting episode numbers
+ episodes = []
+ for file in nohl_files:
+ try:
+ episode_match = re.search(episode_regex, file)
+ if episode_match is not None:
+ episode = int(episode_match.group(1))
+ episodes.append(episode)
+ except AttributeError as e:
+ logger.error(f"{e}")
+ logger.error(f"Error processing file: {file}.")
+ continue
+
+ # Storing season information with non-hardlinked files
+ season_list = {
+ 'season_number': season_number,
+ 'episodes': episodes,
+ 'nohl': nohl_files
+ }
+
+ if nohl_files:
+ asset_list['season_info'].append(season_list)
+ if asset_list['season_info'] and any(season['nohl'] for season in asset_list['season_info']):
+ nohl_data['series'].append(asset_list)
+ else:
+ # For individual files within directories
+ files = [file for file in os.listdir(os.path.join(path, item)) if os.path.isfile(os.path.join(path, item, file)) and not file.startswith('.')]
+ files.sort()
+ nohl_files = []
+
+ # Finding non-hardlinked files within individual files
+ for file in files:
+ if not file.endswith('.mkv') and not file.endswith('.mp4'):
+ continue
+ file_path = os.path.join(path, item, file)
+ if (os.path.isfile(file_path) and os.stat(file_path).st_nlink == 1):
+ nohl_files.append(file_path)
+
+ # Storing non-hardlinked files
+ asset_list['nohl'] = nohl_files
+ if nohl_files:
+ nohl_data['movies'].append(asset_list)
+
+ return nohl_data # Return the list of dictionaries representing non-hardlinked files
+
+def handle_searches(app, search_list, instance_type):
+ """
+ Handles searching for files in Radarr or Sonarr.
+
+ Args:
+ app (StARR): StARR object for Radarr/Sonarr.
+ search_list (dict): Dictionary of files to search for.
+ instance_type (str): Type of instance, either 'radarr' or 'sonarr'.
+ """
+ print("Searching for files... this may take a while.")
+ searched_for = [] # Initialize a list to store media items that have been searched for
+ searches = 0 # Initialize the number of searches performed
+ for item in tqdm(search_list, desc="Searching...", unit="item", total=len(search_list), disable=None, leave=True):
+ if instance_type == 'radarr':
+ # For Radarr instance, handle search for movie files
+ app.delete_movie_file(item['file_ids']) # Delete specified movie files
+ results = app.refresh_items(item['media_id']) # Refresh the media item
+ ready = app.wait_for_command(results['id']) # Wait for the refresh command to complete
+ if ready:
+ app.search_media(item['media_id']) # Initiate search for the media item
+ searched_for.append(item)
+ searches += 1 # Increment the count of searches performed
+ elif instance_type == 'sonarr':
+ # For Sonarr instance, handle search for episodes or season packs
+ seasons = item.get('seasons', [])
+ if seasons:
+ for season in seasons:
+ season_pack = season['season_pack']
+ file_ids = list(set([episode['episode_file_id'] for episode in season['episode_data']]))
+ episode_ids = [episode['episode_id'] for episode in season['episode_data']]
+ if season_pack:
+ # Delete season files if it's a season pack
+ app.delete_episode_files(file_ids)
+ results = app.refresh_items(item['media_id'])
+ ready = app.wait_for_command(results['id'])
+ if ready:
+ app.search_season(item['media_id'], season['season_number'])
+ searched_for.append(item)
+ else:
+ # Delete episode files if individual episodes
+ app.delete_episode_files(file_ids)
+ results = app.refresh_items(item['media_id'])
+ ready = app.wait_for_command(results['id'])
+ if ready:
+ app.search_episodes(episode_ids)
+ searched_for.append(item)
+ print(f"Searches performed: {searches}")
+ return searched_for
+
+def filter_media(app, media_list, nohl_data, instance_type, exclude_profiles, exclude_media, max_search):
+ """
+ Filters media based on quality profile and monitored status.
+
+ Args:
+ app (StARR): StARR object for Radarr/Sonarr.
+ media_list (dict): Dictionary of media items.
+        nohl_data (dict): Dictionary of files that are not hardlinked.
+        instance_type (str): Type of instance, either 'radarr' or 'sonarr'.
+        exclude_profiles (list): List of quality profiles to exclude.
+        exclude_media (list): List of media titles to exclude.
+        max_search (int): Maximum number of searches to queue.
+
+ Returns:
+ dict: Dictionary of filtered media and media to search for.
+ """
+ quality_profiles = app.get_quality_profile_names()
+ exclude_profile_ids = []
+
+ # Get IDs for quality profiles to be excluded
+ if exclude_profiles:
+ for profile in exclude_profiles:
+ for profile_name, profile_id in quality_profiles.items():
+ if profile_name == profile:
+ exclude_profile_ids.append(profile_id)
+
+ data_list = {'search_media': [], 'filtered_media': []} # Initialize dictionary to store filtered media and media to search
+
+ # Iterate through nohl_list (dictionary of non-hardlinked files)
+ for nohl_item in tqdm(nohl_data, desc="Filtering media...", unit="item", total=len(nohl_data), disable=None, leave=True):
+ # Iterate through media items in media_list
+ for media_item in media_list:
+ # Compare media items with non-hardlinked items
+ if media_item['normalized_title'] == nohl_item['normalized_title'] and media_item['year'] == nohl_item['year']:
+ # Check if the media item is not monitored
+ if media_item['monitored'] == False or (exclude_media is not None and media_item['title'] in exclude_media) or media_item['quality_profile'] in exclude_profile_ids:
+ data_list['filtered_media'].append({
+ 'title': media_item['title'],
+ 'year': media_item['year'],
+ 'monitored': media_item['monitored'],
+ 'exclude_media': media_item['title'] in exclude_media if exclude_media else False,
+ 'quality_profile': quality_profiles.get(media_item['quality_profile']) if media_item['quality_profile'] in exclude_profile_ids else None
+ })
+ continue
+ # Handle search for media files based on instance type (Radarr/Sonarr)
+ if instance_type == 'radarr':
+ file_ids = media_item['file_id']
+ data_list['search_media'].append({
+ 'media_id': media_item['media_id'],
+ 'title': media_item['title'],
+ 'year': media_item['year'],
+ 'file_ids': file_ids
+ })
+ elif instance_type == 'sonarr':
+ # Retrieve information about seasons of the media item from Sonarr
+ media_seasons_info = media_item.get('seasons', {})
+
+ # Retrieve information about non-hardlinked files specifically related to seasons
+ file_season_info = nohl_item.get('season_info', [])
+
+ # Lists to store data about episodes or season packs
+ season_data = []
+ filtered_seasons = []
+
+ # Iterate through each season of the media item
+ for media_season in media_seasons_info:
+ for file_season in file_season_info:
+ # Match the season number between media and non-hardlinked files
+ if media_season['season_number'] == file_season['season_number']:
+ # Check if the season is unmonitored
+ if media_season['monitored'] == False:
+ # Append unmonitored season to filtered_seasons list
+ filtered_seasons.append({
+ 'season_number': media_season['season_number'],
+ 'monitored': media_season['monitored'],
+ })
+ else:
+ # Check if it's a season pack
+ if media_season['season_pack'] == True:
+ # Append season pack information (including episode data) to season_data list
+ season_data.append({
+ 'season_number': media_season['season_number'],
+ 'season_pack': media_season['season_pack'],
+ 'episode_data': media_season['episode_data']
+ })
+ else:
+ # Lists to store episodes with hardlink issues and monitored episodes
+ filtered_episodes = []
+ episode_data = []
+
+ # Check each episode in the season
+ for episode in media_season['episode_data']:
+ # Check if the episode is unmonitored
+ if episode['monitored'] == False:
+ # Append unmonitored episode to filtered_episodes list
+ filtered_episodes.append(episode)
+ else:
+ # Check if the episode is not hardlinked
+ if episode['episode_number'] in file_season['episodes']:
+ # Append episode data to episode_data list
+ episode_data.append(episode)
+
+ # Append unmonitored episodes within the season to filtered_seasons
+ if filtered_episodes:
+ filtered_seasons.append({
+ 'season_number': media_season['season_number'],
+ 'monitored': media_season['monitored'],
+ 'episodes': filtered_episodes
+ })
+
+ # Append monitored episodes with hardlink issues to season_data
+ if episode_data:
+ season_data.append({
+ 'season_number': media_season['season_number'],
+ 'season_pack': media_season['season_pack'],
+ 'episode_data': episode_data
+ })
+
+ # If there are unmonitored seasons or season packs, add to filtered_media
+ if filtered_seasons:
+ data_list['filtered_media'].append({
+ 'title': media_item['title'],
+ 'year': media_item['year'],
+ 'seasons': filtered_seasons
+ })
+
+ # If there are monitored episodes or season packs with hardlink issues, add to search_media
+ if season_data:
+ data_list['search_media'].append({
+ 'media_id': media_item['media_id'],
+ 'title': media_item['title'],
+ 'year': media_item['year'],
+ 'monitored': media_item['monitored'],
+ 'seasons': season_data
+ })
+ # Limit the number of searches to the maximum allowed, if one is configured
+ if max_search is not None and len(data_list['search_media']) > max_search:
+ data_list['search_media'] = data_list['search_media'][:max_search]
+ # Return the dictionary containing filtered media and media to search for in Sonarr
+ return data_list
+
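+# Illustrative sketch (not part of the original script) of the filtered_media entries built by
+# filter_media() above, based on the keys it appends; all values are hypothetical:
+#   Radarr-style: {'title': 'Example Movie', 'year': 2020, 'monitored': False,
+#                  'exclude_media': False, 'quality_profile': None}
+#   Sonarr-style: {'title': 'Example Show', 'year': 2021,
+#                  'seasons': [{'season_number': 2, 'monitored': False}]}
+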
+def handle_messages(output_dict, logger):
+ """
+ Handle CLI output for nohl.py
+
+ Args:
+ output_dict (dict): Dictionary of output data.
+ logger (Logger): Logger instance for output.
+
+ Returns:
+ None
+ """
+ table = [
+ ["Results"],
+ ]
+ logger.info(create_table(table))
+ # Iterate through each instance in the output_dict
+ for instance, instance_data in output_dict.items():
+
+ # Retrieve search and filtered media information
+ search_media = instance_data['data']['search_media']
+ filtered_media = instance_data['data']['filtered_media']
+
+ # Display searched media information
+ if search_media:
+ for search_item in search_media:
+ # For Radarr instances, display deleted and searched files
+ if instance_data['instance_type'] == 'radarr':
+ logger.info(f"{search_item['title']} ({search_item['year']})")
+ logger.info(f"\tDeleted and searched.\n")
+ else: # For Sonarr instances, display files that were searched
+ logger.info(f"{search_item['title']} ({search_item['year']})")
+ if search_item.get('seasons', None):
+ for season in search_item['seasons']:
+ if season['season_pack']:
+ logger.info(f"\tSeason {season['season_number']}, deleted and searched.")
+ else:
+ logger.info(f"\tSeason {season['season_number']}")
+ for episode in season['episode_data']:
+ logger.info(f"\t Episode {episode['episode_number']}, deleted and searched.")
+ logger.info("")
+ # Display filtered media information
+ table = [
+ ["Filtered Media"],
+ ]
+ if filtered_media:
+ logger.debug(create_table(table))
+ for filtered_item in filtered_media:
+ monitored = filtered_item.get('monitored', None)
+ logger.debug(f"{filtered_item['title']} ({filtered_item['year']})")
+ if monitored == False:
+ logger.debug(f"\tSkipping, not monitored.")
+ elif filtered_item.get('exclude_media', None):
+ logger.debug(f"\tSkipping, excluded.")
+ elif filtered_item.get('quality_profile', None):
+ logger.debug(f"\tSkipping, quality profile: {filtered_item['quality_profile']}")
+ elif filtered_item.get('seasons', None):
+ for season in filtered_item['seasons']:
+ if season['monitored'] == False:
+ logger.debug(f"\tSeason {season['season_number']}, skipping, not monitored.")
+ elif season.get('episodes', None):
+ logger.debug(f"\tSeason {season['season_number']}")
+ for episode in season['episodes']:
+ logger.debug(f"\t Episode {episode['episode_number']}, skipping, not monitored.")
+ logger.debug("")
+ else:
+ logger.debug(f"No files to filter for {instance_data['server_name']}")
+ logger.debug("")
+
+
+def notification(final_output, logger, log_level):
+ """
+ Sends a discord notification with the results of the script.
+
+ Args:
+ final_output (dict): Dictionary of output data.
+ logger (Logger): Logger instance for output.
+ log_level (str): Log level; filtered media details are only included when set to "debug".
+
+ Returns:
+ None
+ """
+ # Initialize variables for message building
+ fields = []
+ built_fields = {}
+ count = 0
+ message_count = 0
+
+ # Loop through each instance in the final output
+ previous_instance = None
+ for instance, instance_data in final_output.items():
+ server_name = instance_data['server_name']
+ data = instance_data['data']
+ search_media = data.get('search_media', [])
+ filtered_media = data.get('filtered_media', [])
+
+
+ # Build fields for search media
+ if search_media:
+ # Initialize variables for message building
+ if previous_instance != instance:
+ name = f"❌ {server_name}: Search Media"
+ else:
+ name = None
+ discord_messages = []
+ current_field = ""
+ for search_item in search_media:
+ sub_messages = []
+ # Construct messages for Radarr or Sonarr
+ if instance_data['instance_type'] == 'radarr':
+ sub_messages.append(f"{search_item['title']} ({search_item['year']})\n")
+ elif instance_data['instance_type'] == 'sonarr':
+ # Construct messages for Sonarr including season and episode data
+ sub_messages.append(f"{search_item['title']} ({search_item['year']})")
+ if search_item.get('seasons', None):
+ # Iterate through seasons and episodes
+ for season in search_item['seasons']:
+ if season['season_pack']:
+ sub_messages.append(f"\tSeason {season['season_number']}")
+ else:
+ sub_messages.append(f"\tSeason {season['season_number']}")
+ for episode in season['episode_data']:
+ sub_messages.append(f"\t\tEpisode {episode['episode_number']}")
+ sub_messages.append("")
+ discord_messages.append("\n".join(sub_messages))
+
+ # Split asset-specific messages into multiple fields if their total length exceeds Discord's field limit
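+ # Note: Discord embeds allow at most 25 fields and 1024 characters per field value;
+ # the 1000-character threshold below leaves headroom for the code-block markers wrapped around each field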
+ if discord_messages:
+ current_field = ""
+ for message in discord_messages:
+ # Check if adding the message exceeds the character limit
+ if len(current_field) + len(message) <= 1000:
+ current_field += message + "\n"
+ else:
+ # Add the current field to the fields list
+ fields.append({
+ "name": name if name else "",
+ "value": f"```{current_field}```"
+ })
+ # Start a new field with the current message
+ name = ""
+ current_field = message + "\n"
+ # Add the last field to the fields list
+ fields.append({
+ "name": name if name else "",
+ "value": f"```{current_field}```"
+ })
+
+ # Add the fields to the built_fields dictionary
+ num_fields = len(fields)
+ num_messages_per_field = 25
+ num_keys = num_fields // num_messages_per_field
+ if num_fields % num_messages_per_field != 0:
+ num_keys += 1
+
+ for i in range(num_keys):
+ start_index = i * num_messages_per_field
+ end_index = min(start_index + num_messages_per_field, num_fields)
+ built_fields[i + 1] = fields[start_index:end_index]
+
+ if log_level == "debug" and filtered_media:
+ filter_message = []
+ for filtered_item in filtered_media:
+ # Construct messages for filtered media
+ filter_message.append(f"{filtered_item['title']} ({filtered_item['year']})")
+ monitored = filtered_item.get('monitored', None)
+ if monitored == False:
+ filter_message.append(f"\tSkipping, not monitored.")
+ # Handle other filtering criteria (exclusion, quality profile, seasons/episodes)
+ elif filtered_item.get('exclude_media', None):
+ filter_message.append(f"\tSkipping, excluded.")
+ elif filtered_item.get('quality_profile', None):
+ filter_message.append(f"\tSkipping, quality profile: {filtered_item['quality_profile']}")
+ elif filtered_item.get('seasons', None):
+ for season in filtered_item['seasons']:
+ if season['monitored'] == False:
+ filter_message.append(f"\tSeason {season['season_number']}, skipping, not monitored.")
+ elif season.get('episodes', None):
+ filter_message.append(f"\tSeason {season['season_number']}")
+ for episode in season['episodes']:
+ filter_message.append(f"\t\tEpisode {episode['episode_number']}, skipping, not monitored.")
+ filter_message.append("")
+ filter_message = "\n".join(filter_message)
+ fields.append({
+ "name": f"{server_name}: Filtered Media",
+ "value": f"```{filter_message}```",
+ "inline": False
+ })
+
+ # Handle cases where there are no files to search or filter
+ if not search_media:
+ fields.append({
+ "name": f"✅ {server_name} all files are hardlinked!",
+ "value": f"",
+ "inline": False
+ })
+
+ # Track how many instances have been processed for message splitting
+ count += 1
+ # Start a new message after 25 instances, since Discord allows at most 25 fields per embed
+ if count >= 25:
+ count = 0
+ message_count += 1
+ built_fields[message_count] = fields
+ fields = []
+ previous_instance = instance
+
+ # Create message blocks for Discord
+ if fields:
+ message_count += 1
+ built_fields[message_count] = fields
+
+ # Send messages to Discord
+ for message_number, fields in built_fields.items():
+ print(f"Sending message {message_number} of {message_count}...")
+ if dry_run:
+ description = "__**Dry Run**__\nAll items deleted and searched for"
+ else:
+ description = "All items deleted and searched for"
+
+ discord(fields, logger, script_name, description=description, color=0x00ff00, content=None)
+
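+# Illustrative sketch (not part of the original script) of a single Discord field built by
+# notification() above; the server name and title are hypothetical:
+#   {'name': '❌ Radarr: Search Media', 'value': '```Example Movie (2020)\n```'}
+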
+
+def main(config):
+ """
+ Main function.
+ """
+ global dry_run
+ dry_run = config.dry_run
+ log_level = config.log_level
+ logger = setup_logger(log_level, script_name)
+ script_config = config.script_config
+ name = script_name.replace("_", " ").upper()
+ try:
+ logger.info(create_bar(f"START {name}"))
+ # Check if a dry run is enabled
+ if dry_run:
+ # Display a notification for a dry run
+ table = [
+ ["Dry Run"],
+ ["NO CHANGES WILL BE MADE"]
+ ]
+ logger.info(create_table(table))
+
+ # Fetch configurations from the script's config file
+ script_config = config.script_config
+ max_search = script_config.get('maximum_searches', None)
+ instances = script_config.get('instances', None)
+ filters = script_config.get('filters', None)
+ paths = script_config.get('paths', None)
+ print_files = script_config.get('print_files', False)
+ valid = validate(config, script_config, logger)
+
+ # Check if instances are properly configured
+ if instances is None:
+ logger.error("No instances set in config file.")
+ return
+
+ # Display script configurations in the logs
+ table = [
+ ["Script Configuration"],
+ ]
+ logger.debug(create_table(table))
+ logger.debug(f'{"Dry_run:":<20}{dry_run}')
+ logger.debug(f'{"Log level:":<20}{log_level}')
+ logger.debug(f'{"Maximum Searches:":<30}{max_search}')
+ logger.debug(f'{f"Instances:":<30}\n{json.dumps(instances, indent=4)}')
+ logger.debug(f'{"Filters:":<30}\n{json.dumps(filters, indent=4)}')
+ logger.debug(f'{"Paths:":<30}\n{json.dumps(paths, indent=4)}')
+ logger.debug(f'{"Print Files:":<30}{print_files}')
+ logger.debug(f'{"Exclude Profiles:":<30}\n{json.dumps(filters.get("exclude_profiles", []), indent=4)}')
+ logger.debug(f'{"Exclude Movies:":<30}\n{json.dumps(filters.get("exclude_movies", []), indent=4)}')
+ logger.debug(f'{"Exclude Series:":<30}\n{json.dumps(filters.get("exclude_series", []), indent=4)}')
+ logger.debug(create_bar("-"))
+
+ # Display the summary of non-hardlinked files in each directory
+ output_dict = {}
+ # Process provided paths to find non-hardlinked files
+ nohl_list = {'movies': [], 'series': []}
+ if paths:
+ for path in paths:
+ results = find_no_hl_files(path, logger)
+ if results:
+ nohl_list['movies'].extend(results['movies'])
+ nohl_list['series'].extend(results['series'])
+ # Display non-hardlinked files in the logs
+ logger.debug(f"Non-Hardlinked Files:\n{json.dumps(nohl_list, indent=4)}")
+
+ # Generate a summary of the number of non-hardlinked files in each directory
+ total = 0
+ table = [
+ ["Directory", "Number of Files"],
+ ]
+ counter = {}
+ results_table = [
+ ["Non-Hardlinked Files"],
+ ]
+ logger.info(create_table(results_table))
+ for media_type, results in nohl_list.items():
+ if results:
+ old_root_path = ""
+ for item in results:
+ root_path = os.path.basename(os.path.normpath(item['root_path']))
+ if media_type == 'movies':
+ counter[root_path] = counter.get(root_path, 0) + len(item['nohl'])
+ total += len(item['nohl'])
+ elif media_type == 'series':
+ for season in item['season_info']:
+ counter[root_path] = counter.get(root_path, 0) + len(season['nohl'])
+ total += len(season['nohl'])
+ if print_files:
+ if old_root_path != root_path:
+ logger.info(f"Root Path: {root_path}")
+ logger.info(f"\t{item['title']} ({item['year']})")
+ if media_type == 'movies':
+ for file in item['nohl']:
+ file_name = os.path.basename(file)
+ logger.info(f"\t\t{file_name}")
+ else:
+ for season in item['season_info']:
+ for file in season['nohl']:
+ file_name = os.path.basename(file)
+ logger.info(f"\t\t{file_name}")
+ logger.info("")
+ old_root_path = root_path
+ for key, value in counter.items():
+ table.append([key, value])
+ if total:
+ table.append(["Total", total])
+ logger.info(create_table(table))
+ logger.info("")
+ # Iterate through instances and handle the connections and data retrieval
+ for instance_type, instance_data in config.instances_config.items():
+ for instance in instances:
+ if instance in instance_data:
+ data_list = {'search_media': [], 'filtered_media': []}
+ instance_settings = instance_data.get(instance, None)
+ app = StARR(instance_settings['url'], instance_settings['api'], logger)
+ server_name = app.get_instance_name()
+ exclude_profiles = filters.get('exclude_profiles', [])
+ table = [
+ [f"{server_name}"],
+ ]
+ logger.info(create_table(table))
+ if instance_type == "radarr" and not nohl_list['movies'] or instance_type == "sonarr" and not nohl_list['series']:
+ logger.info(f"No non-hardlinked files found for server: {server_name}")
+ exclude_media = filters.get('exclude_movies', []) if instance_type == 'radarr' else filters.get('exclude_series', [])
+ nohl_data = nohl_list['movies'] if instance_type == "radarr" else nohl_list['series'] if instance_type == "sonarr" else None
+ if nohl_data:
+ media_list = handle_starr_data(app, server_name, instance_type, include_episode=True)
+ if media_list:
+ data_list = filter_media(app, media_list, nohl_data, instance_type, exclude_profiles, exclude_media, max_search)
+ else:
+ logger.info(f"No media found for server: {server_name}")
+ if data_list:
+ logger.debug(f"Data Media:\n{json.dumps(data_list, indent=4)}")
+ search_list = data_list.get('search_media', [])
+ if search_list:
+ # Conduct searches if not a dry run
+ if not dry_run:
+ search_list = handle_searches(app, search_list, instance_type)
+ data_list['search_media'] = search_list
+ # Prepare output data
+ output_dict[instance] = {
+ 'server_name': server_name,
+ 'instance_type': instance_type,
+ 'data': data_list
+ }
+ logger.debug(f"Output Data:\n{json.dumps(output_dict, indent=4)}")
+ # Display command-line output about processed files and excluded media
+ handle_messages(output_dict, logger)
+
+ # Send a Discord notification containing the output data
+ if discord_check(script_name):
+ notification(output_dict, logger, log_level)
+ except KeyboardInterrupt:
+ print("Keyboard Interrupt detected. Exiting...")
+ sys.exit()
+ except Exception:
+ logger.error(f"\n\nAn error occurred:\n", exc_info=True)
+ logger.error(f"\n\n")
+ finally:
+ logger.info(create_bar(f" ENDING {name} "))
\ No newline at end of file
diff --git a/modules/poster_cleanarr.py b/modules/poster_cleanarr.py
new file mode 100755
index 0000000..d825bf0
--- /dev/null
+++ b/modules/poster_cleanarr.py
@@ -0,0 +1,341 @@
+# _____ _ _____ _
+# | __ \ | | / ____| |
+# | |__) |__ ___| |_ ___ _ __| | | | ___ __ _ _ __ __ _ _ __ _ __
+# | ___/ _ \/ __| __/ _ \ '__| | | |/ _ \/ _` | '_ \ / _` | '__| '__|
+# | | | (_) \__ \ || __/ | | |____| | __/ (_| | | | | (_| | | | |
+# |_| \___/|___/\__\___|_| \_____|_|\___|\__,_|_| |_|\__,_|_| |_|
+# ______
+# |______|
+# ===========================================================================================================
+# Author: Drazzilb
+# Description: This script will remove any assets from your plex-meta-manager asset directory that are not being used by your media.
+# Note: This script will remove assets that renamer has put into the assets directory that do not have a matching folder in your
+# media directory and would otherwise cause a loop. I wouldn't recommend running this script very often (weekly at most; monthly is probably fine).
+# Usage: python3 poster_cleanarr.py
+# Requirements: requests
+# License: MIT License
+# ===========================================================================================================
+
+import os
+import re
+import json
+import logging
+import shutil
+import sys
+
+from util.utility import *
+from util.arrpy import StARR
+from util.logger import setup_logger
+
+try:
+ from plexapi.server import PlexServer
+ from tqdm import tqdm
+except ImportError as e:
+ print(f"ImportError: {e}")
+ print("Please install the required modules with 'pip install -r requirements.txt'")
+ exit(1)
+
+script_name = "poster_cleanarr"
+
+def match_assets(assets_list, media_dict):
+ """
+ Match assets to media.
+
+ Args:
+ assets_list (list): List of assets.
+ media_dict (list): List of media items.
+
+ Returns:
+ list: List of unmatched assets.
+ """
+ # Initialize a list to store unmatched assets
+ unmatched_assets = []
+ # Iterate through each asset in the assets list
+ for asset_data in tqdm(assets_list, desc=f"Matching...", unit="assets", total=len(assets_list), disable=None, leave=True):
+ # Initialize a flag to track if an asset is matched with media
+ matched = False
+
+ if not asset_data['files']:
+ unmatched_assets.append({
+ 'title': asset_data['title'],
+ 'year': asset_data['year'],
+ 'files': asset_data['files'],
+ 'path': asset_data.get('path', None)
+ })
+ continue
+
+ # Iterate through each media data of the same media type
+ for media_data in media_dict:
+
+ if is_match(asset_data, media_data):
+ matched = True
+
+ # For series, check for missing seasons in the media
+ if media_data.get('season_numbers', None):
+ media_seasons_numbers = media_data.get('season_numbers', None)
+ asset_seasons_numbers = asset_data.get('season_numbers', None)
+ if asset_seasons_numbers and media_seasons_numbers:
+ missing_seasons = []
+ for season in asset_seasons_numbers:
+ if season not in media_seasons_numbers:
+ missing_seasons.append(season)
+ files = []
+ for season in missing_seasons:
+ season = str(season).zfill(2)
+ season = f"Season{season}"
+ for file in asset_data['files']:
+ if season in file:
+ files.append(file)
+ if missing_seasons:
+ unmatched_assets.append({
+ 'title': asset_data['title'],
+ 'year': asset_data['year'],
+ 'files': files,
+ 'path': asset_data.get('path', None),
+ 'missing_season': True,
+ 'missing_seasons': missing_seasons
+ })
+ break
+ # If no match is found, add the asset to unmatched assets based on media type
+ if not matched:
+ unmatched_assets.append({
+ 'title': asset_data['title'],
+ 'year': asset_data['year'],
+ 'files': asset_data['files'],
+ 'path': asset_data.get('path', None)
+ })
+ return unmatched_assets
+
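+# Illustrative sketch (not part of the original script) of an unmatched_assets entry built by
+# match_assets() above; paths and titles are hypothetical, and the missing_season keys only
+# appear for series whose asset seasons are absent from the media:
+#   {'title': 'Example Show', 'year': 2021, 'path': '/assets/Example Show (2021)',
+#    'files': ['/assets/Example Show (2021)/Season01.jpg'],
+#    'missing_season': True, 'missing_seasons': [1]}
+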
+def remove_assets(unmatched_dict, source_dirs, logger):
+ """
+ Remove unmatched assets.
+
+ Args:
+ unmatched_dict (dict): Dictionary of unmatched assets.
+
+ Returns:
+ dict: Dictionary of assets removed.
+ """
+ # Initialize a list to store removed asset data
+ remove_data = []
+
+ # Initialize a list to track items to be removed
+ remove_list = []
+ # Iterate through each unmatched asset
+ for asset_data in unmatched_dict:
+ messages = []
+
+ # Check if the asset has no associated files (empty folder)
+ if not asset_data['files'] and asset_data['path']:
+ # Add the path of the empty folder to the removal list and log a message
+ remove_list.append(asset_data['path'])
+ messages.append(f"Removing empty folder: {os.path.basename(asset_data['path'])}")
+ else:
+ # For each file associated with the asset, add it to the removal list and log a message
+ for file in asset_data['files']:
+ remove_list.append(file)
+ messages.append(f"Removing file: {os.path.basename(file)}")
+
+ # Store removal data for the current asset type
+ remove_data.append({
+ 'title': asset_data['title'],
+ 'year': asset_data['year'],
+ 'messages': messages
+ })
+
+ # If not a dry run, perform the removal operations
+ if not dry_run:
+ for path in remove_list:
+ try:
+ # Check if the path is a directory; if so, remove the directory recursively
+ if os.path.isdir(path):
+ shutil.rmtree(path)
+ else:
+ # If it's a file, remove the file and its parent folder if it becomes empty
+ os.remove(path)
+ folder_path = os.path.dirname(path)
+ if not os.listdir(folder_path):
+ os.rmdir(folder_path)
+ except OSError as e:
+ logger.error(f"Error: {e}")
+ logger.error(f"Failed to remove: {path}")
+ continue
+ # Check for empty directories and remove them
+ for assets_path in source_dirs:
+ for root, dirs, files in os.walk(assets_path, topdown=False):
+ for dir in dirs:
+ dir_path = os.path.join(root, dir)
+ if not os.listdir(dir_path):
+ try:
+ logger.info(f"Removing empty folder: {dir}")
+ os.rmdir(dir_path)
+ except OSError as e:
+ logger.error(f"Error: {e}")
+ logger.error(f"Failed to remove: {dir_path}")
+ continue
+
+ return remove_data
+
+def print_output(remove_data, logger):
+ """
+ Print output of removed assets.
+
+ Args:
+ remove_data (list): List of removed asset data.
+ logger (Logger): Logger instance for output.
+
+ Returns:
+ None
+ """
+
+ count = 0 # Counter to track the total number of assets removed
+
+ for data in remove_data:
+ title = data['title']
+ year = data['year']
+
+ # Log the title and year (if available) of the removed asset
+ if year:
+ logger.info(f"\t{title} ({year})")
+ else:
+ logger.info(f"\t{title}")
+
+ # Log messages related to the removal of files or folders associated with the asset
+ asset_messages = data['messages']
+ for message in asset_messages:
+ logger.info(f"\t\t{message}")
+ count += 1 # Increment the counter for each removed asset message
+ logger.info("") # Add an empty line for better readability
+
+ # Log the total number of assets removed across all types
+ logger.info(f"\nTotal number of assets removed: {count}")
+
+
+def main(config):
+ """
+ Main function.
+ """
+ global dry_run
+ dry_run = config.dry_run
+ log_level = config.log_level
+ logger = setup_logger(log_level, script_name)
+ script_config = config.script_config
+ name = script_name.replace("_", " ").upper()
+
+ try:
+ logger.info(create_bar(f"START {name}"))
+ # Check if it's a dry run and log the message
+ if dry_run:
+ table = [
+ ["Dry Run"],
+ ["NO CHANGES WILL BE MADE"]
+ ]
+ logger.info(create_table(table))
+
+ # Fetch script configurations from the provided YAML file
+ script_config = config.script_config
+ results = validate(config, script_config, logger)
+ if not results:
+ logger.error("Invalid script configuration. Exiting.")
+ return
+ library_names = script_config.get('library_names', [])
+ source_dirs = script_config.get('source_dirs', [])
+ instances = script_config.get('instances', None)
+
+ # Log script settings for debugging purposes
+ table = [
+ ["Script Settings"]
+ ]
+ logger.debug(create_table(table))
+ logger.debug(f'{"Log level:":<20}{log_level}')
+ logger.debug(f'{"Dry_run:":<20}{dry_run}')
+ logger.debug(f'{"Assets paths:":<20}{source_dirs}')
+ logger.debug(f'{"Library names:":<20}{library_names}')
+ logger.debug(f'{"Instances:":<20}{instances}')
+ logger.debug(create_bar("-"))
+
+ source_dirs = [source_dirs] if isinstance(source_dirs, str) else source_dirs
+
+ assets_list = []
+ for path in source_dirs:
+ results = categorize_files(path)
+ if results:
+ assets_list.extend(results)
+ else:
+ logger.error(f"No assets found in {path}.")
+ # Checking for assets and logging
+ if assets_list:
+ logger.debug(f"Assets:\n{json.dumps(assets_list, indent=4)}")
+ else:
+ logger.error("No assets found, Check source_dirs setting in your config. Exiting.")
+ return
+
+ # Fetch information from Plex and StARR
+ media_dict = []
+ if instances:
+ for instance_type, instance_data in config.instances_config.items():
+ for instance in instances:
+ if instance in instance_data:
+ if instance_type == "plex":
+ url = instance_data[instance]['url']
+ api = instance_data[instance]['api']
+ try:
+ app = PlexServer(url, api)
+ except Exception as e:
+ logger.error(f"Error connecting to Plex: {e}")
+ app = None
+ if library_names and app:
+ print("Getting Plex data...")
+ results = get_plex_data(app, library_names, logger, include_smart=True, collections_only=True)
+ media_dict.extend(results)
+ else:
+ logger.warning("No library names specified in config.yml. Skipping Plex.")
+ else:
+ url = instance_data[instance]['url']
+ api = instance_data[instance]['api']
+ app = StARR(url, api, logger)
+ server_name = app.get_instance_name()
+ if app:
+ print(f"Getting {instance_type.capitalize()} data...")
+ results = handle_starr_data(app, server_name, instance_type, include_episode=False)
+ if results:
+ if instance_type == "radarr":
+ media_dict.extend(results)
+ elif instance_type == "sonarr":
+ media_dict.extend(results)
+ else:
+ logger.error(f"No {instance_type.capitalize()} data found.")
+
+ else:
+ logger.error(f"No instances found. Exiting script...")
+ return
+
+ if not media_dict:
+ logger.error("No media found, Check instances setting in your config. Exiting.")
+ return
+ else:
+ logger.debug(f"Media:\n{json.dumps(media_dict, indent=4)}")
+
+ # Match assets with media and log the results
+ unmatched_dict = match_assets(assets_list, media_dict)
+ if unmatched_dict:
+ logger.debug(f"Unmatched:\n{json.dumps(unmatched_dict, indent=4)}")
+ remove_data = remove_assets(unmatched_dict, source_dirs, logger)
+ if remove_data:
+ logger.debug(f"Remove Data:\n{json.dumps(remove_data, indent=4)}")
+ print_output(remove_data, logger)
+ else:
+ logger.info(f"No assets removed.")
+
+ except KeyboardInterrupt:
+ print("Keyboard Interrupt detected. Exiting...")
+ sys.exit()
+ except Exception:
+ logger.error(f"\n\nAn error occurred:\n", exc_info=True)
+ logger.error(f"\n\n")
+ finally:
+ logger.info(create_bar(f"END {name}"))
\ No newline at end of file
diff --git a/modules/poster_renamerr.py b/modules/poster_renamerr.py
new file mode 100755
index 0000000..7abca11
--- /dev/null
+++ b/modules/poster_renamerr.py
@@ -0,0 +1,683 @@
+# _____ _ _____
+# | __ \ | | | __ \
+# | |__) |__ ___| |_ ___ _ __| |__) |___ _ __ __ _ _ __ ___ ___ _ __ _ __
+# | ___/ _ \/ __| __/ _ \ '__| _ // _ \ '_ \ / _` | '_ ` _ \ / _ \ '__| '__|
+# | | | (_) \__ \ || __/ | | | \ \ __/ | | | (_| | | | | | | __/ | | |
+# |_| \___/|___/\__\___|_| |_| \_\___|_| |_|\__,_|_| |_| |_|\___|_| |_|
+# ______
+# |______|
+# ===================================================================================================
+# Author: Drazzilb
+# Description: This script will rename your posters from TPDB's naming scheme to match Plex-Meta-Manager's naming scheme.
+# Usage: python3 poster_renamerr.py
+# Requirements: requests, tqdm, pyyaml
+# License: MIT License
+# ===================================================================================================
+
+import os
+import sys
+import re
+import json
+import filecmp
+import shutil
+import time
+
+from util.utility import *
+from util.discord import discord, discord_check
+from util.arrpy import StARR
+from util.logger import setup_logger
+
+try:
+ from plexapi.server import PlexServer
+ from tqdm import tqdm
+except ImportError as e:
+ print(f"ImportError: {e}")
+ print("Please install the required modules with 'pip install -r requirements.txt'")
+ exit(1)
+
+script_name = "poster_renamerr"
+
+year_regex = re.compile(r"\s?\((\d{4})\).*")
+
+def get_assets_files(source_dirs, logger):
+ """
+ Get assets files from source directories
+
+ Args:
+ source_dirs (list or str): Path(s) to the source directories
+ logger (Logger): Logger instance for output
+ Returns:
+ list: List of dictionaries containing assets files
+ """
+
+ # Convert source_dirs to list if it's a string
+ source_dirs = [source_dirs] if isinstance(source_dirs, str) else source_dirs
+
+ # Initialize final_assets list
+ final_assets = []
+
+ # Iterate through each source directory
+ for source_dir in source_dirs:
+ new_assets = categorize_files(source_dir)
+ if new_assets:
+ # Merge new_assets with final_assets
+ for new in new_assets:
+ found_match = False
+ for final in final_assets:
+ if final['normalized_title'] == new['normalized_title'] and final['year'] == new['year']:
+ found_match = True
+ # Compare normalized file names between final and new assets
+ for new_file in new['files']:
+ normalized_new_file = normalize_file_names(os.path.basename(new_file))
+ for final_file in final['files']:
+ normalized_final_file = normalize_file_names(os.path.basename(final_file))
+ # Replace final file with new file if the filenames match
+ if normalized_final_file == normalized_new_file:
+ final['files'].remove(final_file)
+ final['files'].append(new_file)
+ break
+ else:
+ # Add new file to final asset if the filenames don't match
+ final['files'].append(new_file)
+ # Merge season_numbers from new asset to final asset
+ new_season_numbers = new.get('season_numbers', None)
+ if new_season_numbers:
+ final_season_numbers = final.get('season_numbers', None)
+ if final_season_numbers:
+ final['season_numbers'] = list(set(final_season_numbers + new_season_numbers))
+ else:
+ final['season_numbers'] = new_season_numbers
+ break
+ if not found_match:
+ final_assets.append(new)
+ else:
+ logger.error(f"No assets found in {source_dir}")
+
+ return final_assets
+
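+# Illustrative sketch (not part of the original script) of a merged asset entry returned by
+# get_assets_files() above, based on the keys read elsewhere in this module; all values are hypothetical:
+#   {'title': 'Example Show', 'year': 2021, 'normalized_title': 'example show',
+#    'files': ['/posters/Example Show (2021).jpg', '/posters/Example Show (2021) - Season 1.jpg'],
+#    'season_numbers': [1]}
+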
+def match_data(media_dict, asset_files):
+ """
+ Matches media data to asset files
+
+ Args:
+ media_dict (dict): Dictionary of media data
+ asset_files (dict): Dictionary of asset files
+
+ Returns:
+ dict: Dictionary of matched and unmatched media data
+ """
+
+ # Initialize dictionaries for matched and unmatched media data
+ combined_dict = {
+ 'matched': {'collections': [], 'movies': [], 'series': []},
+ 'unmatched': {'collections': [], 'movies': [], 'series': []}
+ }
+
+ # List of asset types to consider
+ asset_types = [type for type in media_dict if media_dict[type] is not None]
+
+ # Iterate through each asset type
+ with tqdm(total=len(asset_types), desc=f"Matching assets...", unit="asset types", leave=True) as pbar_outer:
+ for asset_type in asset_types:
+ if asset_type in media_dict:
+ unmatched_dict = []
+ matched_dict = []
+ asset_data = asset_files[asset_type]
+ media_data = media_dict[asset_type]
+ # Iterate through each media entry of the current asset type
+ with tqdm(total=len(media_data), desc=f"Matching {asset_type}", unit="media", leave=True, disable=None) as pbar_inner:
+ for media in media_data:
+ matched = False
+ if asset_type == 'series':
+ media_seasons_numbers = [season['season_number'] for season in media.get('seasons', [])]
+ # Iterate through each asset entry of the current asset type
+ for asset in asset_data:
+ # Extracting various properties of assets and media for comparison
+ if is_match(asset, media):
+ matched = True # Set flag to indicate a match
+ asset_season_numbers = asset.get('season_numbers', None)
+ if asset_type == "series":
+ # Iterate through each file in the asset
+ files_to_remove = []
+ seasons_to_remove = []
+ for file in asset['files']:
+ # Check for season-related file naming
+ if re.search(r' - Season| - Specials', file):
+ if re.search(r"Season (\d+)", file):
+ season_number = int(re.search(r"Season (\d+)", file).group(1))
+ elif "Specials" in file:
+ season_number = 0
+ if season_number not in media_seasons_numbers:
+ files_to_remove.append(file)
+ continue
+ for file in files_to_remove:
+ asset['files'].remove(file)
+ for season in asset_season_numbers:
+ if season not in media_seasons_numbers:
+ seasons_to_remove.append(season)
+ for season in seasons_to_remove:
+ asset_season_numbers.remove(season)
+
+ # Store matched data in the matched dictionary
+ matched_dict.append({
+ 'title': media['title'],
+ 'year': media['year'],
+ 'folder': media['folder'],
+ 'files': asset['files'],
+ 'seasons_numbers': asset_season_numbers,
+ })
+ break # Break loop after finding a match
+
+ if not matched:
+ # If no match is found, add to unmatched dictionary
+ unmatched_dict.append({
+ 'title': media['title'],
+ 'year': media['year'],
+ 'folder': media['folder'],
+ })
+
+ # Update combined matched and unmatched dictionaries
+ combined_dict['matched'][asset_type] = matched_dict
+ combined_dict['unmatched'][asset_type] = unmatched_dict
+
+ pbar_inner.update(1)
+ pbar_outer.update(1)
+
+ return combined_dict
+
+def process_file(file, new_file_path, action_type, logger):
+ """
+ Processes a file based on the action type
+
+ Args:
+ file (str): Path to file
+ new_file_path (str): Path to new file
+ action_type (str): Action type to perform on the file
+
+ Returns:
+ None
+ """
+
+ try:
+ # Check the action type and perform the appropriate operation
+ if action_type == "copy":
+ shutil.copy(file, new_file_path) # Copy the file to the new location
+ elif action_type == "move":
+ shutil.move(file, new_file_path) # Move the file to the new location
+ elif action_type == "hardlink":
+ os.link(file, new_file_path) # Create a hard link to the new location
+ elif action_type == "symlink":
+ os.symlink(file, new_file_path) # Create a symbolic link to the new location
+ except OSError as e:
+ # Handle errors if any operation fails
+ logger.error(f"Error {action_type}ing file: {e}") # Log the error message
+
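+# Example usage (hypothetical paths): process_file('/posters/Example (2020).jpg',
+# '/assets/Example (2020).jpg', 'copy', logger) copies the poster to its new location.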
+
+
+def rename_files(matched_assets, script_config, logger):
+ """
+ Renames files based on the matched assets and script config
+
+ Args:
+ matched_assets (dict): Dictionary of matched assets
+ script_config (dict): Dictionary of script config
+
+ Returns:
+ dict: Dictionary of output messages
+ """
+
+ output = {}
+
+ # Retrieve configuration settings from the script_config
+ asset_folders = script_config.get('asset_folders', False)
+ border_replacerr = script_config.get('border_replacerr', False)
+ action_type = script_config.get('action_type', False)
+ print_only_renames = script_config.get('print_only_renames', False)
+ destination_dir = script_config.get('destination_dir', False)
+
+ # Handle border_replacerr settings
+ if border_replacerr:
+ tmp_dir = os.path.join(destination_dir, 'tmp')
+ if not dry_run:
+ if not os.path.exists(tmp_dir):
+ os.makedirs(tmp_dir)
+ else:
+ logger.debug(f"{tmp_dir} already exists")
+ destination_dir = tmp_dir
+ else:
+ logger.debug(f"Would create folder {tmp_dir}")
+ destination_dir = tmp_dir
+ else:
+ destination_dir = script_config.get('destination_dir', False)
+
+ asset_types = ['collections', 'movies', 'series']
+
+ # Iterate through each asset type
+ for asset_type in asset_types:
+ output[asset_type] = []
+ # If assets to rename
+ if matched_assets[asset_type]:
+ for item in tqdm(matched_assets[asset_type], desc=f"Renaming {asset_type} posters", unit="assets", leave=True, disable=None, total=len(matched_assets[asset_type])):
+ messages = []
+ discord_messages = []
+ files = item['files']
+ folder = item['folder']
+
+ # Remove any OS illegal characters from the file name
+ if asset_type == "collections":
+ folder = re.sub(r'[<>:"/\\|?*]', '', folder.replace('/', ''))
+
+ # Handle asset_folders configuration
+ if asset_folders:
+ dest_dir = os.path.join(destination_dir, folder)
+ if not os.path.exists(dest_dir):
+ if not dry_run:
+ os.makedirs(dest_dir)
+ else:
+ dest_dir = destination_dir
+
+ # Iterate through each file in the asset
+ for file in files:
+ file_name = os.path.basename(file)
+ file_extension = os.path.splitext(file)[1]
+
+ # Check for season-related file naming
+ if re.search(r' - Season| - Specials', file_name):
+ season_number = (re.search(r"Season (\d+)", file_name).group(1) if "Season" in file_name else "00").zfill(2)
+ if asset_folders:
+ new_file_name = f"Season{season_number}{file_extension}"
+ else:
+ new_file_name = f"{folder}_Season{season_number}{file_extension}"
+ new_file_path = os.path.join(dest_dir, new_file_name)
+ else:
+ if asset_folders:
+ new_file_name = f"poster{file_extension}"
+ else:
+ new_file_name = f"{folder}{file_extension}"
+ new_file_path = os.path.join(dest_dir, new_file_name)
+
+ # Check if the new file path already exists
+ if os.path.isfile(new_file_path):
+ existing_file = os.path.join(dest_dir, new_file_name)
+ # Check if the existing file is the same as the new file True = same, False = different
+ if not filecmp.cmp(file, existing_file):
+ if file_name != new_file_name:
+ messages.append(f"{file_name} -renamed-> {new_file_name}")
+ discord_messages.append(f"{new_file_name}")
+ else:
+ if not print_only_renames:
+ messages.append(f"{file_name} -not-renamed-> {new_file_name}")
+ discord_messages.append(f"{new_file_name}")
+ if not dry_run:
+ process_file(file, new_file_path, action_type, logger)
+ else:
+ if file_name != new_file_name:
+ messages.append(f"{file_name} -renamed-> {new_file_name}")
+ discord_messages.append(f"{new_file_name}")
+ else:
+ if not print_only_renames:
+ messages.append(f"{file_name} -not-renamed-> {new_file_name}")
+ discord_messages.append(f"{new_file_name}")
+ if not dry_run:
+ process_file(file, new_file_path, action_type, logger)
+
+ # Append the messages to the output
+ if messages or discord_messages:
+ output[asset_type].append({
+ 'title': item['title'],
+ 'year': item['year'],
+ 'folder': item['folder'],
+ 'messages': messages,
+ 'discord_messages': discord_messages,
+ })
+ else:
+ print(f"No {asset_type} to rename")
+ return output
+
+def handle_output(output, asset_folders, logger):
+ """
+ Handles the output messages
+
+ Args:
+ output (dict): Dictionary of output messages
+
+ Returns:
+ None
+ """
+ # Iterate through each asset type in the output
+ for asset_type, assets in output.items():
+ if assets:
+ # Create a table and log the asset type
+ table = [
+ [f"{asset_type.capitalize()}"],
+ ]
+ if any(asset['messages'] for asset in assets):
+ logger.info(create_table(table))
+ # Iterate through each asset within the asset type
+ for asset in assets:
+ title = asset['title']
+ title = year_regex.sub("", title).strip()
+ year = asset['year']
+ folder = asset['folder']
+ messages = asset['messages']
+ if year:
+ year = f" ({year})"
+ else:
+ year = ""
+ messages.sort() # Sorting the messages alphabetically for consistency
+ # Log the asset title and year, along with its messages
+ if messages:
+ logger.info(f"{title}{year}")
+ if asset_folders:
+ if dry_run:
+ logger.info(f"\tWould create folder '{folder}'")
+ else:
+ logger.info(f"\tCreated folder '{folder}'")
+ if asset_type == "series":
+ for message in messages:
+ logger.info(f"\t{message}")
+ else:
+ for message in messages:
+ logger.info(f"\t{message}")
+ logger.info("")
+ else:
+ # If no assets are present for the asset type, log the message
+ logger.info(f"No {asset_type} to rename")
+
+
+def notification(output, logger):
+ """
+ Sends a notification to Discord
+
+ Args:
+ output (dict): Dictionary of output messages
+
+ Returns:
+ None
+ """
+
+ discord_dict = {} # Dictionary to organize messages to be sent to Discord
+ fields = [] # List to hold individual message fields
+ # Loop through the output dictionary containing messages for different asset types
+ for asset_type, assets in output.items():
+ if assets:
+ discord_messages = [] # List to hold individual messages for each asset
+ current_field = "" # String to store messages within the field character limit
+
+ # Prepare messages for each asset within the asset type
+ for asset in assets:
+ asset_messages = [] # List to hold individual lines for each asset's message
+ title = asset['title']
+ title = year_regex.sub("", title).strip()
+ year = asset['year']
+ if year:
+ year = f" ({year})"
+ else:
+ year = ""
+ messages = asset['discord_messages'] # Extracting specific messages for Discord display
+ # Sort messages
+ messages.sort() # Sorting the messages alphabetically for consistency
+ if messages:
+ asset_messages.append(f"{title}{year}") # Adding the title and year as the first line of the message
+ if asset_type == "series":
+ for message in messages:
+ asset_messages.append(f"\t{message}")
+ else:
+ for message in messages:
+ asset_messages.append(f"\t{message}")
+ if asset_messages:
+ asset_messages.append("") # Adding an empty line between assets
+ discord_messages.append("\n".join(asset_messages)) # Joining lines into an asset-specific message
+ else:
+ continue
+
+
+ # Split asset-specific messages into multiple fields if their total length exceeds Discord's field limit
+ for message in discord_messages:
+ if len(current_field) + len(message) + len("\t\n") <= 1000:
+ current_field += message + "\n" # Adding the message to the current field
+ else:
+ fields.append({ # Creating a field containing a set of messages
+ "name": asset_type.capitalize(), # Capitalizing the asset type for field name
+ "value": f"```{current_field}```" # Adding the current field's messages in code block format
+ })
+ current_field = message + "\n" # Starting a new field with the current message
+ asset_type = "" # Resetting asset_type for the next field within the same asset_type
+
+ # Add the remaining messages as a new field
+ if current_field:
+ fields.append({ # Creating a field containing the remaining messages
+ "name": asset_type.capitalize(),
+ "value": f"```{current_field}```"
+ })
+ if len(fields) <= 25: # Checking if the total number of fields is within the Discord limit
+ discord_dict[1] = fields # Storing fields in the discord_dict under key 1
+ else:
+ # Splitting fields into multiple keys if there are more than 25 fields
+ num_fields = len(fields)
+ num_messages_per_field = 25
+ num_keys = num_fields // num_messages_per_field
+ if num_fields % num_messages_per_field != 0:
+ num_keys += 1
+
+ for i in range(num_keys):
+ start_index = i * num_messages_per_field
+ end_index = min(start_index + num_messages_per_field, num_fields)
+ discord_dict[i + 1] = fields[start_index:end_index] # Splitting fields into separate keys
+ else:
+ continue
+
+ # Check the total character count of the messages in each dict entry; Discord embeds are limited to 6000 characters in total, so entries over 5000 characters are split below to leave headroom
+ new_dict = {}
+ new_fields = []
+ new_field_count = 0
+ new_character_count = 0
+
+ # Iterate through the original 'discord_dict' to check character count and split messages if they exceed 5000 characters
+ for key, value in discord_dict.items():
+ total_character_count = sum(len(field['value']) for field in value)
+ if total_character_count > 5000:
+ for field in value:
+ field_character_count = len(field['value'])
+ # Check and split fields that exceed 5000 characters
+ if new_character_count + field_character_count + len("\n") + len("\t") <= 5000:
+ new_fields.append(field)
+ new_character_count += field_character_count
+ else:
+ new_dict[new_field_count + 1] = new_fields
+ new_fields = [field]
+ new_field_count += 1
+ new_character_count = field_character_count
+
+ if new_fields:
+ new_dict[new_field_count + 1] = new_fields
+ else:
+ new_dict[key] = value
+
+ discord_dict = new_dict # Update discord_dict with the restructured message data
+
+ # Calculate the total character count for each key in the updated 'discord_dict'
+ total_character_count_per_key = {}
+ for key, value in discord_dict.items():
+ total_character_count_per_key[key] = sum(len(field['value']) for field in value)
+
+ # Send messages to Discord by iterating through each key-value pair in the updated 'discord_dict'
+ for key, value in discord_dict.items():
+ print(f"Sending message {key} of {len(discord_dict)}") # Display message sending status
+ # Actual function to send messages to Discord (which is currently represented by a 'print' statement)
+ discord(fields=value, logger=logger, script_name=script_name, description=f"{'__**Dry Run**__' if dry_run else ''}", color=0x00ff00, content=None)
+ # Pauses for 5 seconds each 5th message
+ if key % 5 == 0:
+ print("Pausing for 5 seconds to let Discord catch up...")
+ time.sleep(5)
+
+def main(config):
+ """
+ Main function.
+ """
+ global dry_run
+ dry_run = config.dry_run
+ log_level = config.log_level
+ logger = setup_logger(log_level, script_name)
+
+ script_config = config.script_config
+ name = script_name.replace("_", " ").upper()
+ logger.info(f"Running {name}")
+ try:
+ logger.info(create_bar(f"START {name}"))
+ # Display script settings
+ table = [["Script Settings"]]
+ logger.debug(create_table(table))
+ script_config = config.script_config
+ valid = validate(config, script_config, logger)
+ # Extract script configuration settings
+ asset_folders = script_config.get('asset_folders', False)
+ library_names = script_config.get('library_names', False)
+ source_dirs = script_config.get('source_dirs', False)
+ source_overrides = script_config.get('source_overrides', False)
+ destination_dir = script_config.get('destination_dir', False)
+ action_type = script_config.get('action_type', False)
+ print_only_renames = script_config.get('print_only_renames', False)
+ border_replacerr = script_config.get('border_replacerr', False)
+ instances = script_config.get('instances', [])
+ sync_posters = script_config.get('sync_posters', False)
+
+ logger.debug(create_bar("-")) # Log separator
+ # Log script configuration settings
+ logger.debug(f'{"Dry_run:":<20}{dry_run}')
+ logger.debug(f'{"Log level:":<20}{log_level}')
+ logger.debug(f'{"Asset folders:":<20}{asset_folders}')
+ logger.debug(f'{"Library names:":<20}{library_names}')
+ logger.debug(f'{"Source dirs:":<20}\n{json.dumps(source_dirs, indent=4)}')
+ logger.debug(f'{"Source overrides:":<20}{source_overrides}')
+ logger.debug(f'{"Destination dir:":<20}{destination_dir}')
+ logger.debug(f'{"Action type:":<20}{action_type}')
+ logger.debug(f'{"Print only renames:":<20}{print_only_renames}')
+ logger.debug(f'{"Border replacerr:":<20}{border_replacerr}')
+ logger.debug(f'{"Instances:":<20}{instances}')
+ logger.debug(f'{"Sync posters:":<20}{sync_posters}')
+
+ if not os.path.exists(destination_dir):
+ logger.info(f"Creating destination directory: {destination_dir}")
+ os.makedirs(destination_dir)
+ else:
+ logger.debug(f"Destination directory already exists: {destination_dir}")
+ logger.debug(create_bar("-")) # Log separator
+ if dry_run:
+ # Log dry run message
+ table = [
+ ["Dry Run"],
+ ["NO CHANGES WILL BE MADE"]
+ ]
+ logger.info(create_table(table))
+ # Sync posters if enabled
+ if sync_posters:
+ # Run sync_posters.py or log intent to run
+ logger.info(f"Running sync_gdrive")
+ from modules.sync_gdrive import main as gdrive_main
+ from util.config import Config
+ gdrive_config = Config("sync_gdrive")
+ gdrive_main(gdrive_config, logger)
+ logger.info(f"Finished running sync_gdrive")
+ else:
+ logger.debug(f"Sync posters is disabled. Skipping...")
+
+ assets_list = []
+ print("Gathering all the posters, please wait...")
+ assets_list = get_assets_files(source_dirs, logger)
+
+ if assets_list:
+ assets_dict = sort_assets(assets_list)
+ logger.debug(f"Asset files:\n{json.dumps(assets_dict, indent=4)}")
+ else:
+ logger.error("No assets found. Exiting...")
+ return
+
+ media_dict = {
+ 'movies': [],
+ 'series': [],
+ 'collections': []
+ }
+ if instances:
+ for instance_type, instance_data in config.instances_config.items():
+ for instance in instances:
+ if instance in instance_data:
+ if instance_type == "plex":
+ url = instance_data[instance]['url']
+ api = instance_data[instance]['api']
+ try:
+ app = PlexServer(url, api)
+ except Exception as e:
+ logger.error(f"Error connecting to Plex: {e}")
+ app = None
+ if library_names and app:
+ print("Getting Plex data...")
+ results = get_plex_data(app, library_names, logger, include_smart=True, collections_only=True)
+ media_dict['collections'].extend(results)
+ else:
+ logger.warning("No library names specified in config.yml. Skipping Plex.")
+ else:
+ url = instance_data[instance]['url']
+ api = instance_data[instance]['api']
+ app = StARR(url, api, logger)
+ server_name = app.get_instance_name()
+ if app:
+ print(f"Getting {instance_type.capitalize()} data...")
+ results = handle_starr_data(app, server_name, instance_type, include_episode=False)
+ if results:
+ if instance_type == "radarr":
+ media_dict['movies'].extend(results)
+ elif instance_type == "sonarr":
+ media_dict['series'].extend(results)
+ else:
+ logger.error(f"No {instance_type.capitalize()} data found.")
+
+ else:
+ logger.error(f"No instances found. Exiting script...")
+ return
+
+ # Log media data
+ if not any(media_dict.values()):
+ logger.error("No media found, Check instances setting in your config. Exiting.")
+ return
+ else:
+ logger.debug(f"Media:\n{json.dumps(media_dict, indent=4)}")
+
+ if media_dict and assets_dict:
+ # Match media data to asset files
+ print(f"Matching media to assets, please wait...")
+ combined_dict = match_data(media_dict, assets_dict)
+ logger.debug(f"Matched and Unmatched media:\n{json.dumps(combined_dict, indent=4)}")
+ matched_assets = combined_dict.get('matched', None)
+ if any(matched_assets.values()):
+ output = rename_files(matched_assets, script_config, logger)
+ if any(output.values()):
+ logger.debug(f"Output:\n{json.dumps(output, indent=4)}")
+ handle_output(output, asset_folders, logger)
+ if discord_check(script_name):
+ notification(output, logger)
+ else:
+ logger.info(f"No new posters to rename.")
+ else:
+ logger.info(f"No assets matched to media.")
+
+ if border_replacerr:
+ # Run border_replacerr.py or log intent to run
+ logger.info(f"Running border_replacerr.py")
+ tmp_dir = os.path.join(destination_dir, 'tmp')
+ from modules.border_replacerr import process_files
+ from util.config import Config
+ replacerr_config = Config("border_replacerr")
+ replacerr_script_config = replacerr_config.script_config
+ process_files(tmp_dir, destination_dir, dry_run, log_level, replacerr_script_config, logger)
+ logger.info(f"Finished running border_replacerr.py")
+ else:
+ logger.debug(f"Border replacerr is disabled. Skipping...")
+ except KeyboardInterrupt:
+ print("Keyboard Interrupt detected. Exiting...")
+ sys.exit()
+ except Exception:
+ logger.error(f"\n\nAn error occurred:\n", exc_info=True)
+ logger.error(f"\n\n")
+ finally:
+ logger.info(create_bar(f"END {name}"))
+
diff --git a/modules/queinatorr.py b/modules/queinatorr.py
new file mode 100755
index 0000000..98a6886
--- /dev/null
+++ b/modules/queinatorr.py
@@ -0,0 +1,593 @@
+# ____ _ _
+# / __ \ (_) | |
+# | | | |_ _ ___ _ _ __ __ _| |_ ___ _ __ _ __
+# | | | | | | |/ _ \ | '_ \ / _` | __/ _ \| '__| '__|
+# | |__| | |_| | __/ | | | | (_| | || (_) | | | |
+# \___\_\\__,_|\___|_|_| |_|\__,_|\__\___/|_| |_|
+# ===================================================================================================
+# Author: Drazzilb
+# Description: This script will move torrents from one category to another in qBittorrent based on
+# the title of the torrent. This is useful for moving torrents that are stuck in the
+# queue due to missing files or not being an upgrade for existing episode file(s).
+# Usage: python3 queinatorr.py
+# Requirements: requests, qbittorrentapi
+# License: MIT License
+# ===================================================================================================
+
+import json
+from datetime import datetime
+import sys
+
+from qbittorrentapi import Client
+from util.arrpy import StARR
+from util.discord import discord, discord_check
+from util.utility import *
+from util.logger import setup_logger
+
+try:
+ from urllib.parse import urlsplit
+except ImportError as e:
+ print(f"ImportError: {e}")
+ print("Please install the required modules with 'pip install -r requirements.txt'")
+ exit(1)
+
+script_name = "queinatorr"
+
+queue_list = [
+ "Not an upgrade for existing episode file(s). Existing quality: WEBDL-720p. New Quality WEBDL-1080p.",
+ "New Quality is BR-DISK",
+ "No files found are eligible for import",
+ "The download is missing files",
+ "DownloadClientQbittorrentTorrentStateMissingFiles",
+ "qBittorrent is reporting an error"
+]
+
+def handle_qbit(queue_dict, qb, post_import_category, pre_import_category, days_to_keep, server_name, logger):
+ """
+ This function will move torrents from one category to another in qBittorrent based on
+ the title of the torrent. This is useful for moving torrents from a category that are stuck
+ in a queue due to a missing file or not being an upgrade for existing episode file(s).
+
+ Args:
+ queue_dict (dict): Dictionary of items in the queue.
+ qb (Client): qBittorrent Client instance.
+ post_import_category (str): Category to move torrents to.
+ pre_import_category (str): Category to move torrents from.
+ days_to_keep (int): Number of days to keep torrents in the pre_import_category before moving them to the post_import_category.
+ server_name (str): Name of the Radarr/Sonarr instance associated with this queue.
+ logger (Logger): Logger instance for output.
+
+ Returns:
+ dict: Dictionary of messages to send to Discord.
+ """
+
+ try:
+ torrents = qb.torrents_info() # Retrieve information about all torrents in qBittorrent
+ except Exception as e:
+ logger.error(f"Error getting torrents: {e}")
+ return # Return if an error occurs while fetching torrents
+
+ torrents_dict = {} # Dictionary to store torrents from the pre_import_category
+ qbit_messages = {} # Dictionary to store messages to send to Discord
+
+ # Iterate through all torrents and filter those in the pre_import_category
+ for torrent in torrents:
+ hash = torrent['hash']
+ human_readable_added_on = datetime.fromtimestamp(torrent['added_on']).strftime('%Y-%m-%d %H:%M:%S')
+ if torrent['category'] == pre_import_category:
+ torrents_dict[hash] = {
+ 'torrent': torrent['name'],
+ 'category': torrent['category'],
+ 'addedOn': torrent['added_on'],
+ 'humanReadableAddedOn': human_readable_added_on
+
+ }
+
+ # Log details of torrents in the pre_import_category
+ logger.debug(f"Torrents in '{pre_import_category}' category in qBittorrent apart of {server_name}: {len(torrents_dict)}\n {json.dumps(torrents_dict, indent=4)}")
+
+ list_of_torrents = [] # List to store torrents from the queue_dict
+
+ # Extract torrent names from queue_dict
+ for record in queue_dict.values():
+ list_of_torrents.append(record['torrent'])
+
+ # Check each torrent in torrents_dict and move them to post_import_category if required conditions are met
+ for hash, qb_data in torrents_dict.items():
+ qb_torrent = qb_data['torrent']
+ qb_torrent_without_extension = '.'.join(qb_torrent.split('.')[:-1])
+ added_on = qb_data['addedOn']
+ added_on = datetime.fromtimestamp(added_on)
+ days_ago = (datetime.now() - added_on).days
+
+ # Check if the torrent is in the list_of_torrents or older than days_to_keep and move accordingly
+ if qb_torrent in list_of_torrents or qb_torrent_without_extension in list_of_torrents:
+            move_torrent_to_category(qb, hash, post_import_category, logger) # Move torrent to post_import_category
+ elif days_ago > days_to_keep:
+ if qb_torrent in qbit_messages:
+ qbit_messages[qb_torrent]['count'] += 1
+ else:
+ qbit_messages[qb_torrent] = {
+ 'count': 1,
+ 'message': f"{qb_torrent} -> {post_import_category} (Downloaded {days_ago} days ago)"
+ }
+ move_torrent_to_category(qb, hash, post_import_category, logger) # Move torrent to post_import_category
+
+ return qbit_messages # Return messages for notifications
+
+def move_torrent_to_category(qb, torrent_hash, category, logger):
+ """
+ This function will move a torrent to a category in qBittorrent.
+
+ Args:
+ qb (Client): qBittorrent Client instance.
+ torrent_hash (str): Hash of the torrent to move.
+        category (str): Category to move the torrent to.
+        logger (Logger): Logger instance.
+
+ Returns:
+ None
+ """
+ # Check if it's not a dry run and attempt to move the torrent to the specified category
+ if not dry_run:
+ try:
+ qb.torrents_set_category(torrent_hashes=torrent_hash, category=category)
+ except Exception as e:
+ logger.error(f"Error moving torrent to {category}: {e}") # Log an error if the move operation fails
+
+def handle_queue(queue_dict, app):
+ """
+ This function will remove items from the queue in Radarr or Sonarr based on the status messages
+ of the item.
+
+ Args:
+ queue_dict (dict): Dictionary of items in the queue.
+ app (StARR): StARR instance.
+
+ Returns:
+        dict: Dictionary of messages to send to Discord.
+ """
+ messages_dict = {}
+
+ # Iterate through each item in the queue dictionary
+ for id, record in queue_dict.items():
+ title = record['title']
+ year = record['year']
+ message = record['status_messages']
+ error = record['error_message']
+ torrent = record['torrent']
+
+ # Check if any substrings from queue_list are present in the status message or error message
+ if any((sub_string in (message or "")) for sub_string in queue_list) or any((sub_string in (error or "")) for sub_string in queue_list):
+ # Select the relevant substring from the status message or error message
+ if any((sub_string in (message or "")) for sub_string in queue_list):
+ message = next(sub_string for sub_string in queue_list if sub_string in (message or ""))
+ elif any((sub_string in (error or "")) for sub_string in queue_list):
+ message = next(sub_string for sub_string in queue_list if sub_string in (error or ""))
+
+ # Create or update messages_dict with the appropriate details for each torrent
+            if id not in messages_dict:
+ messages_dict[id] = {
+ 'title': title,
+ 'year': year,
+ 'torrent': torrent,
+ 'messages': {},
+ }
+
+ # Increment the count of each encountered status or error message
+ if message:
+ if message in messages_dict[id]['messages']:
+ messages_dict[id]['messages'][message] += 1
+ else:
+ messages_dict[id]['messages'][message] = 1
+ if error:
+ if error in messages_dict[id]['messages']:
+ messages_dict[id]['messages'][error] += 1
+ else:
+ messages_dict[id]['messages'][error] = 1
+
+ # Create a list of queue_ids for removal
+ queue_ids = list(messages_dict.keys())
+
+ # Remove items from the queue (if not a dry run)
+ if not dry_run:
+ app.remove_item_from_queue(queue_ids)
+
+ return messages_dict
+
+def queued_items(queue, instance_type):
+ """
+ This function will create a dictionary of items in the queue.
+
+ Args:
+ queue (dict): Dictionary of items in the queue.
+ instance_type (str): Type of instance to process.
+
+ Returns:
+ dict: Dictionary of items in the queue.
+ """
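+    # Illustrative shape of one returned entry, keyed by the queue item's id
+    # (ids, titles, and messages below are placeholders):
+    #   queue_dict[101] = {
+    #       'media_id': 42,
+    #       'title': 'Example Movie',
+    #       'year': 2020,
+    #       'torrent': 'Example.Movie.2020.1080p.WEB-DL',
+    #       'status_messages': 'The download is missing files',
+    #       'error_message': None
+    #   }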
+
+ queue_dict = {}
+
+ # Traverse through the queue and extract relevant information for each item
+ for key, data in queue.items():
+ if key == 'records':
+ for item in data:
+ status_messages = []
+
+ # For Radarr instance
+ if instance_type == 'radarr':
+ media_id = item['movieId']
+ title = item['movie']['title']
+ year = item['movie']['year']
+ torrent = item['title']
+ status_messages = item.get('statusMessages', [])
+
+ # Skip items without status messages
+ if status_messages == []:
+ continue
+
+ list_of_messages = []
+ for message_item in status_messages:
+ if message_item['messages']:
+ list_of_messages.extend(message_item['messages'])
+ error_message = item.get('errorMessage', None)
+ queue_id = item['id']
+
+ # Concatenate status messages as comma-separated string
+ if list_of_messages:
+ status_messages = ','.join(list_of_messages)
+
+ # Create entry in queue_dict for each item
+ queue_dict[queue_id] = {
+ 'media_id': media_id,
+ 'title': title,
+ 'year': year,
+ 'torrent': torrent,
+ 'status_messages': status_messages,
+ 'error_message': error_message
+ }
+
+ # For Sonarr instance
+ elif instance_type == 'sonarr':
+ media_id = item['seriesId']
+ title = item['series']['title']
+ year = item['series']['year']
+ torrent = item['title']
+ status_messages = item['statusMessages']
+ error_message = item.get('errorMessage', None)
+ list_of_messages = []
+
+ # Extract messages from status_messages
+ for message_item in status_messages:
+ if message_item['messages']:
+ list_of_messages.extend(message_item['messages'])
+ queue_id = item['id']
+
+ # Concatenate status messages as comma-separated string
+ if list_of_messages:
+ status_messages = ','.join(list_of_messages)
+
+ # Create entry in queue_dict for each item
+ queue_dict[queue_id] = {
+ 'media_id': media_id,
+ 'title': title,
+ 'year': year,
+ 'torrent': torrent,
+ 'status_messages': status_messages,
+ 'error_message': error_message
+ }
+
+ return queue_dict
+
+def process_instance(instance_type, url, api, pre_import_category, post_import_category, qbit_instance, days_to_keep, logger, config):
+ """
+ This function will process a Radarr or Sonarr instance and move items from the queue to the
+ specified category based on the status messages of the item.
+
+ Args:
+ instance_type (str): Type of instance to process.
+ url (str): URL of the instance.
+ api (str): API key of the instance.
+ pre_import_category (str): Category to move torrents from.
+ post_import_category (str): Category to move torrents to.
+ qbit_instance (str): qBittorrent instance to move torrents to.
+        days_to_keep (int): Number of days to keep torrents in the pre_import_category before moving them to the post_import_category.
+        logger (Logger): Logger instance.
+        config (obj): Loaded configuration (used to look up the qBittorrent instance settings).
+
+ Returns:
+ dict: Dictionary of messages to send to Discord.
+ """
+
+ # Retrieve qBittorrent configuration from the script's configurations
+ qbit_data = config.instances_config.get('qbittorrent', {})
+ for key, data in qbit_data.items():
+ if key == qbit_instance:
+ qbit_url = data['url']
+ qbit_host = urlsplit(qbit_url).hostname
+ qbit_port = urlsplit(qbit_url).port
+ qbit_username = data['username']
+ qbit_password = data['password']
+            qb = Client(host=qbit_host, port=qbit_port, username=qbit_username, password=qbit_password)
+ break
+
+ # Initialize a StARR instance for Radarr or Sonarr
+ app = StARR(url, api, logger)
+ server_name = app.get_instance_name()
+
+ # Log script instance configuration details
+ table = [
+ [f"{server_name}"],
+ ]
+ logger.info(create_table(table))
+ logger.debug('\n')
+ table = [
+ [f"Script instance config for {server_name}"],
+ ]
+ logger.debug(create_table(table))
+ # Logging configuration details
+ logger.debug(f'{"URL:":<30}{url}')
+ logger.debug(f'{"API:":<30}{"*" * (len(api) - 5)}{api[-5:]}')
+ logger.debug(f'{"qBittorrent Instance:":<30}{qbit_instance}')
+ logger.debug(f'{"qBittorrent URL:":<30}{qbit_url}')
+ logger.debug(f'{"qBittorrent Host:":<30}{qbit_host}')
+ logger.debug(f'{"qBittorrent Port:":<30}{qbit_port}')
+ logger.debug(f'{"qBittorrent Username:":<30}{qbit_username}')
+ logger.debug(f'{"qBittorrent Password:":<30}{"*" * (len(qbit_password) - 5)}{qbit_password[-5:]}')
+ logger.debug(f'{"pre_import_category:":<30}{pre_import_category}')
+ logger.debug(f'{"post_import_category:":<30}{post_import_category}')
+ logger.debug(create_bar("-"))
+
+ # Retrieve the queue from Radarr or Sonarr instance
+ queue = app.get_queue(instance_type)
+ logger.debug(f"Queue for '{server_name}'\n{json.dumps(queue, indent=4)}\n")
+
+ queue_dict = queued_items(queue, instance_type)
+
+ logger.debug(f"Filtered down queue items for '{server_name}'\n{json.dumps(queue_dict, indent=4)}\n")
+
+ # Create a dictionary to store output messages
+ output_dict = {
+ 'server_name': server_name,
+ 'queue': {},
+ 'qbit': {}
+ }
+
+ # Check if the queue is empty for the specified instance
+ if queue_dict == {}:
+ logger.info(f"No items in the queue for {server_name} for Queinatorr to process.\n")
+ else:
+ logger.info(f"Number of items in the queue for {server_name}: {len(queue_dict)}\n")
+
+ # Process and handle the queue in Radarr or Sonarr
+ messages_dict = handle_queue(queue_dict, app)
+ if messages_dict:
+ output_dict['queue'] = messages_dict
+
+ # Handle moving torrents from the queue to the specified categories in qBittorrent
+ messages_dict = handle_qbit(queue_dict, qb, post_import_category, pre_import_category, days_to_keep, server_name, logger)
+ if messages_dict:
+ output_dict['qbit'] = messages_dict
+
+ return output_dict
+
+def notification(messages, logger):
+ """
+ This function will send a notification to Discord.
+
+ Args:
+ messages (dict): Dictionary of messages to send to Discord.
+
+ Returns:
+ None
+ """
+ # If there are no messages to send, log and exit the function
+ if not messages:
+ logger.info("No Discord messages to send.")
+ return
+
+ fields = []
+ # Iterate through each instance and its corresponding value in the messages dictionary
+ for instance_name, value in messages.items():
+ field_list = []
+
+ # Retrieve values for various parameters from the input 'value' dictionary
+ pre_import_category = value.get('pre_import_category', None)
+ post_import_category = value.get('post_import_category', None)
+ qbit_instance = value.get('qbit_instance', None)
+ days_to_keep = value.get('days_to_keep', None)
+
+ # Retrieve output from the value dictionary
+ output = value['output']
+ server_name = output['server_name']
+ queue_items = output.get('queue', None)
+ qbit_items = output.get('qbit', None)
+
+ total_queue_items = 0
+ total_qbit_items = 0
+
+ # Calculate total queue items and construct fields for queue items
+ if queue_items:
+ for torrent, data in queue_items.items():
+ messages = data['messages']
+ total = sum(messages.values())
+ total_queue_items += total
+ if total_queue_items > 0:
+ field = {
+ "name": f"{server_name} - Queue Items (Total: " + str(total_queue_items) + ")",
+ "value": f"```Items removed from queue:\nAction: {pre_import_category} -> {post_import_category}```",
+ "inline": False
+ }
+ fields.append(field)
+
+ # Calculate total qBittorrent items and construct fields for qBittorrent items
+ if qbit_items:
+ for torrent, data in qbit_items.items():
+ count = data['count']
+ total_qbit_items += count
+ if total_qbit_items > 0:
+ field = {
+ "name": f"{server_name} - qBittorrent Items (Total: " + str(total_qbit_items) + ")",
+                    "value": f"```Items moved from {pre_import_category} -> {post_import_category}:\nDownloaded {days_to_keep} days ago or more```",
+ "inline": False
+ }
+ fields.append(field)
+
+ if field_list:
+ fields.append(field_list)
+
+ # If there are fields to be sent to Discord, send the message
+ if fields:
+ discord(fields, logger, script_name, description=f"{'__**Dry Run**__' if dry_run else 'Queinatorr'}", color=0x800080, content=None)
+
+
+
+def print_output(messages, logger):
+ """
+ This function will print the output to the console.
+
+ Args:
+ messages (dict): Dictionary of messages to print.
+
+ Returns:
+ None
+ """
+
+ # Set the header based on whether it's a dry run or not
+ if dry_run:
+ header = f"DRY RUN: Items removed from queue."
+ else:
+ header = f"Items removed from queue."
+
+ # If there are no messages to print, log and exit the function
+ if not messages:
+ logger.info("No items to print.")
+ return
+
+ # Iterate through each instance and its corresponding value in the messages dictionary
+ for instance_name, value in messages.items():
+ pre_import_category = value.get('pre_import_category', None)
+ post_import_category = value.get('post_import_category', None)
+ qbit_instance = value.get('qbit_instance', None)
+
+ # Retrieve output from the value dictionary
+ output = value['output']
+ server_name = output['server_name']
+ queue_items = output.get('queue', None)
+ qbit_items = output.get('qbit', None)
+
+ # Process and print queue items information
+ if queue_items:
+ # Create header and subheader for queue items
+ table = [
+ [f"Queue Items Processed for {server_name}"],
+ [f"{header}"]
+ ]
+ logger.info(create_table(table))
+
+ # Iterate through each queue item and print its details
+ for id, data in queue_items.items():
+ title = data['title']
+ year = data['year']
+ torrent = data['torrent']
+ messages = data['messages']
+ total = sum(messages.values())
+
+ # Print details for each queue item and its messages
+ logger.info(f"\t{title} ({year}) - {torrent} - {total} items")
+ for message, count in messages.items():
+ logger.info(f"\t\t{message} ({count})")
+ logger.info('')
+
+ # Process and print qBittorrent items information
+ if qbit_items:
+ # Create header and subheader for qBittorrent items
+ table = [
+ [f"qBittorrent Items Processed for {server_name}"],
+ [f"{pre_import_category} -> {post_import_category} ({qbit_instance})"],
+ ]
+ logger.info(create_table(table))
+
+ # Iterate through each qBittorrent item and print its details
+ for torrent, data in qbit_items.items():
+ count = data['count']
+ message = data['message']
+
+ # Print details for each qBittorrent item
+ if count > 1:
+ logger.info(f"\t{message} - {count} times")
+ else:
+ logger.info(f"\t{message}")
+
+def main(config):
+ """
+ Main function.
+ """
+ global dry_run
+ dry_run = config.dry_run
+ log_level = config.log_level
+ logger = setup_logger(log_level, script_name)
+ script_config = config.script_config
+ name = script_name.replace("_", " ").upper()
+
+ try:
+ logger.info(create_bar(f"START {name}"))
+ # Display a notice for dry run mode if enabled
+ if dry_run:
+ table = [
+ ["Dry Run"],
+ ["NO CHANGES WILL BE MADE"]
+ ]
+ logger.info(create_table(table))
+
+ # Retrieve script configuration from the config file
+ script_config = config.script_config
+
+ # Get the number of days to keep torrents in pre-import category
+ days_to_keep = script_config.get('days_to_keep', 15)
+
+ # Retrieve instance information from the configuration
+ instances = script_config.get('instances', None)
+ if instances is None:
+ logger.error("No instances defined in the config.")
+ return
+
+ # Initialize the final output dictionary
+ final_output_dict = {}
+
+ # Iterate through each instance type and its settings in the configuration
+ for instance_type, instance_data in config.instances_config.items():
+ for instance, instance_settings in instances.items():
+ # Check if the instance exists in the configuration data
+ if instance in instance_data:
+ # Retrieve necessary instance settings
+ url = instance_data[instance]['url']
+ api = instance_data[instance]['api']
+ pre_import_category = instance_settings.get('pre_import_category', False)
+ post_import_category = instance_settings.get('post_import_category', False)
+ qbit_instance = instance_settings.get('qbit_instance', False)
+
+ # Process the instance and retrieve the output
+ output = process_instance(instance_type, url, api, pre_import_category, post_import_category, qbit_instance, days_to_keep, logger, config)
+
+ # If there is an output, update the final output dictionary
+ if output:
+ final_output_dict[instance] = {
+ 'output': output,
+ 'pre_import_category': pre_import_category,
+ 'post_import_category': post_import_category,
+ 'qbit_instance': qbit_instance,
+ 'days_to_keep': days_to_keep
+ }
+
+ # Print the final output details to the console
+ print_output(final_output_dict, logger)
+
+ # Send a notification to Discord with the final output
+ if discord_check(script_name):
+ notification(final_output_dict, logger)
+
+ except KeyboardInterrupt:
+ print("Keyboard Interrupt detected. Exiting...")
+ sys.exit()
+ except Exception:
+ logger.error(f"\n\nAn error occurred:\n", exc_info=True)
+ logger.error(f"\n\n")
+ finally:
+ logger.info(create_bar(f"END {name}"))
\ No newline at end of file
diff --git a/modules/renameinatorr.py b/modules/renameinatorr.py
new file mode 100755
index 0000000..1d4ab6d
--- /dev/null
+++ b/modules/renameinatorr.py
@@ -0,0 +1,412 @@
+# _____ _ _
+# | __ \ (_) | |
+# | |__) |___ _ __ __ _ _ __ ___ ___ _ _ __ __ _| |_ ___ _ __ _ __
+# | _ // _ \ '_ \ / _` | '_ ` _ \ / _ \ | '_ \ / _` | __/ _ \| '__| '__|
+# | | \ \ __/ | | | (_| | | | | | | __/ | | | | (_| | || (_) | | | |
+# |_| \_\___|_| |_|\__,_|_| |_| |_|\___|_|_| |_|\__,_|\__\___/|_| |_|
+# ===================================================================================================
+# Author: Drazzilb
+# Description: This script will rename all media in Sonarr/Radarr to match the naming scheme
+# configured within Radarr/Sonarr. It will also add a tag to each item so that it can be easily
+# identified as having been renamed.
+# Usage: python3 /path/to/renameinatorr.py
+# Requirements: requests, pyyaml
+# License: MIT License
+# ===================================================================================================
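+#
+# Illustrative config sketch (key names follow what main() reads below; the top-level key and
+# all values are placeholders, not copied from any real config):
+#   renameinatorr:
+#     rename_folders: true
+#     count: 10
+#     tag_name: renamed
+#     instances:
+#       - radarr_1
+#       - sonarr_1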
+
+import json
+import re
+import sys
+import time
+
+from util.arrpy import StARR
+from util.utility import *
+from util.discord import discord, discord_check
+from util.logger import setup_logger
+
+try:
+ from tqdm import tqdm
+except ImportError as e:
+ print(f"ImportError: {e}")
+ print("Please install the required modules with 'pip install -r requirements.txt'")
+ exit(1)
+
+script_name = "renameinatorr"
+
+def print_output(output_dict, logger):
+ """
+ Prints the output of the script to the console.
+
+ Args:
+ output_dict (dict): Dictionary containing the output of the script.
+
+ Returns:
+ None
+ """
+ # Iterate through each instance's output in the provided dictionary
+ for instance, instance_data in output_dict.items():
+ # Create a table for the specific instance's rename list
+ table = [
+ [f"{instance_data['server_name'].capitalize()} Rename List"],
+ ]
+ logger.info(create_table(table))
+
+ # Iterate through each item in the instance's data
+ for item in instance_data['data']:
+ # Display title and year if available
+ if item['file_info'] or item['new_path_name']:
+ logger.info(f"{item['title']} ({item['year']})")
+
+ # Display folder rename information if available
+            if item['new_path_name']:
+                logger.info(f"\tFolder Renamed: {item['path_name']} -> {item['new_path_name']}")
+
+ # Display file information if available
+ if item['file_info']:
+ logger.info(f"\tFiles:")
+ for existing_path, new_path in item['file_info'].items():
+ logger.info(f"\t\tOriginal: {existing_path}\n\t\tNew: {new_path}\n")
+ logger.info('')
+
+ # Calculate total counts for various rename items
+ total_items = len(instance_data['data'])
+ total_rename_items = len([value['file_info'] for value in instance_data['data'] if value['file_info']])
+ total_folder_rename = len([value['new_path_name'] for value in instance_data['data'] if value['new_path_name']])
+
+ # Display summary of rename actions if any rename occurred
+ if any(value['file_info'] or value['new_path_name'] for value in instance_data['data']):
+ table = [
+ [f"{instance_data['server_name'].capitalize()} Rename Summary"],
+ [f"Total Items: {total_items}"],
+ ]
+ if any(value['file_info'] for value in instance_data['data']):
+ table.append([f"Total Renamed Items: {total_rename_items}"])
+ if any(value['new_path_name'] for value in instance_data['data']):
+ table.append([f"Total Folder Renames: {total_folder_rename}"])
+ logger.info(create_table(table))
+ else:
+ logger.info(f"No items renamed in {instance_data['server_name']}.")
+ logger.info('')
+
+def notification(output_dict, logger):
+ """
+ Sends a notification to Discord with the output of the script.
+
+ Args:
+ output_dict (dict): Dictionary containing the output of the script.
+
+ Returns:
+ None
+ """
+ # Initialize empty lists and dictionaries to store Discord messages and fields
+ fields = []
+ discord_dict = {}
+
+ # Process each instance's data in the output dictionary
+ for instance, instance_data in output_dict.items():
+
+ # Iterate through each item in the instance's data
+ for item in instance_data['data']:
+ # Prepare information for Discord message fields for each item (file renames and folder renames)
+ if item['file_info'] or item['new_path_name']:
+ current_field = ""
+ name = f"{item['title']} ({item['year']})"
+ item_messages = []
+
+ # Collect folder rename information if available
+ if item['new_path_name']:
+ item_messages.append(f"Folder:\n{item['path_name']} -> {item['new_path_name']}\n")
+
+ # Collect file rename information if available
+ if item['file_info']:
+ for existing_path, new_path in item['file_info'].items():
+ item_messages.append(f"{existing_path}\n\n{new_path}\n")
+
+ # Split collected messages into multiple fields if exceeding character limits
+ for message in item_messages:
+ if len(current_field) + len(message) + len("\t\n") <= 1000:
+ current_field += message + "\n"
+ else:
+ fields.append({
+ "name": name,
+ "value": f"```{current_field}```"
+ })
+ current_field = message + "\n"
+ name = ""
+
+ # Append the last remaining field or set of fields
+ if current_field:
+ fields.append({
+ "name": name,
+ "value": f"```{current_field}```"
+ })
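+    # Discord allows at most 25 embed fields per message, so the collected fields are chunked
+    # into batches of 25 below; a second pass then keeps each batch under a conservative
+    # 5000-character budget before sending.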
+ if len(fields) <= 25:
+ discord_dict[1] = fields
+ else:
+ # Create multiple message keys if exceeding a certain number of fields
+ num_fields = len(fields)
+ num_messages_per_field = 25
+ num_keys = num_fields // num_messages_per_field
+ if num_fields % num_messages_per_field != 0:
+ num_keys += 1
+
+ for i in range(num_keys):
+ start_index = i * num_messages_per_field
+ end_index = min(start_index + num_messages_per_field, num_fields)
+ discord_dict[i + 1] = fields[start_index:end_index]
+
+ # Calculate character counts and split messages if they exceed specified limits
+ new_dict = {}
+ new_fields = []
+ new_field_count = 0
+ new_character_count = 0
+
+ for key, value in discord_dict.items():
+ total_character_count = sum(len(field['value']) for field in value)
+ if total_character_count > 5000:
+ for field in value:
+ field_character_count = len(field['value'])
+ if new_character_count + field_character_count + len("\n") + len("\t") <= 5000:
+ new_fields.append(field)
+ new_character_count += field_character_count
+ else:
+ new_dict[new_field_count + 1] = new_fields
+ new_fields = [field]
+ new_field_count += 1
+ new_character_count = field_character_count
+
+ if new_fields:
+ new_dict[new_field_count + 1] = new_fields
+ else:
+ new_dict[key] = value
+
+ discord_dict = new_dict
+
+ # Send Discord messages
+ total_character_count_per_key = {}
+ for key, value in discord_dict.items():
+ total_character_count_per_key[key] = sum(len(field['value']) for field in value)
+ for key, value in discord_dict.items():
+ print(f"Sending message {key} of {len(discord_dict)}")
+ discord(fields=value, logger=logger, script_name=script_name, description=f"{'__**Dry Run**__' if dry_run else ''}", color=0x00ff00, content=None)
+ if key % 5 == 0:
+ print("Pausing for 5 seconds to let Discord catch up...")
+ time.sleep(5)
+
+
+def process_instance(app, rename_folders, server_name, instance_type, count, tag_name, logger):
+ """
+ Processes the data for a specific instance.
+
+ Args:
+ app (StARR): StARR object for the instance.
+ rename_folders (bool): Whether or not to rename folders.
+ server_name (str): Name of the instance.
+        instance_type (str): Type of instance (Sonarr or Radarr).
+        count (int): Maximum number of untagged items to process per run.
+        tag_name (str): Name of the tag used to mark items that have already been processed.
+        logger (Logger): Logger instance.
+
+ Returns:
+ list: List of dictionaries containing the data for each item.
+ """
+ table = [
+ [f"Processing {server_name}"]
+ ]
+ print(create_table(table))
+
+ # Fetch data related to the instance (Sonarr or Radarr)
+ media_dict = handle_starr_data(app, server_name, instance_type, include_episode=False)
+
+    # If count and tag_name are specified, limit processing to 'count' items that do not yet have the tag
+ tag_id = None
+ if count and tag_name:
+ tag_id = app.get_tag_id_from_name(tag_name)
+ if tag_id:
+ media_tmp = [item for item in media_dict if tag_id not in item['tags']][:count]
+
+ # If all media is tagged, remove tags and fetch new data
+ if not media_tmp:
+ media_ids = [item['media_id'] for item in media_dict]
+ logger.info("All media is tagged. Removing tags...")
+ app.remove_tags(media_ids, tag_id)
+ media_dict = handle_starr_data(app, server_name, instance_type, include_episode=False)
+ media_dict = [item for item in media_dict if tag_id not in item['tags']][:count]
+ else:
+ media_dict = media_tmp
+
+ logger.debug(f"media dict:\n{json.dumps(media_dict, indent=4)}")
+
+ # Process each item in the fetched data
+ rename_response = []
+ if media_dict:
+ print("Processing data... This may take a while.")
+ for item in tqdm(media_dict, desc=f"Processing '{server_name}' Media", unit="items", disable=None, leave=True):
+ file_info = {}
+ can_rename = False
+ # Fetch rename list and sort it by existingPath
+ rename_response = app.get_rename_list(item['media_id'])
+ rename_response.sort(key=lambda x: x['existingPath'])
+
+ if rename_response:
+ can_rename = True
+
+ # Process each item in the rename list to get file rename information
+ for items in rename_response:
+ existing_path = items.get('existingPath', None)
+ new_path = items.get('newPath', None)
+
+ # Remove 'Season' folders from paths if they exist
+ pattern = r"Season \d{1,2}/"
+ if re.search(pattern, existing_path) or re.search(pattern, new_path):
+ existing_path = re.sub(pattern, "", existing_path)
+ new_path = re.sub(pattern, "", new_path)
+
+ file_info[existing_path] = new_path
+
+ # Update item with file rename information
+ item["new_path_name"] = None
+ item["file_info"] = file_info
+ item["can_rename"] = can_rename
+
+ # If not in dry run, perform file renaming
+ if not dry_run:
+ # Get media IDs and initiate file renaming
+ media_ids = []
+ for item in media_dict:
+ if item["can_rename"]:
+ media_ids.append(item['media_id'])
+
+ if media_ids:
+ # Rename files and wait for media refresh
+ app.rename_media(media_ids)
+
+ # Refresh media and wait for it to be ready
+ print(f"Refreshing {server_name}...")
+ response = app.refresh_items(media_ids)
+
+ # Wait for media to be ready
+ ready = app.wait_for_command(response['id'])
+
+ if ready:
+ logger.info(f"Media refreshed on {server_name}...")
+ ready = False
+ else:
+ logger.info(f"No media to rename on {server_name}...")
+
+ if tag_id and count and tag_name:
+ # Add tag to items that were renamed
+ print(f"Adding tag '{tag_name}' to items in {server_name}...")
+ app.add_tags(media_ids, tag_id)
+
+ # Group and rename root folders if necessary
+ grouped_root_folders = {}
+
+ # Group root folders by root folder name
+ if rename_folders:
+ print(f"Renaming folders in {server_name}...")
+ for item in media_dict:
+ root_folder = item["root_folder"]
+ if root_folder not in grouped_root_folders:
+ grouped_root_folders[root_folder] = []
+ grouped_root_folders[root_folder].append(item['media_id'])
+
+ # Rename folders and wait for media refresh
+ for root_folder, media_ids in grouped_root_folders.items():
+ app.rename_folders(media_ids, root_folder)
+
+ # Refresh media and wait for it to be ready
+ print(f"Refreshing {server_name}...")
+ response = app.refresh_items(media_ids)
+
+ # Wait for media to be ready
+ print(f"Waiting for {server_name} to refresh...")
+ ready = app.wait_for_command(response['id'])
+
+ print(f"Folders renamed in {server_name}...")
+ # Get updated media data and update item with new path names
+ if ready:
+ print(f"Fetching updated data for {server_name}...")
+ new_media_dict = handle_starr_data(app, server_name, instance_type, include_episode=False)
+ for new_item in new_media_dict:
+ for old_item in media_dict:
+ if new_item['media_id'] == old_item['media_id']:
+ if new_item['path_name'] != old_item['path_name']:
+ old_item['new_path_name'] = new_item['path_name']
+
+ return media_dict
+
+def main(config):
+ """
+ Main function.
+ """
+ global dry_run
+ dry_run = config.dry_run
+ log_level = config.log_level
+ logger = setup_logger(log_level, script_name)
+ script_config = config.script_config
+ name = script_name.replace("_", " ").upper()
+
+ try:
+ logger.info(create_bar(f"START {name}"))
+ # Get instances and rename_folders settings from the script config
+ script_config = config.script_config
+ instances = config.script_config.get('instances', None)
+ rename_folders = config.script_config.get('rename_folders', False)
+ count = config.script_config.get('count', 0)
+ tag_name = config.script_config.get('tag_name', None)
+ valid = validate(config, script_config, logger)
+
+ # Log script settings
+ table = [
+ ["Script Settings"]
+ ]
+ logger.debug(create_table(table))
+ logger.debug(f'{"Dry_run:":<20}{dry_run}')
+ logger.debug(f'{"Log level:":<20}{log_level}')
+ logger.debug(f'{"Instances:":<20}{instances}')
+ logger.debug(f'{"Rename Folders:":<20}{rename_folders}')
+ logger.debug(f'{"Count:":<20}{count}')
+ logger.debug(f'{"Tag Name:":<20}{tag_name}')
+ logger.debug(create_bar("-"))
+
+ # Handle dry run settings
+ if dry_run:
+ table = [
+ ["Dry Run"],
+ ["NO CHANGES WILL BE MADE"]
+ ]
+ logger.info(create_table(table))
+ logger.info('')
+
+ # Output dictionary to store processed data
+ output_dict = {}
+
+ # Process instances and gather data
+ for instance_type, instance_data in config.instances_config.items():
+ for instance in instances:
+ if instance in instance_data:
+ # Initialize StARR object for the instance
+ app = StARR(instance_data[instance]['url'], instance_data[instance]['api'], logger)
+ server_name = app.get_instance_name()
+
+ # Process data for the instance and store in output_dict
+ data = process_instance(app, rename_folders, server_name, instance_type, count, tag_name, logger)
+ output_dict[instance] = {
+ "server_name": server_name,
+ "data": data
+ }
+
+ # Print output and send notifications if data exists
+ if any(value['data'] for value in output_dict.values()):
+ print_output(output_dict, logger)
+ if discord_check(script_name):
+ notification(output_dict, logger)
+ else:
+ logger.info("No media items to rename.")
+ except KeyboardInterrupt:
+ print("Keyboard Interrupt detected. Exiting...")
+ sys.exit()
+ except Exception:
+ logger.error(f"\n\nAn error occurred:\n", exc_info=True)
+ logger.error(f"\n\n")
+ finally:
+ logger.info(create_bar(f"END {name}"))
\ No newline at end of file
diff --git a/modules/sync_gdrive.py b/modules/sync_gdrive.py
new file mode 100755
index 0000000..540f9e4
--- /dev/null
+++ b/modules/sync_gdrive.py
@@ -0,0 +1,143 @@
+import shlex
+import json
+import os
+
+from util.call_script import call_script
+from util.utility import create_bar
+from util.logger import setup_logger
+import sys
+
+
+script_name = "sync_gdrive"
+
+bash_script_file = os.path.realpath(os.path.dirname(os.path.realpath(__file__)) + '/../scripts/rclone.sh')
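+
+# set_cmd_args() below builds the argument list passed to rclone.sh; based on that code, the shell
+# script is expected to accept: -i <client id>, -s <client secret>, -t <token JSON>,
+# -l <local sync location>, -f <Google Drive folder id>, -g <service account file>.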
+
+def output_debug_info(cmd, settings):
+ client_id = settings.get('client_id', None)
+ client_secret = settings.get('client_secret', None)
+ token = settings.get('token', None)
+ debug_cmd = cmd.copy()
+ if '-i' in debug_cmd:
+ debug_cmd[debug_cmd.index('-i') + 1] = '' if client_id else 'None'
+ if '-s' in debug_cmd:
+ debug_cmd[debug_cmd.index('-s') + 1] = '' if client_secret else 'None'
+
+ if '-t' in debug_cmd:
+ debug_cmd[debug_cmd.index('-t') + 1] = '' if token else 'None'
+
+ return debug_cmd
+
+def set_cmd_args(settings, logger):
+ cmds = []
+ cmd = [bash_script_file]
+ sync_list = []
+ client_id = settings.get('client_id', None)
+ client_secret = settings.get('client_secret', None)
+ token = settings.get('token', None)
+ gdrive_sa_location = settings.get('gdrive_sa_location', None)
+ gdrive_sync = settings.get('gdrive_sync', None)
+
+ sync_list = gdrive_sync if isinstance(gdrive_sync, list) else [gdrive_sync]
+
+ if gdrive_sa_location and os.path.isfile(gdrive_sa_location):
+ gdrive_okay = True
+ elif gdrive_sa_location and not os.path.isfile(gdrive_sa_location):
+ gdrive_okay = False
+ logger.warning(f"\nGoogle service account file '{gdrive_sa_location}' does not exist\nPlease make sure you have the correct path to the file or remove the path from the config file\n")
+ else:
+ gdrive_okay = False
+
+ logger.debug(f"Sync list: {sync_list}")
+ for sync_item in sync_list:
+ logger.debug(f"Syncing: {sync_item}")
+ sync_location = sync_item['location']
+ sync_id = sync_item['id']
+
+ sync_cmd = cmd.copy()
+ if client_id:
+ sync_cmd.append('-i')
+ sync_cmd.append(shlex.quote(client_id))
+ else:
+ logger.error("No client id provided")
+ return
+
+ if client_secret:
+ sync_cmd.append('-s')
+ sync_cmd.append(shlex.quote(client_secret))
+ else:
+ logger.error("No client secret provided")
+ return
+
+ if gdrive_sync:
+ if sync_location != '' and os.path.exists(sync_location):
+ sync_cmd.append('-l')
+ sync_cmd.append(shlex.quote(sync_item['location']))
+ else:
+ if not os.path.exists(sync_location):
+ logger.error(f"Sync location {sync_location} does not exist")
+ # Create the directory if it doesn't exist
+ try:
+ os.makedirs(sync_location)
+ logger.info(f"Created {sync_location}")
+ sync_cmd.append('-l')
+ sync_cmd.append(shlex.quote(sync_item['location']))
+ except Exception as e:
+ logger.error(f"Exception occurred while creating {sync_location}: {e}")
+ return
+ else:
+ logger.error("No sync location provided")
+ return
+ if sync_id != '':
+ sync_cmd.append('-f')
+ sync_cmd.append(shlex.quote(sync_item['id']))
+ else:
+ logger.error("No gdrive id provided")
+ return
+
+ if token:
+ sync_cmd.append('-t')
+ sync_cmd.append(json.dumps(token))
+
+ if gdrive_okay:
+ sync_cmd.append('-g')
+ sync_cmd.append(shlex.quote(gdrive_sa_location))
+
+ cmds.append(sync_cmd)
+
+ return cmds
+
+# run the rclone.sh script
+def run_rclone(cmd, settings, logger):
+ debug_cmd = output_debug_info(cmd, settings)
+ try:
+ logger.debug(f"RClone command with args: {debug_cmd}")
+ call_script(cmd, logger)
+ logger.debug(f"RClone command with args: {debug_cmd} --> Success")
+ except Exception as e:
+ logger.error(f"Exception occurred while running rclone.sh: {e}")
+ logger.error(f"RClone command with args: {debug_cmd} --> Failed")
+ pass
+
+# Main function
+def main(config, logger=None):
+ """
+ Main function.
+ """
+ global dry_run
+ settings = config.script_config
+ log_level = config.log_level
+ logger = setup_logger(log_level, script_name)
+ name = script_name.replace("_", " ").upper()
+
+ try:
+ logger.info(create_bar(f"START {name}"))
+ for cmd in set_cmd_args(settings, logger):
+ run_rclone(cmd, settings, logger)
+ except KeyboardInterrupt:
+ print("Keyboard Interrupt detected. Exiting...")
+ sys.exit()
+ except Exception:
+ logger.error(f"\n\nAn error occurred:\n", exc_info=True)
+ logger.error(f"\n\n")
+ finally:
+ logger.info(create_bar(f"END {name}"))
\ No newline at end of file
diff --git a/modules/unmatched_assets.py b/modules/unmatched_assets.py
new file mode 100755
index 0000000..e043cbb
--- /dev/null
+++ b/modules/unmatched_assets.py
@@ -0,0 +1,357 @@
+# _ _ _ _ _ _
+# | | | | | | | | | | /\ | |
+# | | | |_ __ _ __ ___ __ _| |_ ___| |__ ___ __| | / \ ___ ___ ___| |_ ___
+# | | | | '_ \| '_ ` _ \ / _` | __/ __| '_ \ / _ \/ _` | / /\ \ / __/ __|/ _ \ __/ __|
+# | |__| | | | | | | | | | (_| | || (__| | | | __/ (_| | / ____ \\__ \__ \ __/ |_\__ \
+# \____/|_| |_|_| |_| |_|\__,_|\__\___|_| |_|\___|\__,_| /_/ \_\___/___/\___|\__|___/
+# ______
+# |______|
+# ===========================================================================================================
+# Author: Drazzilb
+# Usage: python3 unmatched_assets.py
+# Note: There is a limitation to how this script matches series assets: the main series poster
+#       requires season posters to be present. If a series does not have a season poster, the
+#       series poster will not be matched and the series will appear in the movies section.
+# Requirements: requests
+# License: MIT License
+# ===========================================================================================================
+
+import json
+import os
+import sys
+from util.utility import *
+from util.arrpy import StARR
+from util.logger import setup_logger
+
+try:
+ from plexapi.server import PlexServer
+ from tqdm import tqdm
+except ImportError as e:
+ print(f"ImportError: {e}")
+ print("Please install the required modules with 'pip install -r requirements.txt'")
+ exit(1)
+
+script_name = "unmatched_assets"
+
+def match_assets(assets_dict, media_dict, ignore_root_folders, logger):
+ """
+ Matches assets to media and returns a dictionary of unmatched assets.
+
+ Args:
+ assets_dict (dict): Dictionary of assets.
+ media_dict (dict): Dictionary of media.
+        ignore_root_folders (list): List of root folders to ignore.
+        logger (Logger): Logger instance.
+
+    Returns:
+        tuple: (dict of unmatched assets, list of skipped "Title (Year)" strings).
+ """
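+    # Illustrative shape of the returned unmatched assets dict (paths and titles are placeholders):
+    #   {'movies': {'/data/movies': [{'title': 'Example Movie', 'year': 2020}]},
+    #    'series': {'/data/tv': [{'title': 'Example Show', 'year': 2019,
+    #                             'missing_seasons': [2], 'season_numbers': [1, 2]}]},
+    #    'collections': {'Example Library': [{'title': 'Example Collection', 'year': None}]}}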
+
+ # Initialize dictionary to store unmatched assets by media types
+ unmatched_assets = {}
+ skipped = []
+ # Loop through different media types
+ for media_type in ['movies', 'series', 'collections']:
+ unmatched_assets[media_type] = {}
+ # Check if the media type is present in both assets and media dictionaries
+ if media_type in media_dict and media_type in assets_dict:
+ # Iterate through each media data in the media dictionary of the current type
+ for media_data in tqdm(media_dict[media_type], desc=f"Matching {media_type}", unit="media", total=len(media_dict[media_type]), leave=True, disable=None):
+ # Initialize variable to store whether a match was found
+ matched = False
+
+                # Skip media that is not yet released, ended, or continuing
+                if media_type in ['series', 'movies'] and media_data['status'] not in ['released', 'ended', 'continuing']:
+ skipped.append(f"{media_data['title']} ({media_data['year']})")
+ continue
+
+ # Get location of media
+ if media_type == "collections":
+ location = media_data['location']
+ else:
+ location = media_data['root_folder']
+
+ # Get root folder name
+ root_folder = os.path.basename(location.rstrip('/')).lower()
+
+ # Check if the root folder should be ignored
+ if ignore_root_folders:
+ if root_folder in ignore_root_folders or location in ignore_root_folders:
+ continue
+
+ # Check if location is in unmatched_assets
+ if location not in unmatched_assets[media_type]:
+ unmatched_assets[media_type][location] = []
+
+ # Get season numbers for series
+ if media_type == 'series':
+ media_seasons_numbers = [season['season_number'] for season in media_data.get('seasons', []) if season['season_has_episodes']]
+
+
+ # Compare media data with each asset data for the same media type
+ for asset_data in assets_dict[media_type]:
+ asset_seasons_numbers = asset_data.get('season_numbers', None)
+
+ # Check if the asset matches the media
+ if is_match(asset_data, media_data):
+ matched = True
+ if media_type == 'series':
+ if asset_seasons_numbers and media_seasons_numbers:
+ missing_seasons = []
+ for season in media_seasons_numbers:
+ if season not in asset_seasons_numbers:
+ missing_seasons.append(season)
+ if missing_seasons:
+ unmatched_assets[media_type][location].append({
+ 'title': media_data['title'],
+ 'year': media_data['year'],
+ 'missing_seasons': missing_seasons,
+ 'season_numbers': media_seasons_numbers
+ })
+ if not matched:
+ if media_type == 'series':
+ unmatched_assets[media_type][location].append({
+ 'title': media_data['title'],
+ 'year': media_data['year'],
+ 'season_numbers': media_seasons_numbers
+ })
+ else:
+ unmatched_assets[media_type][location].append({
+ 'title': media_data['title'],
+ 'year': media_data['year']
+ })
+ return unmatched_assets, skipped
+
+def print_output(unmatched_dict, media_dict, logger):
+ """
+ Prints the output of the script.
+
+ Args:
+ unmatched_dict (dict): Dictionary of unmatched assets.
+ media_dict (dict): Dictionary of media.
+
+ Returns:
+ None
+ """
+ # Asset types to consider
+ asset_types = ['movies', 'series', 'collections']
+ # Loop through different asset types
+ for asset_type in asset_types:
+ data_set = unmatched_dict.get(asset_type, None)
+ if data_set:
+ table = [
+ [f"Unmatched {asset_type.capitalize()}"]
+ ]
+ logger.info(create_table(table))
+ for location, data in data_set.items():
+ location = location.rstrip('/')
+ location_base = os.path.basename(location)
+ if data:
+ if asset_type == "collections":
+ suffix = " Library"
+ else:
+ suffix = ""
+ table = [
+ [f"{location_base.title()}{suffix}", len(data)]
+ ]
+ logger.info(create_table(table))
+ logger.info("")
+ for item in data:
+ if asset_type == 'series':
+ missing_seasons = item.get('missing_seasons', False)
+ if missing_seasons:
+ logger.info(f"\t{item['title']} ({item['year']}) (Seasons listed below have missing posters)")
+ for season in item['missing_seasons']:
+ logger.info(f"\t\tSeason: {season} <- Missing")
+ else:
+ logger.info(f"\t{item['title']} ({item['year']})")
+ for season in item['season_numbers']:
+ logger.info(f"\t\tSeason: {season}")
+ else:
+ year = f" ({item['year']})" if item['year'] else ""
+ logger.info(f"\t{item['title']}{year}")
+ logger.info("")
+ logger.info("")
+ # Calculate statistics for movies, series, collections, and the overall unmatched assets
+ unmatched_movies_total = sum(len(data) for data in unmatched_dict.get('movies', {}).values())
+ total_movies = len(media_dict.get('movies', [])) if media_dict.get('movies') else 0
+ percent_movies_complete = (total_movies - unmatched_movies_total) / total_movies * 100 if total_movies != 0 else 0
+
+ unmatched_series_total = sum(len(data) for data in unmatched_dict.get('series', {}).values())
+ total_series = len(media_dict.get('series', [])) if media_dict.get('series') else 0
+ series_percent_complete = (total_series - unmatched_series_total) / total_series * 100 if total_series != 0 else 0
+
+ unmatched_seasons_total = 0
+ total_seasons = 0
+ for location, data in unmatched_dict.get('series', {}).items():
+ for item in data:
+            if item.get('missing_seasons'):
+ unmatched_seasons_total += len(item['missing_seasons']) if item['missing_seasons'] else 0
+ elif item.get('season_numbers'):
+ unmatched_seasons_total += len(item['season_numbers']) if item['season_numbers'] else 0
+ for item in media_dict.get('series', []):
+ seasons = item.get('seasons', None)
+ if seasons:
+ for season in seasons:
+ if season['season_has_episodes']:
+ total_seasons += 1
+
+ season_total_percent_complete = (total_seasons - unmatched_seasons_total) / total_seasons * 100 if total_seasons != 0 else 0
+
+ unmatched_collections_total = sum(len(data) for data in unmatched_dict.get('collections', {}).values())
+ total_collections = len(media_dict.get('collections', [])) if media_dict.get('collections') else 0
+ collection_percent_complete = (total_collections - unmatched_collections_total) / total_collections * 100 if total_collections != 0 else 0
+
+ grand_total = total_movies + total_series + total_seasons + total_collections
+ grand_unmatched_total = unmatched_movies_total + unmatched_series_total + unmatched_seasons_total + unmatched_collections_total
+ grand_percent_complete = (grand_total - grand_unmatched_total) / grand_total * 100 if grand_total != 0 else 0
+
+ # Print statistics to the logger
+ logger.info('')
+ table = [
+ ["Statistics"],
+ ]
+ logger.info(create_table(table))
+ table = [
+ ["Type", "Total", "Unmatched", "Percent Complete"]
+ ]
+ if unmatched_dict.get('movies', None) or media_dict.get('movies', None):
+ table.append(["Movies", total_movies, unmatched_movies_total, f"{percent_movies_complete:.2f}%"])
+ if unmatched_dict.get('series', None) or media_dict.get('series', None):
+ table.append(["Series", total_series, unmatched_series_total, f"{series_percent_complete:.2f}%"])
+ table.append(["Seasons", total_seasons, unmatched_seasons_total, f"{season_total_percent_complete:.2f}%"])
+ if unmatched_dict.get('collections', None) or media_dict.get('collections', None):
+ table.append(["Collections", total_collections, unmatched_collections_total, f"{collection_percent_complete:.2f}%"])
+ table.append(["Grand Total", grand_total, grand_unmatched_total, f"{grand_percent_complete:.2f}%"])
+ logger.info(create_table(table))
+
+def main(config):
+ """
+ Main function.
+ """
+ global dry_run
+ dry_run = config.dry_run
+ log_level = config.log_level
+ logger = setup_logger(log_level, script_name)
+ script_config = config.script_config
+ name = script_name.replace("_", " ").upper()
+
+ try:
+ logger.info(create_bar(f"STARTING {name}"))
+ # Logging script settings
+
+ # Retrieving script configuration
+ script_config = config.script_config
+ source_dirs = script_config.get('source_dirs', '')
+ library_names = script_config.get('library_names', [])
+ ignore_collections = script_config.get('ignore_collections', [])
+ instances = script_config.get('instances', None)
+ ignore_root_folders = script_config.get('ignore_root_folders', [])
+ valid = validate(config, script_config, logger)
+
+ # Logging script settings
+ table = [
+ ["Script Settings"]
+ ]
+ logger.debug(create_table(table))
+ logger.debug(f'{"Log level:":<20}{log_level}')
+ logger.debug(f'{"Assets path:":<20}{source_dirs}')
+ logger.debug(f'{"Library names:":<20}{library_names}')
+ logger.debug(f'{"Ignore collections:":<20}{ignore_collections}')
+ logger.debug(f'{"Instances:":<20}{instances}')
+ logger.debug(create_bar("-"))
+
+ source_dirs = [source_dirs] if isinstance(source_dirs, str) else source_dirs
+
+ assets_list = []
+ for path in source_dirs:
+ results = categorize_files(path)
+ if results:
+ assets_list.extend(results)
+ else:
+ logger.error(f"No assets found in {path}.")
+
+ # Checking for assets and logging
+ if assets_list:
+ assets_dict = sort_assets(assets_list)
+ logger.debug(f"Assets:\n{json.dumps(assets_dict, indent=4)}")
+ else:
+ logger.error("No assets found, Check source_dirs setting in your config. Exiting.")
+ return
+
+ # Fetch information from Plex and StARR
+ media_dict = {
+ 'movies': [],
+ 'series': [],
+ 'collections': []
+ }
+ if instances:
+ for instance_type, instance_data in config.instances_config.items():
+ for instance in instances:
+ if instance in instance_data:
+ if instance_type == "plex":
+ url = instance_data[instance]['url']
+ api = instance_data[instance]['api']
+ try:
+ app = PlexServer(url, api)
+ except Exception as e:
+ logger.error(f"Error connecting to Plex: {e}")
+ app = None
+ if library_names and app:
+ print("Getting Plex data...")
+ results = get_plex_data(app, library_names, logger, include_smart=True, collections_only=True)
+ # Remove ignored collections
+                                if ignore_collections:
+                                    results = [collection for collection in results if collection['title'] not in ignore_collections]
+ media_dict['collections'].extend(results)
+ else:
+ logger.warning("No library names specified in config.yml. Skipping Plex.")
+ else:
+ url = instance_data[instance]['url']
+ api = instance_data[instance]['api']
+ app = StARR(url, api, logger)
+ server_name = app.get_instance_name()
+ if app:
+ print(f"Getting {instance_type.capitalize()} data...")
+ results = handle_starr_data(app, server_name, instance_type, include_episode=False)
+ if results:
+ if instance_type == "radarr":
+ media_dict['movies'].extend(results)
+ elif instance_type == "sonarr":
+ media_dict['series'].extend(results)
+ else:
+ logger.error(f"No {instance_type.capitalize()} data found.")
+
+ else:
+ logger.error(f"No instances found. Exiting script...")
+ return
+
+ if not any(media_dict.values()):
+ logger.error("No media found, Check instances setting in your config. Exiting.")
+ return
+ else:
+ logger.debug(f"Media:\n{json.dumps(media_dict, indent=4)}")
+ # Matching assets and printing output
+ unmatched_dict, skipped = match_assets(assets_dict, media_dict, ignore_root_folders, logger)
+ if skipped:
+ logger.debug("The following media was skipped due to it not being released, ended, or continuing:")
+ for item in skipped:
+ logger.debug(f"\t{item}")
+ if any(unmatched_dict.values()):
+ logger.debug(f"Unmatched Dict:\n{json.dumps(unmatched_dict, indent=4)}")
+ print_output(unmatched_dict, media_dict, logger)
+ else:
+ logger.info("All assets matched.")
+
+ except KeyboardInterrupt:
+ print("Exiting due to keyboard interrupt.")
+ sys.exit()
+ except Exception:
+ logger.error(f"\n\nAn error occurred:\n", exc_info=True)
+ logger.error(f"\n\n")
+ return
+ finally:
+ logger.info(create_bar(f"ENDING {name}"))
diff --git a/modules/upgradinatorr.py b/modules/upgradinatorr.py
new file mode 100755
index 0000000..6f912b0
--- /dev/null
+++ b/modules/upgradinatorr.py
@@ -0,0 +1,350 @@
+# _ _ _ _ _
+# | | | | | (_) | |
+# | | | |_ __ __ _ _ __ __ _ __| |_ _ __ __ _| |_ ___ _ __ _ __
+# | | | | '_ \ / _` | '__/ _` |/ _` | | '_ \ / _` | __/ _ \| '__| '__|
+# | |__| | |_) | (_| | | | (_| | (_| | | | | | (_| | || (_) | | | |
+# \____/| .__/ \__, |_| \__,_|\__,_|_|_| |_|\__,_|\__\___/|_| |_|
+# | | __/ |
+# |_| |___/
+# ===================================================================================================
+# Author: Drazzilb
+# Description: A script to upgrade Sonarr/Radarr libraries to keep them in line with TRaSH Guides
+# Usage: python3 /path/to/upgradinatorr.py
+# Requirements: requests, pyyaml
+# License: MIT License
+# ===================================================================================================
+
+import json
+import sys
+import time
+
+from util.arrpy import StARR
+from util.discord import discord, discord_check
+from util.utility import *
+from util.logger import setup_logger
+
+script_name = "upgradinatorr"
+
+def filter_media(media_dict, tag_id, count, logger):
+ """
+ Filter media_dict to remove items that are not monitored, have the tag_id, or are not in the correct status.
+
+ Args:
+ media_dict (list): A list of dictionaries containing media information.
+ tag_id (int): The tag_id to filter out.
+        count (int): The number of items to return.
+        logger (Logger): Logger instance.
+
+ Returns:
+ filtered_media_dict (list): A list of dictionaries containing media information.
+ """
+
+ filtered_media_dict = [] # Initializing an empty list to store filtered media
+ filter_count = 0 # Initializing a counter for the number of filtered items
+ for item in media_dict:
+ if filter_count == count: # Check if the desired count has been reached
+ break
+ # Check conditions: tag_id not in tags, monitored is True, and status is one of the specified statuses
+ if tag_id in item['tags'] or item['monitored'] == False or item['status'] not in ["continuing", "airing", "ended", "canceled", "released"]:
+ # Log skipped items
+ logger.debug(f"Skipping {item['title']} ({item['year']}), Status: {item['status']}, Monitored: {item['monitored']}, Tags: {item['tags']}")
+ continue # Move to the next item if conditions are not met
+ filtered_media_dict.append(item) # Append the item to the filtered list
+ filter_count += 1 # Increment the counter for filtered items
+ return filtered_media_dict # Return the filtered list of media
+
+
+def process_queue(queue, instance_type, media_ids):
+ """
+ Process the queue to return a list of dictionaries containing the download_id, media_id, and download.
+
+ Args:
+ queue (dict): A dictionary containing the queue information.
+ instance_type (str): The instance type to process.
+ media_ids (list): A list of media_ids to filter the queue with.
+
+ Returns:
+ queue_dict (list): A list of dictionaries containing the download_id, media_id, and download.
+ """
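+    # Illustrative shape of one entry in the returned list (values are placeholders):
+    #   {'download_id': 'ABC123', 'media_id': 42,
+    #    'download': 'Example.Movie.2020.1080p.WEB-DL', 'torrent_custom_format_score': 100}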
+
+ if instance_type == "radarr":
+ id_type = "movieId"
+ elif instance_type == "sonarr":
+ id_type = "seriesId"
+ queue_dict = [] # Initialize an empty list to store processed queue information
+ records = queue.get('records', None) # Retrieve 'records' from the queue dictionary
+ for item in records: # Iterate through each item in the 'records'
+ media_id = item[id_type] # Get the appropriate ID type ('movieId' or 'seriesId')
+ if media_id not in media_ids: # Check if the media_id is present in the provided list of media_ids
+ continue # Skip to the next item if the media_id is not in the list
+ # Append relevant information to the queue_dict list
+ queue_dict.append({
+ 'download_id': item['downloadId'],
+ 'media_id': media_id,
+ 'download': item['title'],
+ 'torrent_custom_format_score': item['customFormatScore'],
+ })
+ # Remove duplicate dictionaries in the queue_dict list and convert it to a list of unique dictionaries
+ queue_dict = [dict(t) for t in {tuple(d.items()) for d in queue_dict}]
+ return queue_dict # Return the processed queue information
+
+def process_instance(instance_type, instance_settings, app, logger):
+ """
+ Process the instance to return a dictionary containing the server_name, tagged_count, untagged_count,
+ total_count, and data.
+
+ Args:
+ instance_type (str): The instance type to process.
+ instance_settings (dict): A dictionary containing the instance settings.
+        app (StARR): An instance of the StARR class.
+        logger (Logger): Logger instance.
+
+ Returns:
+ output_dict (dict): A dictionary containing the server_name, tagged_count, untagged_count,
+ total_count, and data.
+ """
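+    # instance_settings is expected to carry the per-instance options read just below,
+    # roughly (placeholder values): {'count': 2, 'tag_name': 'checked', 'unattended': False}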
+ # Retrieve instance settings
+ tagged_count = 0
+ untagged_count = 0
+ total_count = 0
+ server_name = app.get_instance_name()
+ count = instance_settings.get('count', 2)
+ tag_name = instance_settings.get('tag_name', "checked")
+ unattended = instance_settings.get('unattended', False)
+
+ # Logging instance settings
+ table = [
+ [f"{instance_type} Settings"]
+ ]
+ logger.debug(create_table(table))
+ logger.debug(f'{"Count:":<20}{count}')
+ logger.debug(f'{"tag_name:":<20}{tag_name}')
+ logger.debug(f'{"unattended:":<20}{unattended}')
+ logger.debug('*' * 40)
+
+ # Fetch media from the instance
+ print(f"Gathering media from {server_name}...")
+ media_dict = handle_starr_data(app, server_name, instance_type, include_episode=False)
+ logger.debug(f"media_dict:\n{json.dumps(media_dict, indent=4)}")
+
+ # Get tag ID based on the provided tag name
+ tag_id = app.get_tag_id_from_name(tag_name)
+
+ # Filter media based on tag and count criteria
+ filtered_media_dict = filter_media(media_dict, tag_id, count, logger)
+ if not filtered_media_dict and unattended:
+ media_ids = [item['media_id'] for item in media_dict]
+ logger.info("All media is tagged. Removing tags...")
+ app.remove_tags(media_ids, tag_id)
+ media_dict = handle_starr_data(app, server_name, instance_type, include_episode=False)
+ filtered_media_dict = filter_media(media_dict, tag_id, count, logger)
+
+ # If no filtered_media and not unattended return
+ if not filtered_media_dict and not unattended:
+ logger.info(f"No media found for {server_name}.")
+ return
+
+ logger.debug(f"filtered_media_dict:\n{json.dumps(filtered_media_dict, indent=4)}")
+
+ # Processing tagged and untagged counts
+ if media_dict:
+ total_count = len(media_dict)
+ for item in media_dict:
+ if tag_id in item['tags']:
+ tagged_count += 1
+ else:
+ untagged_count += 1
+
+ # Initialize output dictionary
+ output_dict = {
+ 'server_name': server_name,
+ 'tagged_count': tagged_count,
+ 'untagged_count': untagged_count,
+ 'total_count': total_count,
+ 'data': []
+ }
+
+ # Processing media data
+ if not dry_run:
+ media_ids = [item['media_id'] for item in filtered_media_dict]
+ search_response = app.search_media(media_ids)
+ app.add_tags(media_ids, tag_id)
+ ready = app.wait_for_command(search_response['id'])
+ if ready:
+            sleep_time = 10 # Set the sleep time to 10 seconds
+ print(f"Waiting for {sleep_time} seconds to allow for search results to populate in the queue...")
+ time.sleep(sleep_time)
+ queue = app.get_queue(instance_type)
+ logger.debug(f"queue:\n{json.dumps(queue, indent=4)}")
+ queue_dict = process_queue(queue, instance_type, media_ids)
+ logger.debug(f"queue_dict:\n{json.dumps(queue_dict, indent=4)}")
+ for item in filtered_media_dict:
+ downloads = {}
+ for queue_item in queue_dict:
+ if item['media_id'] == queue_item['media_id']:
+ downloads[queue_item['download']] = queue_item['torrent_custom_format_score']
+ output_dict['data'].append({
+ 'media_id': item['media_id'],
+ 'title': item['title'],
+ 'year': item['year'],
+ 'download': downloads
+ })
+ else:
+ for item in filtered_media_dict:
+ output_dict['data'].append({
+ 'media_id': item['media_id'],
+ 'title': item['title'],
+ 'year': item['year'],
+ 'download': None,
+ 'torrent_custom_format_score': None
+ })
+ return output_dict
+
+def print_output(output_dict, logger):
+ """
+ Print the output to the console.
+
+ Args:
+ output_dict (dict): A dictionary containing the server_name, tagged_count, untagged_count,
+ total_count, and data.
+
+ Returns:
+ None
+ """
+ for instance, run_data in output_dict.items():
+ if run_data:
+ instance_data = run_data.get('data', None)
+ if instance_data:
+ # Create a table with the server name as the header
+ table = [
+ [f"{run_data['server_name']}"]
+ ]
+ logger.info(create_table(table))
+
+ # Iterate through the media items for this instance
+ for item in instance_data:
+ logger.info(f"{item['title']} ({item['year']})")
+
+ # Print downloads and their format scores associated with the media
+ if item['download']:
+ for download, format_score in item['download'].items():
+ logger.info(f"\t{download}\tScore: {format_score}")
+ else:
+ logger.info("\tNo upgrades found for this item.")
+
+ logger.info("") # Add a newline for separation between media items
+ else:
+ # If no items found for an instance, print a message indicating so
+ logger.info(f"No items found for {instance}.")
+
+def notification(output_dict, logger):
+ """
+ Send a notification to Discord.
+
+ Args:
+        output_dict (dict): Dictionary keyed by instance name; each value holds the
+                            server_name, tagged_count, untagged_count, total_count, and data.
+        logger (obj): The logger used for output.
+
+ Returns:
+ None
+ """
+
+ fields = []
+ # Iterate through the instances in the output dictionary
+ for instance, run_data in output_dict.items():
+ if run_data:
+ server_name = run_data['server_name']
+ instance_data = run_data.get('data', None)
+ server_list = []
+ # Iterate through the media items for this instance
+ for item in instance_data:
+ title = item['title']
+ year = item['year']
+ download = item['download']
+
+ # Construct a list of downloads and their format scores associated with the media
+                if download:
+                    torrent_list = []
+                    torrent_list.append(f"{title} ({year})")
+                    for torrent_item, format_score in download.items():
+                        torrent_list.append(f"\t{torrent_item}\n\tCF Score: {format_score}\n")
+                else:
+                    # Items without any upgrades are skipped in the notification
+                    continue
+                server_list.append("\n".join(torrent_list))
+ value = "\n".join(server_list)
+ # Construct a Discord field containing the server name and associated media/downloads
+ if server_list:
+ fields.append({
+ "name": server_name,
+ "value": f"```{value}```"
+ })
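+                # Note: Discord embed field values are capped at 1024 characters, so a very
+                # long list for one server may need to be split before sending (not handled here).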
+ # Send the constructed fields as a Discord message
+ if fields:
+ discord(fields, logger, script_name, description=f"{'__**Dry Run**__' if dry_run else ''}", color=0x00ff00, content=None)
+
+def main(config):
+ """
+ Main function.
+ """
+ global dry_run
+ dry_run = config.dry_run
+ log_level = config.log_level
+ logger = setup_logger(log_level, script_name)
+ script_config = config.script_config
+ name = script_name.replace("_", " ").upper()
+
+ try:
+ logger.info(create_bar(f"STARTING {name}"))
+ # Check if it's a dry run and display a message
+ if dry_run:
+ table = [
+ ["Dry Run"],
+ ["NO CHANGES WILL BE MADE"]
+ ]
+ logger.info(create_table(table))
+
+ # Access the script configuration settings
+ script_config = config.script_config
+
+ # Retrieve instances from the configuration file
+ instances = script_config.get('instances', None)
+ valid = validate(config, script_config, logger)
+
+ # Check if instances are present in the configuration
+ if not instances:
+ logger.error("No instances found in config file.")
+ sys.exit()
+
+ # Dictionary to store the final output
+ final_output_dict = {}
+
+ # Iterate over instance configurations
+ for instance_type, instance_data in config.instances_config.items():
+ for instance, instance_settings in instances.items():
+ if instance in instance_data:
+ # Initialize StARR with instance URL, API, and logger
+ final_output_dict[instance] = {}
+ url = instance_data[instance]['url']
+ api = instance_data[instance]['api']
+ app = StARR(url, api, logger)
+
+ # Process instance and get output
+ output = process_instance(instance_type, instance_settings, app, logger)
+ final_output_dict[instance] = output
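+                    # Results are keyed by the instance name from the config so that
+                    # print_output() and notification() can report per server.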
+
+ # Debug log of the final output dictionary
+ logger.debug(f"final_output_dict:\n{json.dumps(final_output_dict, indent=4)}")
+
+ # If there's data in the final output dictionary, print output and send notifications
+ if final_output_dict:
+ print_output(final_output_dict, logger)
+ if discord_check(script_name):
+ notification(final_output_dict, logger)
+ except KeyboardInterrupt:
+ print("Keyboard Interrupt detected. Exiting...")
+ sys.exit()
+ except Exception:
+        logger.error("\n\nAn error occurred:\n", exc_info=True)
+        logger.error("\n\n")
+ finally:
+ logger.info(create_bar(f"END OF {name}"))
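+
+# Illustrative invocation (a sketch only; assumes the project's Config wrapper exposes the
+# dry_run, log_level, script_config, and instances_config attributes used above, and the
+# import path below is hypothetical):
+#
+#     from util.config import Config
+#     main(Config(script_name))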
diff --git a/python-scripts/config.sample.yml b/python-scripts/config.sample.yml
deleted file mode 100644
index c7a6944..0000000
--- a/python-scripts/config.sample.yml
+++ /dev/null
@@ -1,332 +0,0 @@
-# This is a sample config file for the python scripts
-# You can copy this file and rename it to config.yml
-# You can also rename the scripts to whatever you want, just make sure to update the config file to match
-# The config file is broken up into sections, each section is a script
-# Each script has a log_level and dry_run option
-# log_level can be debug, info, warning, error, critical
-# dry_run can be true or false
-# The rest of the options are script specific
-global:
- radarr:
- # name is the name of the radarr instance, this is used to reference the instance in other scripts
- - name: radarr_1
- # api is the api key for the radarr instance
- api: abcdefghijlmnop
- # url is the url for the radarr instance
- url: http://localhost:7878
- - name: radarr_2
- api: abcdefghijklmnop
- url: http://localhost:1212
- sonarr:
- # name is the name of the sonarr instance, this is used to reference the instance in other scripts
- - name: sonarr_1
- # api is the api key for the sonarr instance
- api: abcdefghijlmnop
- # url is the url for the sonarr instance
- url: http://localhost:8989
- # name is the name of the second sonarr instance, this is used to reference the instance in other scripts, names must match
- - name: sonarr_2
- api: abcdefghijlmnop
- url: http://localhost:9090
- plex:
- # name is the name of the plex instance, this is used to reference the instance in other scripts
- - name: plex
- # api is the token key for the plex instance, if you don't know your token please see https://support.plex.tv/articles/204059436-finding-an-authentication-token-x-plex-token/
- api: abcdefghijlmnop
- # url is the url for the plex instance
- url: http://localhost:32400
- qbittorrent:
- # name is the name of the qbittorrent instance, this is used to reference the instance in other scripts
- - name: qbittorrent_1
- # url is the url for the qbittorrent instance
- url: http://\
- # username is the username for the qbittorrent instance
- username: username
- # password is the password for the qbittorrent instance
- password: password
- - name: qbittorrent_2
- url: http://
- username: username
- password: password
-
-discord:
- # This is the webhook for Notifiarr, if you don't use Notifiarr you can leave this blank
- notifiarr_webhook:
- # These are the webhooks for the discord scripts, if you don't use discord you can leave these blank
- upgradinatorr:
- # This is the discord webhook for this script
- discord_webhook:
- # This is the channel id for the discord script. Channel id is only needed if using Notifiarr
- channel_id:
- renameinatorr:
- discord_webhook:
- channel_id:
- renamer:
- discord_webhook:
- channel_id:
- movie_deletarr:
- discord_webhook:
- channel_id:
- unmatched_assets:
- discord_webhook:
- channel_id:
- nohl:
- discord_webhook:
- channel_id:
- queinatorr:
- discord_webhook:
- channel_id:
- labelarr:
- discord_webhook:
- channel_id:
- renamer_cleanarr:
- discord_webhook:
- channel_id:
-
-# The upgradinatorr script is used to upgrade movies and tv shows; it can be used to upgrade all movies and tv shows or just a subset.
-# WARNING: This script can cause havoc with your usenet provider or torrent tracker if you are not careful.
-# I am not responsible for any issues that may arise from using this script.
-# Please use this script at your own risk.
-# Best practice is to keep the count variable low, 1-5 is a good number.
-upgradinatorr:
- log_level: info
- dry_run: true
- radarr:
- - name: radarr_1 # This is referenced to the instance name in the global section
- count: 2 # This is the number of movies you want to have upgradinatorr search (Set a low number, 1-5 is a good number)
- monitored: true # Options: true or false
- status: released
- tag_name: checked # Set the tag_name to the tag you want to use to track movies that have been searched for
- unattended: false # Options: true or false, set if you want upgradinatorr to reset tags when all movies have been searched for
- reset: false # Options: true or false, set if you want upgradinatorr to reset tags when all movies have been searched for
- sonarr:
- - name: sonarr_1 # This is referenced to the instance name in the global section
- count: 2 # This is the number of series you want to have upgradinatorr search (Set a low number, 1-3 is a good number for series)
- monitored: true
- status:
- - continuing
- - ended
- tag_name:
- unattended: false
- reset: false
- - name: sonarr_2
- count:
- monitored: true
- status: continuing
- tag_name:
- unattended: false
- reset: false
-
-# The renameinatorr script is used to rename movies and tv shows; it can be used to rename all movies and tv shows or just a subset.
-# This script is meant to keep your library clean and tidy; it will rename your movies and tv shows to match the criteria set in the Media Management tab of Radarr/Sonarr.
-renameinatorr:
- log_level: info
- dry_run: true
- radarr:
- - name: radarr_1
- count: 1
- tag_name: renamed
- unattended: false
- reset: false
- sonarr:
- - name: sonarr_1
- count: 1
- tag_name: renamed
- unattended: false
- reset: false
-
-# The renamer script is used to rename your posters to match your Plex Collections and Media Folder names for Movies and TV Shows
-# This script is meant to be used in conjunction with Plex-Meta-Manager; however, it can be used on its own.
-renamer:
- # Script Note: --> is a copy/move & rename / -->> is a copy/move w/o rename
- log_level: info
- # Options are 'true' or 'false'
- dry_run: true
- asset_folders: false
- # Options are 'Copy' or 'Move'
- action_type: move
- print_only_renames: true
- # Library names are used to match collections posters to the collections listed w/in Plex. Typically Movie Libraries are used
- library_names:
- - Movies
- - Anime Movies
-  # Where your ingest movies folder is (The source and destination should not be the same directory)
- source_dir: /path/to/posters/ <--- Lowest priority
-  # What posters you'd like to override the source dir with. This dir will take priority for assets over source_dir
- # To not use any override, simply leave it blank or remove it from the config
- # Can be a single dir or a list of dirs
- source_overrides:
- - /path/to/posters/override/ <--- Middle priority
- - /path/to/posters/override2/ <--- Highest priority
- # Where your posters are going to go. In my usecase I use Plex-Meta-Manager. This is the /config/assets dir for PMM for me.
- destination_dir: /path/to/poster/destination
- # The thresholds are used to consider what is a "Match", As with any automation there is never a 100% guarantee of accuracy. There will be times the script will mess up.
- # If however you see it messing up more often on things you can restrict the threshold.
- # 0 = Anything goes, 100 = Must be exact match
- # The default numbers here are based upon what I've seen to be the most effective, I've had one-offs where I had to manually fix things.
- collection_threshold: 99
-  # Decide which radarr instance you will be using for renamer; this is useful if you have, for example, a Sonarr/Sonarr-Anime and/or Radarr/Radarr-Anime
-  # If, however, you duplicate entries between a Radarr/Radarr4K for example, this won't help and will only double the work for the script for no gain.
- radarr:
- - name: radarr_1
- sonarr:
- - name: sonarr_1
- - name: sonarr_2
-
-# This is a script that will take your existing collections, Movies, and Series, and match them to your Plex Library and/or your Media Dir
-unmatched_assets:
- log_level: info
- # Optional: Add libraries you want the script to check for unmatched assets
- asset_folders: false
- library_names:
- - Movies
- - Anime Movies
- # Optional: Add collections you'd like to ignore
- # These defaults are based upon my own usecase, you can remove them or add your own
- ignore_collections:
- - "Star Wars Universe"
- - "DC Animated Universe"
- - "Alien / Predator"
- - "Rocky / Creed"
- - "MonsterVerse"
- - "Star Trek"
- - "X-Men Universe"
- - "In Association With Marvel"
- - "View Askewniverse"
- - "The Mummy Universe"
- - "Middle Earth"
- - "In Association With DC"
- - "Wizarding World"
- - "Fast & Furious"
- - "The Spiderverse"
- - "Marvel Cinematic Universe"
- - "The Conjuring Universe"
- - "The Batman Universe"
- - "Collectionless"
- - "DC Extended Universe"
- assets_path: /path/to/poster-assets/
- media_paths:
- - /path/to/media/folders/movies
- - /path/to/media/folders/tv-shows
-
-# Paths should be the same paths that you use for your radarr/sonarr instance but should be full paths, not the ones you use if using docker
-# Eg. /mnt/user/data/media/movies/ or /mnt/user/data/media/tv-shows/
-# Include/Exclude profiles are used to filter out movies/tv shows that you don't want to be considered for the script
-# You can leave them blank if you want to include all profiles
-# Exclude series is used to exclude series that you don't want to be considered for the script
-nohl:
- dry_run: false
- maximum_searches: 10
- radarr:
- - name: radarr_1
- paths:
- -
- exclude_profiles:
- include_profiles:
- - Any
- sonarr:
- - name: sonarr_1
- paths:
- -
- exclude_profiles:
- include_profiles:
- -
- exclude_series:
- - name: sonarr_2
- paths:
- -
- exclude_profiles:
- include_profiles:
- -
- exclude_series:
-
-# This script's primary purpose is to monitor the queue of Radarr/Sonarr for stuck torrents, and to move them to a different category so that qBittorrent Manager can deal with them as needed
-# The secondary purpose of this script is to monitor a category for torrents that report as missing files and to move them to a different category so that qBittorrent Manager can deal with them as needed
-queinatorr:
- log_level: info
- dry_run: false
- qbittorrent:
- - name: qbittorrent_1
- # Required (String or List): If you have multiple instances of Radarr/Sonarr you can specify which one you want to use for this instance of qBittorrent
- starr_app: radarr_1
-      # Required: If you receive a message from Radarr/Sonarr that a torrent is ineligible for import, you can move it to a different category so that qBittorrent Manager can deal with it as needed
- move_category:
- radarr_1: completed-movies
- # Optional: If you want to move from the monitored category to the move_category if qBittorrent is reporting the torrent as missing files
- # Each Sonarr/Radarr instance can either be a string or a list
- # Note: This entire key is optional, if you don't want to move missing files, you can remove it
- move_missing:
- radarr_1: movies
- - name: qbittorrent_2
- # Required (String or List): If you have multiple instances of Radarr/Sonarr you can specify which one you want to use for this instance of qBittorrent
- starr_app:
- - sonarr_1
- - sonarr_2
-      # Required: If you receive a message from Radarr/Sonarr that a torrent is ineligible for import, you can move it to a different category so that qBittorrent Manager can deal with it as needed
- move_category:
- sonarr_1: completed-series
- sonarr_2: completed-anime
- # Optional: If you want to move from the monitored category to the move_category if qBittorrent is reporting the torrent as missing files
- # Each Sonarr/Radarr instance can either be a string or a list
- move_missing:
- sonarr_1: series
- sonarr_2: anime
-
-# This script's purpose is to monitor the health status of Radarr/Sonarr and to delete movies/series if they are reported as having been
-# removed on TheMovieDB/TheTVDB
-movie_deletarr:
- log_level: debug
- dry_run: true
-
-# This script's purpose is to add/remove labels in Plex based on tags in Sonarr/Radarr; it can also be used to add tags to Radarr/Sonarr based upon a certain label w/in Plex
-labelarr:
- dry_run: false
- log_level: info
-  # This option is used to sync labels from Plex to Radarr/Sonarr
- add_from_plex: false
- radarr:
- - name: radarr_1
- sonarr:
- - name: sonarr_1
- # The label that you wish to sync from/to Plex
- labels:
- - kids
-
-renamer_cleanarr:
- log_level: info
- dry_run: true
- asset_folders: false
- library_names:
- - Movies
- - Anime Movies
- ignore_collections:
- - "Star Wars Universe"
- - "DC Animated Universe"
- - "Alien / Predator"
- - "Rocky / Creed"
- - "MonsterVerse"
- - "Star Trek"
- - "X-Men Universe"
- - "In Association With Marvel"
- - "View Askewniverse"
- - "The Mummy Universe"
- - "Middle Earth"
- - "In Association With DC"
- - "Wizarding World"
- - "Fast & Furious"
- - "The Spiderverse"
- - "Marvel Cinematic Universe"
- - "The Conjuring Universe"
- - "The Batman Universe"
- - "Collectionless"
- - "DC Extended Universe"
- assets_paths:
- - /mnt/user/appdata/plex-meta-manager/assets
- media_paths:
- - /mnt/user/data/media/anime movies/
- - /mnt/user/data/media/documentary movies/
- - /mnt/user/data/media/movies/
- - /mnt/user/data/media/anime series/
- - /mnt/user/data/media/animated series/
- - /mnt/user/data/media/documentary series/
- - /mnt/user/data/media/series/
diff --git a/python-scripts/cutoff.py b/python-scripts/cutoff.py
deleted file mode 100644
index 9961ab2..0000000
--- a/python-scripts/cutoff.py
+++ /dev/null
@@ -1,293 +0,0 @@
-# _____ _ __ __
-# / ____| | | / _|/ _|
-# | | _ _| |_ ___ | |_| |_ _ __ _ _
-# | | | | | | __/ _ \| _| _| '_ \| | | |
-# | |___| |_| | || (_) | | | |_| |_) | |_| |
-# \_____\__,_|\__\___/|_| |_(_) .__/ \__, |
-# | | __/ |
-# |_| |___/
-# ===================================================================================================
-# Author: Drazzilb
-# Description: This script will print a list of movies that are below the cutoff score for a given
-# cutoff custom format score. It will also optionally tag the movies that are below the cutoff
-# score with a tag of your choosing. This script is useful for finding movies that are below
-# the cutoff score that you may want to upgrade to a better quality.
-# Usage: python3 cutoff.py
-# Requirements: requests, tqdm
-# Version: 2.0.0
-# License: MIT License
-# ===================================================================================================
-
-import requests
-import os
-from tqdm import tqdm
-
-
-radarr_url = 'http://localhost:7878'
-radarr_api = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
-
-quality_profile = 'quality_profile_name'
-cutoff_score = 1850
-ignore_tags = ['tag1', 'tag2']
-tagging = 'Yes'
-requesting = 'Yes'
-tracker_indexer = 'tracker/indexer_name'
-release_group = 'release_group_name'
-
-debug = False
-dry_run = True
-
-RED = '\033[31m'
-RESET = '\033[0m'
-
-def getMovies():
- """
- Get list of movies from Radarr
- Returns:
- list: List of movies
- """
- movies = requests.get(radarr_url + "/api/v3/movie",
- headers={"X-Api-Key": radarr_api}).json()
- return movies
-
-def getMovieFile(movie_id):
- """
- Get movie file from Radarr
- Parameters:
- movie_id (int): ID of movie
- Returns:
- dict: Movie file
- """
- moviefile = requests.get(
- radarr_url + f"/api/v3/moviefile?movieId={movie_id}", headers={"X-Api-Key": radarr_api}).json()
- return moviefile
-
-def getQualityProfiles():
- """
- Get list of quality profiles from Radarr
- Returns:
- list: List of quality profiles
- """
- quality_profiles = requests.get(
- radarr_url + "/api/v3/qualityprofile", headers={"X-Api-Key": radarr_api}).json()
- return quality_profiles
-
-def getTags():
- """
- Get list of tags from Radarr
- Returns:
- list: List of tags
- """
- tags = requests.get(radarr_url + "/api/v3/tag",
- headers={"X-Api-Key": radarr_api}).json()
- return tags
-
-def get_tag_id(tag_name):
- """
- Get tag ID from Radarr
- Parameters:
- tag_name (str): Name of tag
- Returns:
- int: Tag ID
- """
- response = requests.get(radarr_url + "/api/v3/tag",
- headers={"X-Api-Key": radarr_api})
- response.raise_for_status()
- for tag in response.json():
- if tag["label"] == tag_name:
- return tag["id"]
- return None
-
-
-def add_tag(tag_name):
- """
- Add tag to Radarr
- Parameters:
- tag_name (str): Name of tag
- """
- response = requests.post(radarr_url + "/api/v3/tag",
- headers={"X-Api-Key": radarr_api}, json={"label": tag_name})
- response.raise_for_status()
-
-
-def tag_movie(movie_id, tag_id):
- """
- Tag movie in Radarr
- Parameters:
- movie_id (int): ID of movie
- tag_id (int): ID of tag
- """
- response = requests.put(radarr_url + f"/api/v3/movie/editor", headers={
- "X-Api-Key": radarr_api}, json={"movieIds": [movie_id], "tags": [tag_id], "applyTags": "add"})
- response.raise_for_status()
-
-
-def untag_movie(movie_id, tag_id):
- """
- Untag movie in Radarr
- Parameters:
- movie_id (int): ID of movie
- tag_id (int): ID of tag
- """
- response = requests.put(radarr_url + f"/api/v3/movie/editor", headers={"X-Api-Key": radarr_api}, json={
- "movieIds": [movie_id], "tags": [tag_id], "applyTags": "remove"})
- response.raise_for_status()
-
-
-def main():
- """
- Main function for the script.
- """
- tag_id = None
- tag_name = None
- movies_printed = 0
- untagged_movies = 0
- tagged_movies = 0
- total_movies = 0
- cutoff_unmet = 0
- cutoff_met = 0
- if tagging == 'Yes':
- if dry_run:
- print(f"Dry Run = {dry_run}")
- tag_name = "cutoff-unmet"
- print(f"Retrieving tag ID for {tag_name}")
- tag_id = get_tag_id(tag_name)
- if not tag_id:
- print(f"Creating new tag {tag_name}")
- add_tag(tag_name)
- tag_id = get_tag_id(tag_name)
- if tag_id:
- print(f"Tag ID for {tag_name} is {tag_id}")
-
- movies = getMovies()
- tags = getTags()
- movies_without_tags = [movie for movie in movies if not any(
- tag['label'] in ignore_tags for tag in tags if tag['id'] in movie['tags'])]
- quality_profiles = getQualityProfiles()
- script_dir = os.path.dirname(os.path.abspath(__file__))
- logs_dir = os.path.join(script_dir, 'logs')
- os.makedirs(logs_dir, exist_ok=True)
- with open(os.path.join(logs_dir, 'scored_movies.txt'), 'w') as f:
- below_cutoff_count = 0
- for profile in quality_profiles:
- if profile['name'] == quality_profile:
- name_score_dict = {}
- for format in profile['formatItems']:
- name_score_dict[format['name']] = format['score']
- movies_to_print = []
- if requesting == 'Yes':
- for movie in tqdm(movies_without_tags, desc="Processing Movies to print..."):
- if movie['hasFile'] == True:
- movie_id = movie['id']
- if debug:
- print(
- f"Calculating score for movie: {movie['title']}")
- moviefile = getMovieFile(movie_id)
- movie_score = 0
- for format in moviefile:
- for custom_format in format['customFormats']:
- format_name = custom_format['name']
- if format_name in name_score_dict:
- if debug:
- print(
- f"\tFormat: {format_name} Score: {name_score_dict[format_name]}")
- movie_score += name_score_dict[format_name]
- if movie_score <= cutoff_score:
- movies_to_print.append(
- (
- movie['title'],
- movie['year'],
- movie_score,
- movie.get('tmdbId'),
- movie.get('imdbId')
- )
- )
- movies_printed += 1
- dry_run_print = []
- if tagging == 'Yes':
- for movie in tqdm(movies, desc="Processing Movies to tag..."):
- movie_id = movie['id']
- if movie['hasFile'] == True:
- moviefile = getMovieFile(movie_id)
- movie_score = 0
- for format in moviefile:
- for custom_format in format['customFormats']:
- format_name = custom_format['name']
- if format_name in name_score_dict:
- movie_score += name_score_dict[format_name]
- if movie_score <= cutoff_score and tag_id not in movie['tags']:
- if dry_run:
- dry_run_print.append(
- f"Would tag movie: {movie['title']} with: {tag_name}")
- else:
- tag_movie(movie_id, tag_id)
- tagged_movies += 1
- elif movie_score > cutoff_score and tag_id in movie['tags']:
- if dry_run:
- dry_run_print.append(
- f"Would untag movie: {movie['title']} with: {tag_name}")
- else:
- untag_movie(movie_id, tag_id)
- untagged_movies += 1
- if movie_score <= cutoff_score:
- cutoff_unmet += 1
- else:
- cutoff_met += 1
- total_movies += 1
- if dry_run:
- for dry_run_line in dry_run_print:
- print(dry_run_line)
- movies_to_print = sorted(movies_to_print, key=lambda x: ' '.join(
- [word for word in x[0].split() if word.lower() not in ['the', 'an', 'a']]))
-            print(f'{" Statistics ":*^40}', file=f)
- print(f"Total movies printed: {movies_printed}", file=f)
- print(f"Total movies tagged: {tagged_movies}", file=f)
- print(f"Total movies untagged: {untagged_movies}", file=f)
- print(f"Total movies: {total_movies}", file=f)
- print(
- f"Total movies below cutoff score: {cutoff_unmet}", file=f)
- print(f"Total movies above cutoff score: {cutoff_met}", file=f)
- print(
- f"Percentage of movies below cutoff score: {round(cutoff_unmet / total_movies * 100, 2)}%", file=f)
- print(f'*' * 40, file=f)
- print('', file=f)
- if requesting == 'Yes':
- print(
- f"Below is a list of movies below the cutoff score of {cutoff_score}\nThese are formatted to help make requests on {tracker_indexer}", file=f)
- for movie in movies_to_print:
- movie_title = movie[0]
- movie_year = movie[1]
- movie_score = movie[2]
- tmdb_id = movie[3]
- imdb_id = movie[4]
- print(
- f"\tMovie: {movie_title} ({movie_year}) has a total score of " + RED + f"{movie_score}" + RESET)
- print(f'*' * 40, file=f)
- if requesting == 'Yes':
- print(
- f"{movie_title} has a total score of {movie_score}\n", file=f)
- print(f"Movie IMDb ID: {imdb_id}", file=f)
- print(f"Movie TMDB ID: {tmdb_id}", file=f)
- print(f"{movie_title} ({movie_year})", file=f)
- print(
- f"{movie_title} ({movie_year}) - {release_group}", file=f)
- print(
- f"Requesting {movie_title} ({movie_year}) from {release_group}.\nThank you.", file=f)
- print('')
-    print(f'{" Statistics ":*^40}')
- print(f'Total movies: {total_movies}')
- print(f"Total movies printed: {movies_printed}")
- print(f"Total movies tagged: {tagged_movies}")
- print(f"Total movies untagged: {untagged_movies}")
- print(f"Total movies below cutoff score: {cutoff_unmet}")
- print(f"Total movies above cutoff score: {cutoff_met}")
- print(
- f"Percentage of movies below cutoff score: {round(cutoff_unmet / total_movies * 100, 2)}%")
- print(f'*' * 40)
-
-
-if __name__ == '__main__':
- """
- Main entry point for the script.
- """
- main()
diff --git a/python-scripts/labelarr.py b/python-scripts/labelarr.py
deleted file mode 100644
index 03d16cb..0000000
--- a/python-scripts/labelarr.py
+++ /dev/null
@@ -1,292 +0,0 @@
-# _ _ _
-# | | | | | |
-# | | __ _| |__ ___| | __ _ _ __ _ __
-# | | / _` | '_ \ / _ \ |/ _` | '__| '__|
-# | |___| (_| | |_) | __/ | (_| | | | |
-# |______\__,_|_.__/ \___|_|\__,_|_| |_|
-# ======================================================================================
-# Author: Drazzilb
-# Description: A script to sync labels between Plex and Radarr/Sonarr
-# Usage: python3 /path/to/labelarr.py
-# Requirements: requests, pyyaml, plexapi
-# License: MIT License
-# ======================================================================================
-
-script_version = "2.2.0"
-
-from plexapi.exceptions import BadRequest, NotFound
-from modules.discord import discord, field_builder
-from modules.arrpy import arrpy_py_version, StARR
-from modules.formatting import create_table
-from modules.logger import setup_logger
-from plexapi.server import PlexServer
-from modules.version import version
-from modules.config import Config
-from unidecode import unidecode
-from tqdm import tqdm
-import html
-import json
-import time
-import re
-
-script_name = "labelarr"
-config = Config(script_name)
-log_level = config.log_level
-logger = setup_logger(log_level, script_name)
-version(script_name, script_version, arrpy_py_version, logger, config)
-
-words_to_remove = [
- "(US)",
-]
-year_regex = re.compile(r"\((19|20)\d{2}\)")
-illegal_chars_regex = re.compile(r'[<>:"/\\|?*\x00-\x1f]+')
-remove_special_chars = re.compile(r'[^a-zA-Z0-9\s]+')
-
-def normalize_titles(title):
- normalized_title = title
- normalized_title = year_regex.sub('', normalized_title)
- normalized_title = illegal_chars_regex.sub('', normalized_title)
- normalized_title = unidecode(html.unescape(normalized_title))
- normalized_title = normalized_title.rstrip()
- normalized_title = normalized_title.replace('&', 'and')
- normalized_title = re.sub(remove_special_chars, '', normalized_title).lower()
- normalized_title = normalized_title.replace(' ', '')
- return normalized_title
-
-def get_plex_data(plex, instance_type):
- library_names = [name.title() for name in config.library_names]
- logger.debug(f"Library Names: {library_names}")
- if instance_type == "Radarr":
- type = "movie"
- elif instance_type == "Sonarr":
- type = "show"
- sections = plex.library.sections()
- plex_data = {}
- search_sections = []
- for section in sections:
- section_type = section.type
- if section_type == type and section.title in library_names or not library_names:
- search_sections.append(section)
- with tqdm(total=len(search_sections), desc=f"Getting '{instance_type}' data from Plex", disable=False) as pbar_sections:
- for library in search_sections:
- items = library.all()
- with tqdm(total=len(items), desc=f"Processing '{library.title}' library", leave=False, disable=False) as pbar_items:
- for item in items:
- labels = [str(label).lower() for label in item.labels]
- plex_data[item.title] = {'title': item.title, 'year': item.year, 'labels': labels}
- pbar_items.update(1)
- pbar_sections.update(1)
- logger.debug(json.dumps(plex_data, indent=4, sort_keys=True))
- return plex_data
-
-def sync_labels_to_plex(plex, media, instance_type, app, user_labels, dry_run, plex_data):
- logger.debug("Syncing labels to Plex")
- message = []
- items_to_sync = {}
- retries = 0
- user_labels = [label.lower() for label in user_labels]
- label_to_tag = {}
- while retries < 3:
- for label in user_labels:
- tag_id = app.get_tag_id_from_name(label)
- if not tag_id:
- logger.info(f"Tag ID not found for '{label}'. Creating tag...")
- tag_id = app.create_tag(label)
- logger.debug(f"Tag: {label} | Tag ID: {tag_id}")
- if tag_id:
- label_to_tag[label] = tag_id
- else:
- logger.debug(f"Tag: {label} | Tag ID: {tag_id}")
- label_to_tag[label] = tag_id
- # match labels to tags
- if label_to_tag:
- retries = 3
- for item in media:
- title = item['title']
- normalized_title = normalize_titles(title)
- year = item['year']
- tags = item['tags']
- for plex_item in plex_data:
- plex_title = plex_data[plex_item]['title']
- plex_year = plex_data[plex_item]['year']
- plex_labels = plex_data[plex_item]['labels']
- normalized_plex_title = normalize_titles(plex_title)
- if normalized_title == normalized_plex_title and year == plex_year:
- for label, tag_id in label_to_tag.items():
- if tag_id in tags and label not in plex_labels:
- if title not in items_to_sync:
- items_to_sync[title] = {'title': plex_title, 'year': plex_year, 'add_remove': "add", 'labels': []}
- if label not in items_to_sync[title]['labels']:
- items_to_sync[title]['labels'].append(label)
- elif tag_id not in tags and label in plex_labels:
- if title not in items_to_sync:
- items_to_sync[title] = {'title': plex_title, 'year': plex_year, 'add_remove': "remove", 'labels': []}
- if label not in items_to_sync[title]['labels']:
- items_to_sync[title]['labels'].append(label)
- else:
- logger.error(f"Label: {label} | Tag ID: {tag_id} | Tag ID not found in {instance_type} | Retrying...")
- retries += 1
- continue
- logger.debug(f"Items to sync: {len(items_to_sync)}")
- logger.debug(json.dumps(items_to_sync, indent=4, sort_keys=True))
- if items_to_sync:
- for title, data in items_to_sync.items():
- title = data['title']
- year = data['year']
- add_remove = data['add_remove']
- labels = data['labels'] # Updated variable name to 'labels'
- if instance_type == "Sonarr":
- type = "show"
- elif instance_type == "Radarr":
- type = "movie"
- if not dry_run:
- try:
- if add_remove == "add":
- for label in labels: # Iterate over the labels
- plex.library.search(title=title, year=year, libtype=type)[0].addLabel(label)
- message.append(f"Label: {label} | Title: {title} | Year: {year} | Add/Remove: {add_remove}")
- elif add_remove == "remove":
- for label in labels: # Iterate over the labels
- plex.library.search(title=title, year=year, libtype=type)[0].removeLabel(label)
- message.append(f"Label: {label} | Title: {title} | Year: {year} | Add/Remove: {add_remove}")
- except NotFound:
- logger.error(f"Label: {label} | Title: {title} | Year: {year} | Add/Remove: {add_remove} | Title not found in Plex")
- continue
- else:
- message.append(f"DRY RUN: Label: {label} | Title: {title} | Year: {year} | Add/Remove: {add_remove}")
- else:
- logger.info("No items to sync")
- return message
-
-def sync_labels_from_plex(plex, media, instance_type, app, labels, dry_run, plex_data):
- items_to_sync = {'add': [], 'remove': []}
- logger.info(f"Processing '{instance_type}' data")
- message = []
- for label in labels:
- tag_id = app.check_and_create_tag(label)
- for plex_item in plex_data:
- plex_title = plex_data[plex_item]['title']
- plex_year = plex_data[plex_item]['year']
- plex_labels = plex_data[plex_item]['labels']
- normalized_plex_title = normalize_titles(plex_title)
- for item in media:
- title = item['title']
- normalized_title = normalize_titles(title)
- year = item['year']
- media_id = item['id']
- tags = item['tags']
- if normalized_title == normalized_plex_title and year == plex_year:
- # Check if label is in Plex but not tagged in ARR
- if label in plex_labels and tag_id not in tags:
- # If tag_id is not in the add dict, add it
- if tag_id not in items_to_sync['add']:
- items_to_sync['add'][tag_id] = {'tag_id': tag_id, 'media_ids': []}
- # Add media_id to the add dict
- items_to_sync['add'][tag_id]['media_ids'].append(media_id)
- message.append(f"Label: {label} | Title: {title} | Year: {year} | Add/Remove: add")
- # Check if label is not in Plex but is tagged in ARR
- elif label not in plex_labels and tag_id in tags:
- # If tag_id is not in the remove dict, add it
- if tag_id not in items_to_sync['remove']:
- items_to_sync['remove'][tag_id] = {'tag_id': tag_id, 'media_ids': []}
- # Add media_id to the remove dict
- items_to_sync['remove'][tag_id]['media_ids'].append(media_id)
- message.append(f"Label: {label} | Title: {title} | Year: {year} | Add/Remove: remove")
- if items_to_sync:
- for item in items_to_sync:
- if item == 'add':
- for tag_id in items_to_sync[item]:
- tags = tag_id['tag_id']
- media_ids = tag_id['media_ids']
- if tags and media_ids:
- if not dry_run:
- app.add_tags(media_ids, tags)
- elif item == 'remove':
- for tag_id in items_to_sync[item]:
- tags = tag_id['tag_id']
- media_ids = tag_id['media_ids']
- if tags and media_ids:
- if not dry_run:
- app.remove_tags(media_ids, tags)
- return message
-
-def handle_messages(final_output):
- if final_output:
- for message in final_output:
- logger.info(message)
-
-def notification(final_output):
- fields = field_builder(final_output, name="Tagged items")
- if fields:
- for field_number, field in fields.items():
- discord(field, logger, config, script_name, description=None, color=0xFFA500, content=None)
-
-def main():
- data = [
- ["Script Settings"],
- ]
- create_table(data, log_level="debug", logger=logger)
- logger.debug(f'{"Dry_run:":<20}{config.dry_run if config.dry_run else "False"}')
- logger.debug(f'{"Log level:":<20}{log_level if log_level else "INFO"}')
- logger.debug(f'{"Labels:":<20}{config.labels if config.labels else "Not Set"}')
- logger.debug(f'{"Add From Plex:":<20}{config.add_from_plex if config.add_from_plex else "False"}')
- logger.debug(f'{"Library Names:":<20}{config.library_names if config.library_names else "Not Set"}')
- dry_run = config.dry_run
- labels = config.labels
- if config.dry_run:
- data = [
- ["Dry Run"],
- ["NO CHANGES WILL BE MADE"]
- ]
- create_table(data, log_level="info", logger=logger)
- if config.plex_data:
- for data in config.plex_data:
- api_key = data.get('api', '')
- url = data.get('url', '')
- try:
- plex = PlexServer(url, api_key)
- except BadRequest:
- logger.error("Plex URL or API Key is incorrect")
- exit()
- instance_data = {
- 'Radarr': config.radarr_data,
- 'Sonarr': config.sonarr_data
- }
- final_output = []
- for instance_type, instances in instance_data.items():
- for instance in instances:
- instance_name = instance['name']
- url = instance['url']
- api = instance['api']
- script_name = None
- if instance_type == "Radarr" and config.radarr:
- data = next((data for data in config.radarr if data['name'] == instance_name), None)
- if data:
- script_name = data['name']
- elif instance_type == "Sonarr" and config.sonarr:
- data = next((data for data in config.sonarr if data['name'] == instance_name), None)
- if data:
- script_name = data['name']
- if script_name and instance_name == script_name:
- data = [
- ["Script Name", "Instance Name"],
- [script_name, instance_name]
- ]
- create_table(data, log_level="info", logger=logger)
- logger.debug(f"url: {url}")
- logger.debug(f"api: {'*' * (len(api) - 5)}{api[-5:]}")
- app = StARR(url, api, logger)
- media = app.get_media()
- plex_data = get_plex_data(plex, instance_type)
- if config.add_from_plex:
- final_output.extend(sync_labels_from_plex(plex, media, instance_type, app, labels, dry_run, plex_data))
- else:
- final_output.extend(sync_labels_to_plex(plex, media, instance_type, app, labels, dry_run, plex_data))
- handle_messages(final_output)
-
-if __name__ == "__main__":
- start_time = time.time()
- main()
- end_time = time.time()
- total_time = round(end_time - start_time, 2)
- logger.info(f"Total Time: {time.strftime('%H:%M:%S', time.gmtime(total_time))}")
diff --git a/python-scripts/modules/config.py b/python-scripts/modules/config.py
deleted file mode 100644
index 591a036..0000000
--- a/python-scripts/modules/config.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import pathlib
-import yaml
-
-base_dir = pathlib.Path(__file__).parent.parent
-config_path = f'{base_dir}/config.yml'
-
-class Config:
- def __init__(self, script_name):
- self.config_path = config_path
- self.script_name = script_name
- self.load_config()
-
- def load_config(self):
- # Read config from the YAML file
- with open(self.config_path, "r") as file:
- config = yaml.safe_load(file)
-
- # Load config into instance variables
- self.global_data = config['global']
- try:
- self.discord = config['discord']
- except KeyError:
- self.discord = {}
- self.script_data = config.get(f'{self.script_name}', None)
- if self.script_data is None:
- raise ValueError(f"Script data for '{self.script_name}' not found in the config.")
-
-
- # Global variables
- self.radarr_data = self.global_data.get('radarr', {}) # Use empty dict if radarr data is not found
- self.sonarr_data = self.global_data.get('sonarr', {}) # Use empty dict if sonarr data is not found
- self.qbit_data = self.global_data.get('qbittorrent', {}) # Use empty dict if qbit data is not found
- self.plex_data = self.global_data.get('plex', {}) # Use empty dict if plex data is not found
-
- # Typical variables
- self.log_level = self.script_data.get('log_level', 'info').lower() # Use 'info' as default log level if not provided
- self.dry_run = self.script_data.get('dry_run', False) # Use False as default value for dry_run if not provided
- self.asset_folders = self.script_data.get('asset_folders', []) # Use empty list as default value for asset_folders if not provided
- self.radarr = self.script_data.get('radarr', False) # Use False as default value for radarr if not provided')
- self.sonarr = self.script_data.get('sonarr', False) # Use False as default value for sonarr if not provided')
- self.qbit = self.script_data.get('qbittorrent', False) # Use False as default value for qbit if not provided')
-
- # Plex variables
- self.library_names = self.script_data.get('library_names', []) # Use empty list as default value for library_names if not provided
- self.ignore_collections = self.script_data.get('ignore_collections', []) # Use empty list as default value for ignore_collections if not provided
-
- # Renamer variables
- self.use_plex = self.script_data.get('use_plex', False) # Use False as default value for use_plex if not provided
- self.source_dir = self.script_data.get('source_dir', '') # Use empty string as default value for source_dir if not provided
- self.source_overrides = self.script_data.get('source_overrides', []) # Use empty list as default value for source_override if not provided
- self.destination_dir = self.script_data.get('destination_dir', '') # Use empty string as default value for destination_dir if not provided
- self.movies_threshold = self.script_data.get('movies_threshold', 0) # Use 0 as default value for movies_threshold if not provided
- self.series_threshold = self.script_data.get('series_threshold', 0) # Use 0 as default value for series_threshold if not provided
- self.collection_threshold = self.script_data.get('collection_threshold', 0) # Use 0 as default value for collection_threshold if not provided
- self.action_type = self.script_data.get('action_type', 'move') # Use 'move' as default value for action_type if not provided
- self.print_only_renames = self.script_data.get('print_only_renames', False) # Use False as default value for print_only_renames if not provided
-
- # unmatched-assets variables
- self.assets_path = self.script_data.get('assets_path', '') # Use empty string as default value for assets_path if not provided
- self.media_paths = self.script_data.get('media_paths', []) # Use empty list as default value for media_paths if not provided
-
- # nohl
- self.movies = self.script_data.get('movies', False) # Use False as default value for movies if not provided
- self.series = self.script_data.get('series', False) # Use False as default value for tv_shows if not provided
- self.maximum_searches = self.script_data.get('maximum_searches', 0) # Use 0 as default value for maximum_searches if not provided
-
- #labelarr
- self.labels = self.script_data.get('labels', '[]')
- self.add_from_plex = self.script_data.get('add_from_plex', False)
-
diff --git a/python-scripts/modules/discord.py b/python-scripts/modules/discord.py
deleted file mode 100644
index 35b6d92..0000000
--- a/python-scripts/modules/discord.py
+++ /dev/null
@@ -1,147 +0,0 @@
-import requests
-import json
-import random
-import requests
-from datetime import datetime
-
-def get_discord_data(config, script_name, logger):
- discord = config.discord
- notifiarr_webhook = discord.get('notifiarr_webhook', None)
- script_notification_info = discord.get(script_name, {})
-
- if notifiarr_webhook:
- channel_id = script_notification_info.get('channel_id', None)
- webhook = notifiarr_webhook
- if not channel_id:
- webhook = None
- logger.error("Discord channel ID is missing. Cannot send Discord notification.")
- else:
- webhook = script_notification_info.get('discord_webhook', None)
- channel_id = None
- return webhook, channel_id
-
-def get_message_data(logger):
- response = requests.get("https://raw.githubusercontent.com/Drazzilb08/userScripts/master/jokes.txt")
- if response.status_code == 200:
- jokes = response.text.splitlines()
- random_joke = random.choice(jokes)
- timestamp = datetime.utcnow().isoformat()
- else:
- logger.error(f"Failed to get jokes from GitHub. Status code: {response.status_code} with response: {response.text}")
- random_joke = "Error: Failed to get jokes from GitHub."
-
- return random_joke, timestamp
-
-def discord(fields, logger, config, script_name, description, color, content):
- webhook, channel_id = get_discord_data(config, script_name, logger)
- if webhook:
- random_joke, timestamp = get_message_data(logger)
- try:
- if fields and webhook:
- if webhook.startswith("https://notifiarr.com/api/v1/notification/passthrough"):
- if channel_id:
- # Convert color from hex to 6 digit html
- color = f"{color:06x}"
- payload = {
- "notification": {
- "update": False,
- "name": f"{script_name.capitalize()}",
- },
- "discord": {
- "color": f"{color}",
- "text": {
- "description": f"{description}",
- "content": f"{content}",
- "fields": fields,
- "footer": f"Powered by: Drazzilb | {random_joke}",
- },
- "ids": {
- "channel": f"{channel_id}",
- }
- }
- }
- else:
- logger.error("Discord channel ID is missing. Cannot send Discord notification.")
- logger.error(f"Discord channel ID: {channel_id}")
- return
- if not description:
- del payload["discord"]["text"]["description"]
- if not content:
- del payload["discord"]["text"]["content"]
- if not fields:
- del payload["discord"]["text"]["fields"]
- else:
- payload = {
- "username": "Notification Bot",
- "embeds": [
- {
- "title": f"{script_name.capitalize()}",
- "description": f"{description}",
- "color": color,
- "content": f"{content}",
- "fields": fields,
- "footer": {
- "text": f"Powered by: Drazzilb | {random_joke}"
- },
- "timestamp": timestamp,
- }
- ]
- }
- # if description is empty then remove it from the payload
- if not description:
- del payload["embeds"][0]["description"]
- # if fields is empty then remove it from the payload
- if not fields:
- del payload["embeds"][0]["fields"]
- if not content:
- del payload["embeds"][0]["content"]
- logger.debug(json.dumps(payload, indent=4))
- response = requests.post(webhook, json=payload)
- if response.status_code == 204 or response.status_code == 200:
- logger.debug("Discord notification sent.")
- return
- else:
- logger.error(f"Webhook failed: {webhook}")
- logger.error(f"Discord notification failed with status code: {response.status_code} with response: {response.text}")
- else:
- logger.error("Payload is empty. Cannot send Discord notification.")
- except Exception as e:
- logger.error(f"Error: {e}")
- return
- else:
- logger.debug(f"Discord webhook is missing. Cannot send Discord notification.")
- return
-
-def content_builder(discord_messages):
- pass
-
-def field_builder(discord_messages, name):
- discord_message = '\n'.join(discord_messages)
- modified_discord_messages = []
- current_message = ""
- for line in discord_message.splitlines():
- if len(current_message) + len(line) + 1 <= 1024:
- current_message += line + "\n"
- else:
- modified_discord_messages.append(current_message)
- current_message = line + "\n"
- if current_message:
- modified_discord_messages.append(current_message)
- discord_messages_dict = {}
- field_count = 1
- if len(modified_discord_messages) > 5:
- for i in range(0, len(modified_discord_messages), 5):
- discord_messages_dict[field_count] = modified_discord_messages[i:i + 5]
- field_count += 1
- else:
- discord_messages_dict[field_count] = modified_discord_messages
- fields = {}
- for field_number, messages in discord_messages_dict.items():
- fields[field_number] = []
- for message in messages:
- fields[field_number].append({
- "name": f"{name}",
- "value": f"```{message}```",
- "inline": False
- })
- return fields
diff --git a/python-scripts/modules/formatting.py b/python-scripts/modules/formatting.py
deleted file mode 100644
index c1fe4c9..0000000
--- a/python-scripts/modules/formatting.py
+++ /dev/null
@@ -1,70 +0,0 @@
-def create_table(data, log_level, logger):
- if not data:
- return "No data provided."
-
- num_rows = len(data)
- num_cols = len(data[0])
-
- # Calculate column widths
- col_widths = [max(len(str(data[row][col])) for row in range(num_rows)) for col in range(num_cols)]
-
- # Add two spaces padding to each cell
- col_widths = [max(width + 2, 5) for width in col_widths] # Set minimum width of 5 for each column
-
- # Calculate total table width without including padding
- total_width = sum(col_widths) + num_cols - 1 # Separator widths between columns
-
- # Ensure minimum width of 40
- if total_width < 40:
- additional_width = 40 - total_width
- extra_width_per_col = additional_width // num_cols
- remainder = additional_width % num_cols
-
- for i in range(num_cols):
- col_widths[i] += extra_width_per_col
- if remainder > 0:
- col_widths[i] += 1
- remainder -= 1
-
- # Recalculate total table width
- total_width = sum(col_widths) + num_cols - 1
-
- # Create the table
- table = ""
-
- # Top border
- table += "*" * (total_width + 2) + "\n"
-
- for row in range(num_rows):
- table += "*"
- for col in range(num_cols):
- cell_content = str(data[row][col])
- padding = col_widths[col] - len(cell_content)
- left_padding = padding // 2
- right_padding = padding - left_padding
-
- # Determine the separator for the cell
- separator = '|' if col < num_cols - 1 else '*'
-
- table += f"{' ' * left_padding}{cell_content}{' ' * right_padding}{separator}"
- table += "\n"
- if row < num_rows - 1:
- table += "*" + "-" * (total_width) + "*\n"
-
- # Bottom border
- table += "*" * (total_width + 2) + "\n"
-
- log_functions = {
- "info": logger.info,
- "debug": logger.debug,
- "warning": logger.warning,
- "error": logger.error,
- "critical": logger.critical
- }
- lines = table.split('\n')
- log_function = log_functions.get(log_level)
-
- if log_function:
- for line in lines:
- if line.strip():
- log_function(line)
\ No newline at end of file
diff --git a/python-scripts/modules/logger.py b/python-scripts/modules/logger.py
deleted file mode 100644
index cf94a84..0000000
--- a/python-scripts/modules/logger.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import os
-import time
-import logging
-import logging.handlers
-import pathlib
-
-base_dir = pathlib.Path(__file__).parent.parent
-
-def setup_logger(log_level, script_name):
- """
- Setup the logger.
- Parameters:
- log_level (str): The log level to use
- Returns:
- A logger object for logging messages.
- """
- log_dir = f'{base_dir}/logs'
- if not os.path.exists(log_dir):
- os.makedirs(log_dir)
- today = time.strftime("%Y-%m-%d")
- log_file = f"{log_dir}/{script_name}_{today}.log"
- logger = logging.getLogger()
- log_level = log_level.upper()
- if log_level == 'DEBUG':
- logger.setLevel(logging.DEBUG)
- elif log_level == 'INFO':
- logger.setLevel(logging.INFO)
- elif log_level == 'CRITICAL':
- logger.setLevel(logging.CRITICAL)
- else:
- logger.critical(
- f"Invalid log level '{log_level}', defaulting to 'INFO'")
- logger.setLevel(logging.INFO)
- formatter = logging.Formatter(
- fmt='%(asctime)s %(levelname)s: %(message)s', datefmt='%I:%M %p')
- handler = logging.handlers.TimedRotatingFileHandler(
- log_file, when='midnight', interval=1, backupCount=3)
- handler.setFormatter(formatter)
- logger.addHandler(handler)
- formatter = logging.Formatter()
- console_handler = logging.StreamHandler()
- if log_level == 'debug':
- console_handler.setLevel(logging.DEBUG)
- elif log_level == 'info':
- console_handler.setLevel(logging.INFO)
- elif log_level == 'critical':
- console_handler.setLevel(logging.CRITICAL)
- logger.addHandler(console_handler)
- log_files = [f for f in os.listdir(log_dir) if os.path.isfile(
- os.path.join(log_dir, f)) and f.startswith(f"{script_name}_")]
- log_files.sort(key=lambda x: os.path.getmtime(
- os.path.join(log_dir, x)), reverse=True)
- for file in log_files[3:]:
- os.remove(os.path.join(log_dir, file))
- return logger
diff --git a/python-scripts/modules/version.py b/python-scripts/modules/version.py
deleted file mode 100644
index f55147f..0000000
--- a/python-scripts/modules/version.py
+++ /dev/null
@@ -1,129 +0,0 @@
-import requests
-from modules.discord import discord
-from datetime import datetime
-from modules.formatting import create_table
-
-def version(script_name, script_version, arrpy_py_version, logger, config):
- # Construct the URL for the GitHub raw file
- github_url = f"https://raw.githubusercontent.com/Drazzilb08/userScripts/master/python-scripts/{script_name}.py"
- arr_py_url = f"https://raw.githubusercontent.com/Drazzilb08/userScripts/master/python-scripts/modules/arrpy.py"
- # Send a GET request to the GitHub URL
- response = requests.get(github_url)
-
- # Check if the request was successful
- if response.status_code == 200:
- # Extract the version number from the GitHub file
- github_script_version = None
- for line in response.text.split("\n"):
- if "script_version =" in line or "version =" in line:
- github_script_version = line.split("=")[1].strip().strip('"') # Remove the quotes
- if arrpy_py_version:
- response = requests.get(arr_py_url)
- if response.status_code == 200:
- for line in response.text.split("\n"):
- if "arrpy_py_version =" in line:
- github_arrpy_py_version = line.split("=")[1].strip().strip('"') # Remove the quotes
- break
- github_download = "https://github.com/Drazzilb08/userScripts/archive/refs/heads/master.zip"
- description = f"Script version does not match GitHub version, please click the link \n{github_download}\n to download the latest version."
- color = 0xff0000
- script_version_int = int(script_version.replace(".", ""))
- github_script_version_int = int(github_script_version.replace(".", ""))
- if arrpy_py_version:
- arrpy_py_version_int = int(arrpy_py_version.replace(".", ""))
- github_arrpy_py_version_int = int(github_arrpy_py_version.replace(".", ""))
- else:
- arrpy_py_version_int = None
- github_arrpy_py_version_int = None
- # Compare the script version with the GitHub version
- data = [
- ["Script Version Data"]
- ]
- if script_version_int == github_script_version_int and arrpy_py_version_int == github_arrpy_py_version_int:
- logger.info("Script version matches GitHub version.")
- elif script_version_int < github_script_version_int and arrpy_py_version_int == github_arrpy_py_version_int:
- fields = build_fields(script_version, github_script_version, None, None)
- logger.error("Script version does not match GitHub version.")
- logger.error("Please update the script.")
- logger.error(version_output(script_version, github_script_version, None, None))
- discord(fields, logger, config, script_name, description, color, content=None)
- elif script_version_int == github_script_version_int and arrpy_py_version_int < github_arrpy_py_version_int:
- logger.error("Script version does not match GitHub version.")
- logger.error("Please update the script.")
- logger.error(version_output(script_version, github_script_version, arrpy_py_version, github_arrpy_py_version))
- fields = build_fields(None, None, arrpy_py_version, github_arrpy_py_version)
- discord(fields, logger, config, script_name, description, color, content=None)
- elif script_version_int < github_script_version_int and arrpy_py_version_int < github_arrpy_py_version_int:
- logger.error("Script version does not match GitHub version.")
- logger.error("Please update the script.")
- logger.error(version_output(script_version, github_script_version, arrpy_py_version, github_arrpy_py_version))
- fields = build_fields(script_version, github_script_version, arrpy_py_version, github_arrpy_py_version)
- discord(fields, logger, config, script_name, description, color, content=None)
- return
-
-def build_fields(script_version=None, github_script_version=None, arrpy_py_version=None, github_arrpy_py_version=None):
- fields = []
- if script_version and github_script_version and arrpy_py_version is None and github_arrpy_py_version is None:
- fields.append({
- "name": "Script Version",
- "value": f"```{script_version}```",
- "inline": False
- })
- fields.append({
- "name": "GitHub Version",
- "value": f"```{github_script_version}```",
- "inline": False
- })
-
- elif arrpy_py_version and github_arrpy_py_version and script_version is None and github_script_version is None:
- fields.append({
- "name": "arrpy.py Version",
- "value": f"```{arrpy_py_version}```",
- "inline": False
- })
- fields.append({
- "name": "GitHub Version",
- "value": f"```{github_arrpy_py_version}```",
- "inline": False
- })
- elif script_version and github_script_version and arrpy_py_version and github_arrpy_py_version:
- fields.append({
- "name": "Script Version",
- "value": f"```{script_version}```",
- "inline": False
- })
- fields.append({
- "name": "GitHub Version",
- "value": f"```{github_script_version}```",
- "inline": False
- })
- fields.append({
- "name": "arrpy.py Version",
- "value": f"```{arrpy_py_version}```",
- "inline": False
- })
- fields.append({
- "name": "GitHub Version",
- "value": f"```{github_arrpy_py_version}```",
- "inline": False
- })
-
- return fields
-
-def version_output(script_version=None, github_script_version=None, arrpy_py_version=None, github_arrpy_py_version=None):
- if script_version and github_script_version and arrpy_py_version is None and github_arrpy_py_version is None:
- data = [
- ["Script Version", "GitHub Version"],
- [script_version, github_script_version]
- ]
- elif arrpy_py_version and github_arrpy_py_version and script_version is None and github_script_version is None:
- data = [
- ["arrpy.py Version", "GitHub Version"],
- [arrpy_py_version, github_arrpy_py_version]
- ]
- elif script_version and github_script_version and arrpy_py_version and github_arrpy_py_version:
- data = [
- ["Script Version", "GitHub Version", "arrpy.py Version", "GitHub Version"],
- [script_version, github_script_version, arrpy_py_version, github_arrpy_py_version]
- ]
- return create_table(data)
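Note on the version helpers removed above: build_fields() assembled the Discord
embed fields and version_output() fed a two-row comparison into create_table().
A minimal sketch of the data shapes involved, using illustrative version strings
rather than values from a real run:

    # Hypothetical values; the layout mirrors the removed build_fields() output
    # for the case where only the script version is out of date.
    fields = [
        {"name": "Script Version", "value": "```2.1.0```", "inline": False},
        {"name": "GitHub Version", "value": "```2.1.1```", "inline": False},
    ]
    # version_output() built the equivalent comparison as table rows:
    data = [
        ["Script Version", "GitHub Version"],
        ["2.1.0", "2.1.1"],
    ]
    # fields were passed to discord(...) and data to create_table(data).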
diff --git a/python-scripts/nohl.py b/python-scripts/nohl.py
deleted file mode 100644
index 0fb355e..0000000
--- a/python-scripts/nohl.py
+++ /dev/null
@@ -1,473 +0,0 @@
-# _ _ _
-# | | | | |
-# _ __ ___ | |__| | | _ __ _ _
-# | '_ \ / _ \| __ | | | '_ \| | | |
-# | | | | (_) | | | | |____ _| |_) | |_| |
-# |_| |_|\___/|_| |_|______(_) .__/ \__, |
-# | | __/ |
-# |_| |___/
-# ===================================================================================================
-# Author: Drazzilb
-# Description: This script finds all media files that are not hardlinked and processes them in Radarr
-#              and Sonarr. This is useful for users who want 100% of their seeded files to be
-#              hardlinks.
-# Usage: python3 nohl.py
-# Requirements: Python 3.8+, requests
-# License: MIT License
-# ===================================================================================================
-
-script_version = "2.1.1"
-
-import os
-import re
-import sys
-import time
-import json
-from modules.config import Config
-from modules.logger import setup_logger
-from modules.arrpy import StARR
-from unidecode import unidecode
-from modules.arrpy import arrpy_py_version
-from modules.version import version
-from modules.discord import discord
-from modules.formatting import create_table
-
-script_name = "nohl"
-config = Config(script_name)
-log_level = config.log_level
-logger = setup_logger(log_level, script_name)
-version(script_name, script_version, arrpy_py_version, logger, config)
-
-illegal_chars_regex = re.compile(r"[^\w\s\-\(\)/.'’]+")
-season_regex = r"(?i)S(\d{2})E"
-episode_regex = r"(?:E|e)(\d{1,2})"
-title_regex = r".*\/([^/]+)\s\((\d{4})\).*"
-
-def find_no_hl_files(media_paths):
- no_hl_files = []
- for dir in media_paths:
- try:
- logger.info(f"Processing directory: {dir}")
- for root, dirs, files in os.walk(dir):
- for file in files:
- try:
- if file.endswith(".mkv") or file.endswith(".mp4"):
- file_path = os.path.join(root, file)
- if (os.path.isfile(file_path) and os.stat(file_path).st_nlink == 1):
- no_hl_files.append(file_path)
- except Exception as e:
- logger.warning(f"Error processing file: {file}. Error: {e}")
- except Exception as e:
- logger.warning(f"Error processing directory: {dir}. Error: {e}")
- return no_hl_files
-
-def process_instances(instance_type, url, api, nohl_files, include_profiles, exclude_profiles, dry_run, exclude_series):
- nohl_files.sort()
- media_data = []
- media_data_episodes = []
- app = StARR(url, api, logger)
- media = app.get_media()
- title = None
- year = None
- season_number = None
- season_number_modified = None
- episode = None
- if instance_type == 'Radarr':
- for file in nohl_files:
- try:
- title_match = re.match(title_regex, file)
- if title_match:
- title = title_match.group(1)
- year = int(title_match.group(2))
- except Exception as e:
- logger.warning(f"Error processing file: {file}. Error: {e}")
-            labeled_data = {'title': title, 'year': year}
-            media_data.append(labeled_data)
- if instance_type == 'Sonarr':
- for file in nohl_files:
- try:
- season_number_match = re.search(season_regex, file)
- if season_number_match:
- season_number = season_number_match.group(1)
- except Exception as e:
- logger.warning(f"Error processing file: {file}. Error: {e}")
- try:
- title = None
- match = re.match(title_regex, file)
- if match:
- title = match.group(1)
- year = int(match.group(2))
- except Exception as e:
- logger.warning(f"Error processing file: {file}. Error: {e}")
- try:
- episode_match = re.search(episode_regex, file)
- if episode_match:
- episode = int(episode_match.group(1))
- except Exception as e:
- logger.warning(f"Error processing file: {file}. Error: {e}")
- if season_number:
- if season_number.startswith('0'):
- season_number_modified = int(season_number[1:])
- else:
- season_number_modified = int(season_number)
- existing_dict = next((d for d in media_data if d['title'] == title and d['year'] == year), None)
- if existing_dict:
- for season_info in existing_dict['season_info']:
- if season_info['season_number'] == season_number_modified:
- season_info['episodes'].append(episode)
- break
- else:
- existing_dict['season_info'].append({
- 'season_number': season_number_modified,
- 'episodes': [episode]
- })
- else:
- try:
- media_data.append({
- 'title': title,
- 'year': year,
- 'season_info': [{
- 'season_number': season_number_modified,
- 'episodes': [episode]
- }]
- })
- except Exception as e:
- logger.warning(f"Error processing file: {file}. Error: {e}")
- logger.debug(f"Media Data: {json.dumps(media_data, indent=4)}")
- results = []
- file_ids = []
- quality_profiles = []
- quality_profiles = app.get_quality_profile_names()
- for media_data_item in media_data:
- media_data_item_title = media_data_item['title']
- media_data_item_title_modified = unidecode(media_data_item['title'])
- media_data_item_title_modified = illegal_chars_regex.sub("", media_data_item_title_modified)
- media_data_item_year = media_data_item['year']
- for media_item in media:
- quality_profile_id = None
- quality_profile_name = None
- media_item_title = media_item['title']
- media_item_title_modified = unidecode(re.sub(r' \(\d+\)', '', (media_item['title'])))
- media_item_title_modified = illegal_chars_regex.sub("", media_item_title_modified)
- media_item_year = media_item['year']
- media_item_id = media_item['id']
- media_item_monitored = media_item['monitored']
- quality_profile_id = media_item['qualityProfileId']
- if media_data_item_title_modified == media_item_title_modified and abs(media_data_item_year - media_item_year) == 1:
- logger.warning(f"Found match for {media_item_title} and {media_data_item_title} but years do not match. Media Year: {media_item_year}, File Year: {media_data_item_year}")
- strings_match = lambda s1, s2: re.sub(r'\W+', ' ', s1) == re.sub(r'\W+', ' ', s2).replace(':', '-')
- if strings_match(media_data_item_title_modified, media_item_title_modified) and media_data_item_year == media_item_year:
-                if exclude_series and media_item_title in exclude_series:
- logger.info(f"Skipping {media_item_title} because it is in the exclude list.")
- continue
- if quality_profiles:
- quality_profile_name = next(key for key, value in quality_profiles.items() if value == quality_profile_id)
-
- if (quality_profile_name in include_profiles if include_profiles else True) and (quality_profile_name not in exclude_profiles if exclude_profiles else True):
- if instance_type == 'Radarr':
- if media_item_monitored:
- try:
- file_ids = media_item['movieFile']['id']
- logger.debug(f"Found match: {media_item_title}, Media ID: {media_item_title}, File IDs: {file_ids}")
- results.append({
- 'title': media_item_title_modified,
- 'media_id': media_item_id,
- 'file_ids': file_ids
- })
-                            except (KeyError, TypeError):
- continue
- else:
- logger.info(f"Skipping {media_item_title} because it is not monitored.")
- continue
-
- elif instance_type == 'Sonarr':
- monitored_seasons = []
- media_data_seasons = [season['season_number'] for season in media_data_item['season_info']]
- media_seasons = media_item['seasons']
- episode_info = []
-
- if media_item_monitored:
- for s in media_seasons:
- season_monitored = s['monitored']
- if season_monitored:
- monitored_seasons.append(s['seasonNumber'])
- else:
- logger.debug(f"Skipping {media_item_title} because season {s['seasonNumber']} is not monitored.")
- common_seasons = list(set(monitored_seasons) & set(media_data_seasons))
- season_info = []
-
- for item in media_seasons:
- if item['seasonNumber'] in common_seasons:
- stats = item['statistics']
- episodeCount = stats['episodeFileCount']
- totalEpisodeCount = stats['totalEpisodeCount']
- season_pack = episodeCount == totalEpisodeCount
- season_info.append({
- 'season_number': item['seasonNumber'],
- 'season_pack': season_pack,
- 'episode_info': []
- })
-
- season_data = app.get_season_data(media_item_id)
-
- for item in season_info:
- season_number = item['season_number']
- season_pack = item['season_pack']
- episode_info = item['episode_info']
- episode_file_id = []
- episode_ids = []
- episode_numbers = []
-
- for season_data_item in season_data or []:
- if not season_data_item['monitored']:
- continue
- if season_data_item['seasonNumber'] == season_number:
- media_data_episodes = [episode for season in media_data_item['season_info'] if season['season_number'] == season_number for episode in season['episodes']]
- if season_pack:
- if season_data_item['episodeFileId'] not in episode_file_id:
- episode_file_id.append(season_data_item['episodeFileId'])
- elif not season_pack and season_data_item['episodeNumber'] in media_data_episodes:
- episode_file_id.append(season_data_item['episodeFileId'])
- episode_ids.append(season_data_item['id'])
- episode_numbers.append(season_data_item['episodeNumber'])
-
- episode_info.append({
- 'episode_file_id': episode_file_id,
- 'episode_ids': episode_ids,
- 'episode_numbers': episode_numbers
- })
- results.append({
- 'title': media_item_title,
- 'media_id': media_item_id,
- 'seasons': season_info
- })
- else:
- logger.info(f"Skipping {media_item_title} because it is not monitored.")
- continue
- else:
- if quality_profile_name:
- logger.info(f"Skipping {media_item_title} because it does not meet quality profile requirements. Quality Profile: {quality_profile_name}")
- else:
- logger.info(f"Skipping {media_item_title} because it does not have a quality profile.")
- continue
-
- logger.debug(f"Results: {json.dumps(results, indent=4)}")
- final_step(app, results, instance_type, dry_run)
-
-def final_step(app, results, instance_type, dry_run):
- searches = config.maximum_searches
- logger.debug(f"Searches: {searches}")
- script_dir = os.path.dirname(os.path.abspath(__file__))
- tmp_dir = os.path.join(script_dir, 'tmp')
- tmp_file_path = f'{tmp_dir}/search_count.txt'
- if not os.path.exists(tmp_dir):
- os.makedirs(tmp_dir)
- if not os.path.exists(tmp_file_path):
- try:
- with open(tmp_file_path, 'w') as f:
- f.write('0\n0')
- except OSError as e:
- logger.error(f"Error creating search count file: {e}")
- sys.exit(1)
- try:
- with open(tmp_file_path, 'r') as f:
- search_count, last_search_time = map(int, f.read().split('\n'))
- except ValueError:
- search_count, last_search_time = 0, 0
- current_time = int(time.time())
- if current_time - last_search_time >= 3600:
- search_count = 0
- last_search_time = current_time
- for result in results:
- if search_count >= searches:
- logger.warning('Maximum number of searches reached, cannot perform search')
- break
- media_id = result['media_id']
- title = result['title']
- if instance_type == 'Sonarr':
- seasons = result['seasons']
- for season in seasons:
- season_number = season['season_number']
- season_pack = season['season_pack']
- episode_info = season['episode_info']
- if season_pack:
- episode_file_id = episode_info[0]['episode_file_id']
- logger.debug(f"Processing {instance_type} - Deleting episode file for {title} Season {season_number}, Season Pack: {season_pack}")
- if not dry_run:
- app.delete_episode_files(episode_file_id)
- app.refresh_media(media_id)
- app.search_season(media_id, season_number)
- logger.info(f"Deleted Season {season_number} for {title}, and a search request was sent to Sonarr for Season {season_number}")
- search_count += 1
- else:
- logger.info(f"Would have deleted Season {season_number} for {title}, and the a search request would have been sent to Sonarr for Season {season_number}")
- elif not season_pack:
- episode_file_id = episode_info[0]['episode_file_id']
- episode_ids = episode_info[0]['episode_ids']
- episode_numbers = episode_info[0]['episode_numbers']
- logger.debug(f"Processing {instance_type} - Deleting episode file for {title} Season {season_number}, Season Pack: {season_pack}")
- if not dry_run:
- app.delete_episode_files(episode_file_id)
- app.refresh_media(media_id)
- app.search_episodes(episode_ids)
- search_count += 1
- logger.info(f"Deleted episode file for {title} Season {season_number} episodes {episode_numbers}, search request sent to Sonarr")
- else:
- logger.info(f"Would have deleted episode files for {title} Season {season_number} episodes {episode_numbers}, and the individual episodes would have been searched for a replacement")
- logger.debug(f"Search counter: {search_count}")
- elif instance_type == 'Radarr':
- file_ids = result['file_ids']
- logger.debug(f"Processing {instance_type} - Deleting movie file for {title}")
- if not dry_run:
- app.delete_movie_file(file_ids)
- app.refresh_media(media_id)
- app.search_media(media_id)
- logger.info(f"Deleted movie file for {title}, and the movie was searched for a replacement")
- search_count += 1
- else:
- logger.info(f"Would have deleted movie file for {title}, and the movie would have been searched for a replacement")
- logger.debug(f"Search Total: {search_count}")
- try:
- with open(tmp_file_path, 'w') as f:
- f.write(f'{search_count}\n{last_search_time}')
- except Exception as e:
- logger.error(f"Error writing to file: {e}")
-
-def main():
- dry_run = config.dry_run
- if config.dry_run:
- data = [
- ["Dry Run"],
- ["NO CHANGES WILL BE MADE"]
- ]
- create_table(data, log_level="info", logger=logger)
- search = config.maximum_searches
- if search >= 20:
- logger.error(f"Maximum searches set to {search}. This can cause devastating issues with your trackers. I will not be held responsible for any issues that arise from this. Please set this to a lower number.")
- logger.error(f"Exiting...")
- sys.exit()
- elif search >= 10:
- logger.warning(f"Maximum searches set to {search}. This can cause issues with your trackers. Please be careful.")
- elif search > 0 and search < 10:
- pass
- elif search == 0:
- logger.info(f"Maximum searches set to {search}, nothing will be searched for.")
- else:
- logger.error(f"Maximum searches set to {search}. This is not a valid number. Please set this to a positive number.")
- logger.error(f"Exiting...")
- sys.exit()
- paths = []
- monitored_paths = []
- for config_item in [config.radarr, config.sonarr]:
- for item in config_item or []:
- if item['paths']:
- paths.extend([item['paths']] if isinstance(item['paths'], str) else item['paths'])
- monitored_paths.extend([item['paths']] if isinstance(item['paths'], str) else item['paths'])
- else:
- logger.warning(f"No paths set for {item['name']}")
- continue
- nohl_files = find_no_hl_files(paths)
- if nohl_files:
- instances_to_run = []
- try:
- if config.script_data['radarr']:
- for radarr_config in config.script_data['radarr']:
- if radarr_config['paths']:
- for radarr_path in ([radarr_config['paths']] if isinstance(radarr_config['paths'], str) else radarr_config['paths']):
- if any(nohl_path.startswith(radarr_path) for nohl_path in nohl_files):
- instance_found = False
- for instance in instances_to_run:
- if instance['instance_name'] == radarr_config['name']:
- instance['files_to_process'].extend([nohl_file for nohl_file in nohl_files if nohl_file.startswith(radarr_path)])
- instance_found = True
- break
- if not instance_found:
- instances_to_run.append({
- 'instance_name': radarr_config['name'],
- 'files_to_process':[nohl_file for nohl_file in nohl_files if nohl_file.startswith(radarr_path)]
- })
- except KeyError:
- logger.warning("No Radarr instances found in script_data")
- try:
- if config.script_data['sonarr']:
- for sonarr_config in config.script_data['sonarr']:
- if sonarr_config['paths']:
- for sonarr_path in ([sonarr_config['paths']] if isinstance(sonarr_config['paths'], str) else sonarr_config['paths']):
- if any(nohl_path.startswith(sonarr_path) for nohl_path in nohl_files):
- instance_found = False
- for instance in instances_to_run:
- if instance['instance_name'] == sonarr_config['name']:
- instance['files_to_process'].extend([nohl_file for nohl_file in nohl_files if nohl_file.startswith(sonarr_path)])
- instance_found = True
- break
- if not instance_found:
- instances_to_run.append({
- 'instance_name': sonarr_config['name'],
- 'files_to_process':[nohl_file for nohl_file in nohl_files if nohl_file.startswith(sonarr_path)]
- })
- except KeyError:
- logger.warning("No Sonarr instances found in script_data")
- logger.debug(f"Instances to run: {json.dumps(instances_to_run, indent=4)}")
-
- instance_data = {
- 'Radarr': config.radarr_data,
- 'Sonarr': config.sonarr_data
- }
- for instance_type, instances in instance_data.items():
- for instance in instances:
- instance_name = instance['name']
- instance_type = instance_type.capitalize()
- url = instance['url']
- api = instance['api']
- if instances_to_run:
- for _instance in instances_to_run:
- if instance_name == _instance['instance_name']:
- if instance_type == "Radarr":
- logger.debug(f"Running {instance_type} instance {instance_name}")
- data = next((data for data in config.radarr if data['name'] == instance_name), None)
- if data:
- try:
- include_profiles = data['include_profiles']
- logger.debug(f"Include profiles: {include_profiles}")
- except KeyError:
- logger.error(f"Missing include_profiles in {instance_name} config. Please check your config.")
- sys.exit()
- try:
- exclude_profiles = data['exclude_profiles']
- logger.debug(f"Exclude profiles: {exclude_profiles}")
- except KeyError:
- logger.error(f"Missing exclude_profiles in {instance_name} config. Please check your config.")
- sys.exit()
- logger.debug(f"Processing {len(nohl_files)} files")
- elif instance_type == "Sonarr":
- logger.debug(f"Running {instance_type} instance {instance_name}")
- data = next((data for data in config.sonarr if data['name'] == instance_name), None)
- if data:
- try:
- include_profiles = data['include_profiles']
- logger.debug(f"Include profiles: {include_profiles}")
- except KeyError:
- logger.error(f"Missing include_profiles in {instance_name} config. Please check your config.")
- logger.error(f"Exiting...")
- sys.exit()
- try:
- exclude_profiles = data['exclude_profiles']
- logger.debug(f"Exclude profiles: {exclude_profiles}")
- except KeyError:
- logger.error(f"Missing exclude_profiles in {instance_name} config. Please check your config.")
- logger.error(f"Exiting...")
- sys.exit()
- try:
- exclude_series = data['exclude_series']
- logger.debug(f"Exclude series: {exclude_series}")
- except KeyError:
- logger.error(f"Missing exclude_series in {instance_name} config. Please check your config.")
- logger.error(f"Exiting...")
- sys.exit()
- nohl_files = _instance['files_to_process']
- logger.debug(f"Processing {len(nohl_files)} files")
- process_instances(instance_type, url, api, nohl_files, include_profiles, exclude_profiles, dry_run, exclude_series)
-
-if __name__ == "__main__":
- main()
\ No newline at end of file
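For reference, the check that nohl.py (removed above) was built around is the
filesystem link count: a file whose st_nlink is 1 has no other hardlinks. A
minimal standalone sketch of that idea; the directory path and the extension
list here are illustrative assumptions, not values from the original config:

    import os

    def find_non_hardlinked(root_dir, extensions=(".mkv", ".mp4")):
        """Yield files under root_dir that have no other hardlinks."""
        for root, _dirs, files in os.walk(root_dir):
            for name in files:
                if name.endswith(extensions):
                    path = os.path.join(root, name)
                    # st_nlink counts the directory entries pointing at the
                    # inode; 1 means no other hardlink exists for this file.
                    if os.path.isfile(path) and os.stat(path).st_nlink == 1:
                        yield path

    # Example (hypothetical path):
    # for f in find_non_hardlinked("/data/media/movies"):
    #     print(f)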
diff --git a/python-scripts/queinatorr.py b/python-scripts/queinatorr.py
deleted file mode 100644
index c79926e..0000000
--- a/python-scripts/queinatorr.py
+++ /dev/null
@@ -1,191 +0,0 @@
-# ____ _ _
-# / __ \ (_) | |
-# | | | |_ _ ___ _ _ __ __ _| |_ ___ _ __ _ __
-# | | | | | | |/ _ \ | '_ \ / _` | __/ _ \| '__| '__|
-# | |__| | |_| | __/ | | | | (_| | || (_) | | | |
-# \___\_\\__,_|\___|_|_| |_|\__,_|\__\___/|_| |_|
-# ===================================================================================================
-# Author: Drazzilb
-# Description: This script will move torrents from one category to another in qBittorrent based on
-# the title of the torrent. This is useful for moving torrents from a category that are stuck
-# in a queue due to a missing file or not being an upgrade for existing episode file(s).
-# Usage: python3 queinatorr.py
-# Requirements: requests, qbittorrentapi
-# License: MIT License
-# ===================================================================================================
-
-version = "1.0.2"
-
-import json
-from modules.config import Config
-from modules.logger import setup_logger
-from qbittorrentapi import Client
-from modules.arrpy import StARR
-from urllib.parse import urlsplit
-from modules.arrpy import arrpy_py_version
-
-config = Config(script_name="queinatorr")
-logger = setup_logger(config.log_level, "queinatorr")
-
-queue_list = [
- "Not an upgrade for existing episode file(s)",
- "Not an upgrade for existing movie file(s)",
- "Not a Custom Format upgrade for existing movie file(s)",
- "Not a Custom Format upgrade for existing episode file(s)",
- "Not a quality revision upgrade for existing movie file(s)",
- "Not a quality revision upgrade for existing episode file(s)",
- "New Quality is BR-DISK",
- "Not an upgrade for existing movie file",
- "No files found are eligible for import in",
- "The download is missing files",
-]
-
-def handle_qbit(title_list, url, username, password, move_category, dry_run, move_missing):
- name = None
- logger.debug('*' * 40)
- logger.debug(f'* {"Processing qBittorrent":^36} *')
- logger.debug('*' * 40)
- dict_torrent_hash_name_category = {}
- url_parts = urlsplit(url)
- host = url_parts.hostname
- port = url_parts.port
- qb = Client(host=host, port=port)
- qb.auth_log_in(username=username, password=password)
- torrents = qb.torrents_info()
- if move_category not in qb.torrents_categories().keys():
- logger.error(f"Category {move_category} does not exist. Please create it in qBittorrent")
- return
- for torrent in torrents:
- torrent_name = torrent['name']
- hash = torrent['hash']
- category = torrent['category']
- if move_missing:
- if category in move_missing and torrent['state'] == 'missingFiles':
-                dict_torrent_hash_name_category[torrent_name] = {'hash': hash, 'category': category}
- logger.info(f"Adding {torrent_name} to the list of torrents to move from {category} to {move_category} due to it missing files, chances are it's a cross-seed")
- if any(isinstance(title, str) and (title and torrent_name and title.lower() in str(torrent_name).lower() or title and isinstance(torrent_name, str) and '.' in torrent_name and title.lower() in torrent_name.rsplit('.', 1)[0].lower()) for title in title_list):
- try:
- if category != move_category:
-                    dict_torrent_hash_name_category[torrent_name] = {'hash': hash, 'category': category}
- except KeyError:
- logger.error(f"Could not find category for {name}")
- logger.debug(f"dict_torrent_hash_name_category: {json.dumps(dict_torrent_hash_name_category, indent=4)}")
- for torrent_name, dict_torrent_hash_category in dict_torrent_hash_name_category.items():
- torrent = torrent_name
- category = dict_torrent_hash_category['category']
- hash = dict_torrent_hash_category['hash']
- if category != move_category:
- if not dry_run:
- try:
- qb.torrents_set_category(torrent_hashes=hash, category=move_category)
- logger.info(f"Moving {torrent} from {category} to {move_category}")
- except Exception as e:
- logger.error(f"Could not move {torrent} from {category} to {move_category}")
- logger.error(e)
- else:
- logger.info(f"Would move {torrent} from {category} to {move_category}")
- qb.auth_log_out()
-
-def handle_queued_items(queue):
- logger.debug('*' * 40)
- logger.debug(f'* {"Handling queue items":^36} *')
- logger.debug('*' * 40)
- title_list = []
- for record in queue['records']:
- title = record['title']
- if record['statusMessages']:
- for message in record['statusMessages']:
- if not message['messages']:
- continue
- messages = message['messages']
- # if messages:
- # if any(queue_item in msg for msg in messages for queue_item in queue_list):
- # if title and messages:
- logger.info(f"Found {title} with {messages}")
- title_list.append(title)
- try:
- if record['errorMessage']:
- error_message = record['errorMessage']
- # if error_message:
- # if error_message in queue_list:
- # if title and error_message:
- logger.info(f"Found {title} with {error_message}....")
- title_list.append(title)
- except KeyError:
- pass
- logger.debug("")
- logger.debug(f"title_list: {title_list}")
- logger.debug("")
- return title_list
-
-def main():
- logger.info("Starting queinatorr")
- dry_run = config.dry_run
- logger.debug('*' * 40)
- logger.debug(f'* {"Unmatched Assets":^36} *')
- logger.debug(f'* {"Script Version:":<2} {version:>20} *')
- logger.debug(f'* {"arrpy.py Version:":<2} {arrpy_py_version:>18} *')
- logger.debug('*' * 40)
- logger.debug('')
- if config.dry_run:
- logger.info('*' * 40)
- logger.info(f'* {"Dry_run Activated":^36} *')
- logger.info('*' * 40)
- logger.info(f'* {" NO CHANGES WILL BE MADE ":^36} *')
- logger.info('*' * 40)
- logger.info('')
- for item in config.qbit:
- if item['starr_app'] is None:
- continue
- for app_data in [config.radarr_data, config.sonarr_data]:
- if app_data is not None:
- for i in app_data:
- if i['name'] in ([item['starr_app']] if isinstance(item['starr_app'], str) else item['starr_app']):
- app_type = 'Radarr' if app_data == config.radarr_data else 'Sonarr'
- logger.info('*' * 40)
- logger.info(f'* {f"Processing: {app_type}":^36} *')
- logger.info('*' * 40)
- url = i['url']
- api = i['api']
- logger.debug(f"url: {url}")
- logger.debug(f"api: {'*' * (len(api) - 5)}{api[-5:]}")
- app = StARR(url, api, logger)
- queue = app.get_queue()
- title_list = handle_queued_items(queue)
- for q in config.qbit_data:
- if q['name'] == item['name']:
- url = q['url']
- username = q['username']
- password = q['password']
- logger.debug(f"url: {url}")
- logger.debug(f"username: {username}")
- logger.debug(f"password: {'*' * len(password)}")
- move_category = item['move_category']
- try:
- move_missing = item['move_missing']
- except KeyError:
- move_missing = None
- for starr_app, category in move_category.items():
- if starr_app == i['name']:
- logger.debug(f"Matched {starr_app} with {i['name']}")
- move_category = category
- logger.debug(f"move_category: {move_category}")
- logger.debug(f"Move category for {starr_app} is {move_category}")
- logger.info('*' * 40)
- logger.info(f'* {f"Processing: {starr_app}":^36} *')
- logger.info('*' * 40)
- if move_missing:
- for starr_app, category in move_missing.items():
- if starr_app == i['name']:
- if isinstance(category, str):
- category = [category]
- move_missing = category
- logger.debug(f"move_missing: {move_missing}")
- logger.debug(f"Move missing for {starr_app} is {move_missing}")
- handle_qbit(title_list, url, username, password, move_category, dry_run, move_missing)
- app.refresh_queue()
- logger.info("Exiting queinatorr")
-
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
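For reference, the qBittorrent side of queinatorr.py (removed above) boils down
to re-categorising torrents by hash. A minimal sketch using the same
qbittorrentapi calls as the removed script; the host, credentials, and category
names below are illustrative assumptions:

    from qbittorrentapi import Client

    def move_torrents(host, port, username, password, from_category, to_category):
        """Move every torrent in from_category to to_category."""
        qb = Client(host=host, port=port)
        qb.auth_log_in(username=username, password=password)
        try:
            if to_category not in qb.torrents_categories().keys():
                raise ValueError(f"Category {to_category} does not exist in qBittorrent")
            for torrent in qb.torrents_info():
                if torrent["category"] == from_category:
                    # Re-categorising by hash; under automatic torrent management
                    # qBittorrent may also relocate the data if the category's
                    # save path differs.
                    qb.torrents_set_category(torrent_hashes=torrent["hash"], category=to_category)
        finally:
            qb.auth_log_out()

    # Example (hypothetical values):
    # move_torrents("localhost", 8080, "admin", "adminadmin", "radarr", "radarr-stalled")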
diff --git a/python-scripts/renameinatorr.py b/python-scripts/renameinatorr.py
deleted file mode 100644
index 63ac91d..0000000
--- a/python-scripts/renameinatorr.py
+++ /dev/null
@@ -1,267 +0,0 @@
-# _____ _ _ _____
-# | __ \ (_) | | | __ \
-# | |__) |___ _ __ __ _ _ __ ___ ___ _ _ __ __ _| |_ ___ _ __ _ __| |__) | _
-# | _ // _ \ '_ \ / _` | '_ ` _ \ / _ \ | '_ \ / _` | __/ _ \| '__| '__| ___/ | | |
-# | | \ \ __/ | | | (_| | | | | | | __/ | | | | (_| | || (_) | | | |_ | | | |_| |
-# |_| \_\___|_| |_|\__,_|_| |_| |_|\___|_|_| |_|\__,_|\__\___/|_| |_(_)|_| \__, |
-# __/ |
-# |___/
-# ===================================================================================================
-# Author: Drazzilb
-# Description: This script renames all media in Sonarr/Radarr to match the naming convention configured
-#              within each application. It also adds a tag to each item so that it can be easily
-#              identified as having been renamed.
-# Usage: python3 /path/to/renameinatorr.py
-# Requirements: requests, pyyaml
-# License: MIT License
-# ===================================================================================================
-
-script_version = "3.2.3"
-
-from modules.config import Config
-from modules.logger import setup_logger
-from modules.arrpy import StARR
-from modules.arrpy import arrpy_py_version
-from modules.version import version
-from modules.discord import discord
-from modules.formatting import create_table
-
-script_name = "renameinatorr"
-config = Config(script_name)
-log_level = config.log_level
-logger = setup_logger(log_level, script_name)
-version(script_name, script_version, arrpy_py_version, logger, config)
-
-def check_all_tagged(all_media, tag_id):
- """
- Check if all media has been tagged.
-
- Args:
- all_media (list): The list of all media.
- tag_id (int): The ID of the tag to check.
-
- Returns:
- bool: True if all media has been tagged, False otherwise.
- """
- for media in all_media:
- if tag_id not in media['tags']:
- return False
- return True
-
-def print_format(items, instance_type, dry_run, total_count, tagged_percent, untagged_percent, media_type, tagged_count, untagged_count):
- """
- Print the format of the output.
-
- Args:
-        items (dict): Mapping of each media title to the list of rename entries
-            returned for it by the Radarr/Sonarr instance.
- instance_type (str): The type of instance to process.
- dry_run (bool): Whether or not to perform a dry run.
- total_count (int): The total number of items to process.
- tagged_percent (float): The percentage of items that have been tagged.
- untagged_percent (float): The percentage of items that have not been tagged.
- media_type (str): The type of media to process.
- tagged_count (int): The number of items that have been tagged.
- untagged_count (int): The number of items that have not been tagged.
- """
- if dry_run:
- tagged = "would have been tagged"
- renamed = "would have been renamed to"
- else:
- tagged = "has been tagged"
- renamed = "renamed to"
- for item, rename_items in items.items():
- title = item
- logger.info(f"Title: {title} {tagged}.")
- current_season = None
- for rename_item in rename_items:
- existing_path = rename_item["existingPath"]
- new_path = rename_item["newPath"]
- if instance_type == "sonarr":
- season_number = rename_item["seasonNumber"]
- if instance_type == "sonarr":
- season_number = rename_item["seasonNumber"]
- if current_season != season_number:
- current_season = season_number
- logger.info(f"\tSeason {season_number:02d}:")
- logger.info(f"\t\t{existing_path.split('/')[-1]} {renamed}")
- logger.info(f"\t\t{new_path.split('/')[-1]}")
- logger.info(f"")
- if instance_type == "radarr":
- logger.info(f"\t{existing_path.split('/')[-1]} {renamed}")
- logger.info(f"\t{new_path.split('/')[-1]}")
- logger.info(f"")
- if total_count > 0:
- tagged_percent = (tagged_count / total_count) * 100
- untagged_percent = (untagged_count / total_count) * 100
- logger.info(f'Total {media_type}: {total_count}, Tagged {media_type}: {tagged_count} ({tagged_percent:.2f}%), Untagged {media_type}: {untagged_count} ({untagged_percent:.2f}%)\n')
-
-def process_instance(instance_type, instance_name, url, api, tag_name, count, dry_run, reset, unattended):
- """
- Process the instance based on the instance type.
-
- Args:
- instance_type (str): The type of instance to process.
- instance_name (str): The name of the instance to process.
- url (str): The URL of the instance to process.
- api (str): The API key of the instance to process.
- tag_name (str): The name of the tag to use.
- count (int): The number of items to process.
- dry_run (bool): Whether or not to perform a dry run.
- reset (bool): Whether or not to reset the tag.
- unattended (bool): Whether or not to run unattended.
- """
- library_item_to_rename = []
- app = StARR(url, api, logger)
- server_name = app.get_instance_name()
- data = [
- [server_name],
- ]
- create_table(data, log_level="info", logger=logger)
- data = [
- [f"{server_name} Settings"]
- ]
- create_table(data, log_level="debug", logger=logger)
- logger.debug('*' * 40)
- logger.debug(f"Script Settings for {instance_name}:")
- logger.debug(f'{"Count:":<20}{count if count else "Not Set"}')
- logger.debug(f'{"tag_name:":<20}{tag_name if tag_name else "Not Set"}')
- logger.debug(f'{"reset: {reset}":<20}{reset if reset else "Not Set"}')
- logger.debug(f'{"unattended:":<20}{unattended if unattended else "Not Set"}')
- logger.debug(f'{"URL:":<20}{url if url else "Not Set"}')
- logger.debug(f'{"API:":<20}{"*" * (len(api) - 5)}{api[-5:] if api else "Not Set"}')
- logger.debug(f'{"Instance Type:":<20}{instance_type if instance_type else "Not Set"}')
- logger.debug(f'{"ARR name:":<20}{server_name if instance_name else "Not Set"}')
- logger.debug('*' * 40 + '\n')
- media = app.get_media()
- if instance_type == "Radarr":
- media_type = "Movies"
- elif instance_type == "Sonarr":
- media_type = "Series"
- logger.debug(f"Length of Media for {instance_name}: {len(media)}")
- arr_tag_id = app.get_tag_id_from_name(tag_name)
- if not arr_tag_id:
- if not dry_run:
- arr_tag_id = app.create_tag(tag_name)
- else:
- logger.info(f"Tag {tag_name} would have been created.")
-    # Log the tag ID whether it already existed or was just created; on a dry
-    # run it may still be None because nothing is created.
-    logger.debug(f"Tag: {tag_name} | Tag ID: {arr_tag_id}")
- all_tagged = check_all_tagged(media, arr_tag_id)
- all_media_ids = [item["id"] for item in media]
- if reset:
- if not dry_run:
- app.remove_tags(all_media_ids, arr_tag_id)
- logger.info(f'All of {instance_name} have had the tag {tag_name} removed.')
- all_tagged = False
- else:
- logger.info(f'All of {instance_name} would have had the tag {tag_name} removed.')
- all_tagged = False
- elif all_tagged and unattended:
- if not dry_run:
- app.remove_tags(all_media_ids, arr_tag_id)
- logger.info(f'All of {instance_name} have had the tag {tag_name} removed.')
- discord(None, logger, config, script_name, description=f"All of {instance_name} have had the tag {tag_name} removed.", color=0x00ff00, content=None)
- all_tagged = False
- else:
- logger.info(f'All of {instance_name} would have had the tag {tag_name} removed.')
- discord(None, logger, config, script_name, description=f"All of {instance_name} would have had the tag {tag_name} removed.",color=0x00ff00, content=None)
- all_tagged = False
- elif all_tagged and not unattended:
- logger.info(f'All of {instance_name} has been tagged with {tag_name}')
- logger.info("If you would like to remove the tag and re-run the script, please set reset to True or set unattended to True.")
- logger.info(f"Skipping {instance_name}...")
- discord(None, logger, config, script_name, description=f"All of {instance_name} has been tagged with {tag_name}, please set reset to True or set unattended to True to remove the tag and re-run the script, {instance_name} will be skipped.", color=0x00ff00, content=None)
- return
-
- if not all_tagged:
- untagged_media = [
- m for m in media if arr_tag_id not in m['tags']]
- media_to_process = untagged_media[:count]
- items = {}
- media_ids = []
- tagged_count = 0
- untagged_count = 0
- new_tag = 0
- for item in media_to_process:
- title = item["title"]
- media_id = item["id"]
- media_ids.append(media_id)
- library_item_to_rename = app.get_rename_list(media_id)
- items[title] = library_item_to_rename
- if not dry_run:
- app.rename_media(media_ids)
- app.add_tags(media_ids, arr_tag_id)
- new_tag += 1
- app.refresh_media(media_ids)
- for m in media:
- if (arr_tag_id in m["tags"]):
- tagged_count += 1
- elif (arr_tag_id not in m["tags"]):
- untagged_count += 1
- total_count = (tagged_count + new_tag) + untagged_count
- tagged_percent = ((tagged_count + new_tag) / total_count) * 100
- untagged_percent = (untagged_count / total_count) * 100
- print_format(items, instance_type.lower(), dry_run, total_count, tagged_percent, untagged_percent, media_type, tagged_count, untagged_count)
-
-# TODO: Add support for parent folders
-def rename_folder():
- pass
-
-def main():
- data = [
- ["Script Settings"]
- ]
- create_table(data, log_level="debug", logger=logger)
- logger.debug(f'{"Dry_run:":<20}{config.dry_run if config.dry_run else "False"}')
- logger.debug(f'{"Log level:":<20}{log_level if log_level else "INFO"}')
- logger.debug(f'*' * 40 + '\n')
- if config.dry_run:
- data = [
- ["Dry Run"],
- ["NO CHANGES WILL BE MADE"]
- ]
- create_table(data, log_level="info", logger=logger)
- instance_data = {
- 'Radarr': config.radarr_data,
- 'Sonarr': config.sonarr_data
- }
-
- for instance_type, instances in instance_data.items():
- for instance in instances:
- instance_name = instance['name']
- url = instance['url']
- api = instance['api']
- script_name = None
- if instance_type == "Radarr" and config.radarr:
- data = next((data for data in config.radarr if data['name'] == instance_name), None)
- if data:
- script_name = data['name']
- count = data['count']
- tag_name = data['tag_name']
- reset = data['reset']
- unattended = data['unattended']
- elif instance_type == "Sonarr" and config.sonarr:
- data = next((data for data in config.sonarr if data['name'] == instance_name), None)
- if data:
- script_name = data['name']
- count = data['count']
- tag_name = data['tag_name']
- reset = data['reset']
- unattended = data['unattended']
- if script_name and instance_name == script_name:
- process_instance(instance_type, instance_name, url, api, tag_name, count, config.dry_run, reset, unattended)
-
-if __name__ == "__main__":
- """
- Main entry point for the script.
- """
- main()
\ No newline at end of file
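For reference, the tag bookkeeping in renameinatorr.py (removed above) is a
membership check over each item's tag list plus a percentage summary. A minimal
sketch of that logic; the media structure mirrors what the removed script read
from StARR.get_media(), and the sample data in the comments is illustrative:

    def tag_summary(media, tag_id):
        """Return (all_tagged, tagged_pct, untagged_pct) for one tag id."""
        tagged = sum(1 for m in media if tag_id in m["tags"])
        total = len(media)
        if total == 0:
            return True, 0.0, 0.0
        untagged = total - tagged
        return untagged == 0, tagged / total * 100, untagged / total * 100

    # Example with illustrative data:
    # media = [{"id": 1, "tags": [7]}, {"id": 2, "tags": []}]
    # tag_summary(media, 7)  ->  (False, 50.0, 50.0)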
diff --git a/python-scripts/renamer.py b/python-scripts/renamer.py
deleted file mode 100644
index 0cc5d6a..0000000
--- a/python-scripts/renamer.py
+++ /dev/null
@@ -1,707 +0,0 @@
-# _____ _____
-# | __ \ | __ \
-# | |__) |___ _ __ __ _ _ __ ___ ___ _ __| |__) | _
-# | _ // _ \ '_ \ / _` | '_ ` _ \ / _ \ '__| ___/ | | |
-# | | \ \ __/ | | | (_| | | | | | | __/ | | | | |_| |
-# |_| \_\___|_| |_|\__,_|_| |_| |_|\___|_| |_| \__, |
-# __/ |
-# |___/
-# ===================================================================================================
-# Author: Drazzilb
-# Description: This script will rename your posters to match Plex-Meta-Manager's naming scheme.
-# Usage: python3 renamer.py
-# Requirements: requests, tqdm, fuzzywuzzy, pyyaml
-# License: MIT License
-# ===================================================================================================
-
-script_version = "6.3.3"
-
-from modules.arrpy import arrpy_py_version
-from plexapi.exceptions import BadRequest
-from modules.logger import setup_logger
-from plexapi.server import PlexServer
-from modules.version import version
-from modules.formatting import create_table
-from modules.discord import discord, field_builder
-from modules.config import Config
-from modules.arrpy import StARR
-from unidecode import unidecode
-from fuzzywuzzy import process
-from fuzzywuzzy import fuzz
-from tqdm import tqdm
-import filecmp
-import shutil
-import errno
-import json
-import html
-import sys
-import os
-import re
-
-script_name = "renamer"
-config = Config(script_name)
-log_level = config.log_level
-logger = setup_logger(log_level, script_name)
-version(script_name, script_version, arrpy_py_version, logger, config)
-
-year_regex = re.compile(r"\((19|20)\d{2}\)")
-illegal_chars_regex = re.compile(r'[<>:"/\\|?*\x00-\x1f]+')
-remove_special_chars = re.compile(r'[^a-zA-Z0-9\s]+')
-
-season_name_info = [
- " - Season",
- " - Specials",
- "_Season"
-]
-
-words_to_remove = [
- "(US)",
-]
-
-prefixes = [
- "The",
- "A",
- "An"
-]
-suffixes = [
- "Collection",
-]
-
-def find_best_match(matches, title):
- best_match = None
- for match in matches:
- for i in match:
- if best_match:
- if i[1] > best_match[1]:
- best_match = i
- elif i[1] == best_match[1]:
- if i[0] == title:
- best_match = i
- else:
- best_match = i
- return best_match
-
-def match_collection(plex_collections, source_file_list, collection_threshold):
- matched_collections = {"matched_media": []}
- almost_matched = {"almost_matched": []}
- not_matched = {"not_matched": []}
- for plex_collection in tqdm(plex_collections, desc="Matching collections", total=len(plex_collections), disable=None):
- plex_normalize_title = normalize_titles(plex_collection)
- matches = [
- process.extract(plex_collection, [item['title'] for item in source_file_list['collections']], scorer=fuzz.ratio),
- process.extract(plex_normalize_title, [item['normalized_title'] for item in source_file_list['collections']], scorer=fuzz.ratio)
- ]
- for prefix in prefixes:
- matches.append(process.extract(plex_collection, [re.sub(rf"^{prefix}\s(?=\S)", '', item['title']) for item in source_file_list['collections']], scorer=fuzz.ratio))
- matches.append(process.extract(plex_normalize_title, [re.sub(rf"^{prefix}\s(?=\S)", '', item['normalized_title']) for item in source_file_list['collections']], scorer=fuzz.ratio))
- for suffix in suffixes:
- matches.append(process.extract(plex_collection, [re.sub(rf"\s*{suffix}*", '', item['title']) for item in source_file_list['collections']], scorer=fuzz.ratio))
- matches.append(process.extract(plex_normalize_title, [re.sub(rf"\s*{suffix}*", '', item['normalized_title']) for item in source_file_list['collections']], scorer=fuzz.ratio))
- best_match = find_best_match(matches, plex_collection)
- folder = illegal_chars_regex.sub('', plex_collection)
- if best_match:
- match_title = best_match[0]
- score = best_match[1]
- for item in source_file_list['collections']:
- file_title = item['title']
- files = item['files']
- file_normalized_title = item['normalized_title']
-                without_prefix = []
-                for prefix in prefixes:
-                    without_prefix.append(re.sub(rf"^{prefix}\s(?=\S)", '', item['title']))
-                    without_prefix.append(re.sub(rf"^{prefix}\s(?=\S)", '', item['normalized_title']))
- without_suffix = []
- for suffix in suffixes:
- without_suffix.append(re.sub(rf"\s*{suffix}", '', item['title']))
- without_suffix.append(re.sub(rf"\s*{suffix}", '', item['normalized_title']))
- if score >= collection_threshold and (
- match_title == item['title'] or
- match_title == item['normalized_title'] or
- match_title in without_prefix or
- match_title in without_suffix
- ):
- matched_collections['matched_media'].append({
- "title": file_title,
- "normalized_title": file_normalized_title,
- "plex_collection": plex_collection,
- "normalized_collection": plex_normalize_title,
- "year": None,
- "files": files,
- "score": score,
- "best_match": best_match,
- "folder": folder,
- })
- break
- elif score >= collection_threshold - 10 and score < collection_threshold and (
- match_title == item['title'] or
- match_title == item['normalized_title'] or
- match_title in without_prefix or
- match_title in without_suffix
- ):
- almost_matched['almost_matched'].append({
- "title": file_title,
- "normalized_title": file_normalized_title,
- "plex_collection": plex_collection,
- "normalized_collection": plex_normalize_title,
- "year": None,
- "files": files,
- "score": score,
- "best_match": best_match,
- "folder": folder,
- })
- break
- elif score < collection_threshold - 10 and (
- match_title == item['title'] or
- match_title == item['normalized_title'] or
- match_title in without_prefix or
- match_title in without_suffix
- ):
- not_matched['not_matched'].append({
- "title": file_title,
- "normalized_title": file_normalized_title,
- "plex_collection": plex_collection,
- "normalized_collection": plex_normalize_title,
- "year": None,
- "files": files,
- "score": score,
- "best_match": best_match,
- "folder": folder,
- })
- break
-
- logger.debug(f"Not matched collections: {json.dumps(not_matched, ensure_ascii=False, indent=4)}")
- logger.debug(f"Matched collections: {json.dumps(matched_collections, ensure_ascii=False, indent=4)}")
- logger.debug(f"Almost matched collections: {json.dumps(almost_matched, ensure_ascii=False, indent=4)}")
- return matched_collections
-
-def match_media(media, source_file_list, type):
- matched_media = {"matched_media": []}
- not_matched = {"not_matched": []}
- for item in tqdm(media, desc="Matching media", total=len(media), disable=None):
- alternate_title = False
- alternate_titles = []
- normalized_alternate_titles = []
- arr_title = item['title']
- try:
- original_title = item['originalTitle']
- except KeyError:
- original_title = None
- arr_path = os.path.basename(item['path'])
- arr_path = year_regex.sub("", arr_path).strip()
- normalized_arr_path = normalize_titles(arr_path)
- try:
- arr_path_year = year_regex.search(item['path'])
- arr_path_year = int(arr_path_year.group(0)[1:-1])
- except AttributeError:
- if item['status'] == 'upcoming' or item['status'] == 'announced':
- continue
- else:
- logger.warning(f"Unable to find year in {item['title']} path")
- try:
- if item['alternateTitles']:
- for i in item['alternateTitles']:
- alternate_titles.append(i['title'])
- normalized_alternate_titles.append(normalize_titles(i['title']))
- except KeyError:
- alternate_titles = []
- year_from_title = year_regex.search(item['title'])
- arr_normalized_title = normalize_titles(arr_title)
- secondary_year = None
- if year_from_title:
- try:
- arr_year = int(year_from_title.group(0)[1:-1])
- except ValueError:
- logger.error(f"Could not convert year to int: {year_from_title.group(0)[1:-1]} for {item['title']}")
- continue
- else:
- arr_year = item['year']
- try:
- if item['secondaryYear']:
- secondary_year = item['secondaryYear']
- except KeyError:
- secondary_year = None
- path = item['path']
- folder = os.path.basename(os.path.normpath(path))
- files = []
- for i in source_file_list[type]:
- file_title = i['title']
- file_normalized_title = i['normalized_title']
- files = i['files']
- file_year = i['year']
- if (
- arr_title == file_title or
- arr_normalized_title == file_normalized_title or
- arr_path == file_title or
- normalized_arr_path == file_normalized_title or
- original_title == file_title or
- file_title in alternate_titles or
- file_normalized_title in normalized_alternate_titles
- ) and (
- arr_year == file_year or
- secondary_year == file_year or
- arr_path_year == file_year
- ):
- matched_media['matched_media'].append({
- "title": file_title,
- "normalized_title": file_normalized_title,
- "arr_title": arr_title,
- "arr_normalized_title": arr_normalized_title,
- "arr_path": arr_path,
- "normalized_arr_path": normalized_arr_path,
- "year": file_year,
- "arr_year": arr_year,
- "arr_path_year": arr_path_year,
- "secondaryYear": secondary_year,
- "files": files,
- "alternate_title": alternate_title,
- "folder": folder,
- })
- break
- elif (
- arr_title == file_title or
- arr_normalized_title == file_normalized_title or
- arr_path == file_title or
- normalized_arr_path == file_normalized_title or
- original_title == file_title or
- file_title in alternate_titles or
- file_normalized_title in normalized_alternate_titles
- ) and (
- arr_year != file_year or
- secondary_year != file_year or
- arr_path_year != file_year
- ):
- not_matched['not_matched'].append({
- "title": file_title,
- "normalized_title": file_normalized_title,
- "arr_title": arr_title,
- "arr_normalized_title": arr_normalized_title,
- "arr_path": arr_path,
- "normalized_arr_path": normalized_arr_path,
- "year": file_year,
- "arr_year": arr_year,
- "arr_path_year": arr_path_year,
- "secondaryYear": secondary_year,
- "files": files,
- "alternate_title": alternate_title,
- "folder": folder,
- })
- logger.debug(f"Matched media: {json.dumps(matched_media, ensure_ascii=False, indent=4)}")
- logger.debug(f"Not matched media: {json.dumps(not_matched, ensure_ascii=False, indent=4)}")
- return matched_media
-
-def rename_file(matched_media, destination_dir, dry_run, action_type, print_only_renames):
- messages = []
- discord_messages = []
- asset_folders = config.asset_folders
- destination_files = os.listdir(destination_dir)
- for media in tqdm(matched_media['matched_media'], desc="Renaming files", total=len(matched_media['matched_media']), disable=None):
- files = media['files']
- folder = media['folder']
- if asset_folders:
- if dry_run:
- if not os.path.exists(os.path.join(destination_dir, folder)):
- discord_messages.append(folder)
- else:
- if not os.path.exists(os.path.join(destination_dir, folder)):
- messages.append(f"Creating asset folder: {folder}")
- os.makedirs(os.path.join(destination_dir, folder), exist_ok=True)
- discord_messages.append(folder)
- for file in files:
- path = os.path.dirname(file)
- old_file_name = os.path.basename(file)
- source_file_path = os.path.join(path, file)
- file_extension = os.path.splitext(file)[1]
- if any(word in file for word in season_name_info):
- season_number = re.search(r"Season (\d+)", file)
- if season_number:
- season_number = season_number.group(1)
- season_number = season_number.zfill(2)
- if asset_folders:
- new_file_name = f"Season{season_number}{file_extension}"
- else:
- new_file_name = f"{folder}_Season{season_number}{file_extension}"
- elif season_number := re.search(r"Season (\d\d)", file):
- if asset_folders:
- season_number = season_number.group(1)
- new_file_name = f"Season{season_number}{file_extension}"
- else:
- season_number = season_number.group(1)
- new_file_name = f"{folder}_Season{season_number}{file_extension}"
- elif " - Specials" in file:
- if asset_folders:
- new_file_name = f"Season00{file_extension}"
- else:
- new_file_name = f"{folder}_Season00{file_extension}"
- elif "_Season" in file:
- new_file_name = file
- else:
- logger.error(f"Unable to find season number for {file}")
- continue
- else:
- if asset_folders:
- new_file_name = f"poster{file_extension}"
- else:
- new_file_name = f"{folder}{file_extension}"
- if asset_folders:
- destination_file_path = os.path.join(destination_dir, folder, new_file_name)
- else:
- destination_file_path = os.path.join(destination_dir, new_file_name)
- if config.source_overrides:
- if path in config.source_overrides:
- if asset_folders:
- for root, dirs, files in os.walk(destination_dir):
- basedir = os.path.basename(root)
- if basedir == folder:
- for file in files:
- if os.path.splitext(file)[0] == os.path.splitext(new_file_name)[0] and file_extension != os.path.splitext(file)[1]:
- if dry_run:
- messages.append(f"Would remove {file} from {basedir}")
- else:
- messages.append(f"Removed {file} from {basedir}")
- os.remove(os.path.join(root, file))
- else:
- for i in destination_files:
- if folder == os.path.splitext(i)[0] and file_extension != os.path.splitext(i)[1]:
- if dry_run:
- messages.append(f"Would remove {i} from {destination_dir}")
- else:
- messages.append(f"Removed {i} from {destination_dir}")
- os.remove(os.path.join(destination_dir, i))
- if new_file_name != old_file_name:
- processsed_file_info, discord_message = process_file(old_file_name, new_file_name, action_type, dry_run, destination_file_path, source_file_path, '-renamed->')
- messages.extend(processsed_file_info)
- if not asset_folders:
- discord_messages.extend(discord_message)
- else:
- if not print_only_renames:
- processsed_file_info, discord_message = process_file(old_file_name, new_file_name, action_type, dry_run, destination_file_path, source_file_path, '-not-renamed->>')
- messages.extend(processsed_file_info)
- if not asset_folders:
- discord_messages.extend(discord_message)
- if not asset_folders:
-        discord_messages = [os.path.splitext(i)[0] for i in discord_messages]
- return messages, discord_messages
-
-def process_file(old_file_name, new_file_name, action_type, dry_run, destination_file_path, source_file_path, arrow):
- output = []
- discord_output = []
- if dry_run:
- if action_type == 'copy':
- if os.path.isfile(destination_file_path):
- if filecmp.cmp(source_file_path, destination_file_path):
- logger.debug(f"Copy -> File already exists: {destination_file_path}")
- pass
- else:
- output.append(f"Action Type: {action_type.capitalize()}: {old_file_name} {arrow} {new_file_name}")
- discord_output.append(new_file_name)
- else:
- output.append(f"Action Type: {action_type.capitalize()}: {old_file_name} {arrow} {new_file_name}")
- discord_output.append(new_file_name)
- if action_type == 'hardlink':
- if os.path.isfile(destination_file_path):
- if filecmp.cmp(source_file_path, destination_file_path):
- logger.debug(f"Hardlink -> File already exists: {destination_file_path}")
- pass
- else:
- output.append(f"Action Type: {action_type.capitalize()}: {old_file_name} {arrow} {new_file_name}")
- discord_output.append(new_file_name)
- else:
- output.append(f"Action Type: {action_type.capitalize()}: {old_file_name} {arrow} {new_file_name}")
- discord_output.append(new_file_name)
- elif action_type == 'move':
- output.append(f"Action Type: {action_type.capitalize()}: {old_file_name} {arrow} {new_file_name}")
- discord_output.append(new_file_name)
- else:
- if action_type == 'copy':
- try:
- if os.path.isfile(destination_file_path):
- if filecmp.cmp(source_file_path, destination_file_path):
- logger.debug(f"Copy -> File already exists: {destination_file_path}")
- pass
- else:
- shutil.copyfile(source_file_path, destination_file_path)
- output.append(f"Action Type: {action_type.capitalize()}: {old_file_name} {arrow} {new_file_name}")
- discord_output.append(new_file_name)
- else:
- shutil.copyfile(source_file_path, destination_file_path)
- output.append(f"Action Type: {action_type.capitalize()}: {old_file_name} {arrow} {new_file_name}")
- discord_output.append(new_file_name)
- except OSError as e:
- logger.error(f"Unable to copy file: {e}")
- elif action_type == 'move':
- try:
- shutil.move(source_file_path, destination_file_path)
- output.append(f"Action Type: {action_type.capitalize()}: {old_file_name} {arrow} {new_file_name}")
- discord_output.append(new_file_name)
- except OSError as e:
- logger.error(f"Unable to move file: {e}")
- elif action_type == 'hardlink':
- try:
- os.link(source_file_path, destination_file_path)
- output.append(f"Action Type: {action_type.capitalize()}: {old_file_name} {arrow} {new_file_name}")
- discord_output.append(new_file_name)
- except OSError as e:
- if e.errno == errno.EEXIST:
- if os.path.samefile(source_file_path, destination_file_path):
- logger.debug(f"Hardlink -> File already exists: {destination_file_path}")
- pass
- else:
- os.replace(destination_file_path, source_file_path)
- os.link(source_file_path, destination_file_path)
- output.append(f"Action Type: {action_type.capitalize()}: {old_file_name} {arrow} {new_file_name}")
- discord_output.append(new_file_name)
- else:
- logger.error(f"Unable to hardlink file: {e}")
- return
- else:
- logger.error(f"Unknown action type: {action_type}")
- return output, discord_output
-
-def load_dict(title, year, files):
- return {
- "title": title,
- "normalized_title": None,
- "year": year,
- "files": files
- }
-
-def normalize_titles(title):
- normalized_title = title
- for word in words_to_remove:
-        normalized_title = normalized_title.replace(word, '')
- normalized_title = year_regex.sub('', normalized_title)
- normalized_title = illegal_chars_regex.sub('', normalized_title)
- normalized_title = unidecode(html.unescape(normalized_title))
- normalized_title = normalized_title.rstrip()
- normalized_title = normalized_title.replace('&', 'and')
- normalized_title = re.sub(remove_special_chars, '', normalized_title).lower()
- normalized_title = normalized_title.replace(' ', '')
- return normalized_title
-
-def add_file_to_asset(category_dict, file):
- category_dict['files'].append(file)
-
-def find_or_create_show(show_list, title, year, files, path):
- for show in show_list:
- if title == show['title'] and year == show['year']:
- add_file_to_asset(show, files[0])
- return
- show = load_dict(title, year, files)
- show_list.append(show)
-
-def get_files(path):
- files = []
- try:
- files = os.listdir(path)
- except FileNotFoundError:
- logger.error(f"Path not found: {path}")
- return files
-
-def sort_files(files, path, dict, basename):
- for file in tqdm(files, desc=f'Sorting assets from \'{basename}\' directory', total=len(files), disable=None):
- full_path = os.path.join(path, file)
- if file.startswith('.'):
- continue
- base_name, extension = os.path.splitext(file)
- if not re.search(r'\(\d{4}\)', base_name):
- collection = load_dict(base_name, None, [full_path])
- dict['collections'].append(collection)
- else:
- file_name = os.path.splitext(file)[0]
- match = re.search(r'\((\d{4})\)', base_name)
- year = int(match.group(1)) if match else None
- title = base_name.replace(f'({year})', '').strip()
- if any(file.startswith(file_name) and any(file_name + season_name in file for season_name in season_name_info) for file in files):
- find_or_create_show(dict['series'], title, year, [full_path], path)
- elif any(word in file for word in season_name_info):
- for season_name in season_name_info:
- if season_name in file:
- title = title.split(season_name)[0].strip()
- find_or_create_show(dict['series'], title, year, [full_path], path)
- else:
- movie = load_dict(title, year, [full_path])
- dict['movies'].append(movie)
- return dict
-
-def get_assets_files(assets_path, override_paths):
- asset_files = {asset_type: [] for asset_type in ['series', 'movies', 'collections']}
- override_files = {asset_type: [] for asset_type in ['series', 'movies', 'collections']}
- if assets_path:
- files = get_files(assets_path)
- basename = os.path.basename(assets_path.rstrip('/'))
- asset_files = sort_files(files, assets_path, asset_files, basename)
- if isinstance(override_paths, str):
- override_paths = [override_paths]
- if override_paths:
- for path in tqdm(override_paths, desc="Processing override paths", total=len(override_paths)):
- files = get_files(path)
- basename = os.path.basename(path.rstrip('/'))
- override_files = sort_files(files, path, override_files, basename)
- if override_files and asset_files:
- asset_files = handle_override_files(asset_files, override_files, path, asset_types=['series', 'movies', 'collections'])
- for asset_type in asset_files:
- for asset in asset_files[asset_type]:
- normalized_title = normalize_titles(asset['title'])
- asset['normalized_title'] = normalized_title
- asset['files'].sort()
- logger.debug(json.dumps(asset_files, indent=4))
- return asset_files
-
-def handle_override_files(asset_files, override_files, path, asset_types):
- for type in asset_types:
- for override_asset in override_files[type]:
- asset_found = False
- for asset in asset_files[type]:
- if override_asset['title'] == asset['title'] and override_asset['year'] == asset['year']:
- asset_found = True
- for override_file in override_asset['files']:
- over_ride_file_name = os.path.split(override_file)[1]
- asset['files'] = [f for f in asset['files'] if os.path.split(f)[1] != over_ride_file_name]
- asset['files'].append(override_file)
- logger.debug(f"Override: Added {override_file} to {asset['title']}")
- break
- if not asset_found:
- asset_files[type].append(override_asset)
- logger.debug(f"Override: Added {override_asset['title']} to {type} from {path}")
- return asset_files
-
-def process_instance(instance_type, instance_name, url, api, final_output, asset_files):
- collections = []
- media = []
- collection_names = []
- if instance_type == "Plex":
- if config.library_names:
- app = PlexServer(url, api)
- for library_name in config.library_names:
- try:
- library = app.library.section(library_name)
- logger.debug(f"Library: {library_name} found in {instance_name}")
- collections += library.collections()
- except BadRequest:
- logger.error(f"Error: {library_name} does not exist in {instance_name}")
- # collection_names = [collection.title for collection in collections if collection.smart != True]
- collection_names = [collection.title for collection in collections]
- logger.debug(json.dumps(collection_names, indent=4))
- else:
- message = f"Error: No library names specified for {instance_name}"
- final_output.append(message)
- return final_output, None
- # get friendly name of plex server
- server_name = app.friendlyName
- data = [
- [f"Plex Server: {server_name}"],
- ]
- create_table(data, log_level="info", logger=logger)
- else:
- app = StARR(url, api, logger)
- media = app.get_media()
- server_name = app.get_instance_name()
- data = [
- [server_name],
- ]
- create_table(data, log_level="info", logger=logger)
- data = [
- [f"{server_name} Settings"]
- ]
- create_table(data, log_level="debug", logger=logger)
- logger.debug('*' * 40)
- logger.debug(f"Script Settings for {instance_name}:")
- logger.debug(f'{"URL:":<20}{url if url else "Not Set"}')
- logger.debug(f'{"API:":<20}{"*" * (len(api) - 5)}{api[-5:] if api else "Not Set"}')
- logger.debug(f'{"Instance Type:":<20}{instance_type if instance_type else "Not Set"}')
- logger.debug(f'{"ARR name:":<20}{server_name if instance_name else "Not Set"}')
- logger.debug('*' * 40 + '\n')
- matched_media = []
- if instance_type == "Plex":
- matched_media = match_collection(collection_names, asset_files, config.collection_threshold)
- elif instance_type == "Radarr":
- matched_media = match_media(media, asset_files, "movies")
- elif instance_type == "Sonarr":
- matched_media = match_media(media, asset_files, "series")
- if matched_media:
- message, discord_messages = rename_file(matched_media, config.destination_dir, config.dry_run, config.action_type, config.print_only_renames)
- final_output.extend(message)
- else:
- message = f"No matches found for {instance_name}"
- final_output.append(message)
- return final_output, discord_messages
-
-def print_output(final_output):
- if final_output:
- for message in final_output:
- logger.info(message)
- return
- else:
- return
-
-def notification(file_list):
- if file_list:
- for instance_type, file_list in file_list.items():
- if not file_list:
- continue
- fields = field_builder(file_list, name="Renamed Posters")
- for field_number, field in fields.items():
- discord(field, logger, config, script_name, description=f"Number of posters added {len(file_list)}", color=0x00FF00, content=None)
- else:
- return
-
-def main():
- data = [
- ["Script Settings"],
- ]
- create_table(data, log_level="debug", logger=logger)
- logger.debug(f'*' * 40)
- logger.debug(f'{"Dry_run:":<20}{config.dry_run if config.dry_run else "False"}')
- logger.debug(f'{"Log level:":<20}{log_level if log_level else "INFO"}')
- logger.debug(f'{"Asset folders:":<20}{config.asset_folders if config.asset_folders else "False"}')
- logger.debug(f'{"Library names:":<20}{config.library_names if config.library_names else "Not set"}')
- logger.debug(f'{"Source dir:":<20}{config.source_dir if config.source_dir else "Not set"}')
- logger.debug(f'{"Source overrides:":<20}{config.source_overrides if config.source_overrides else "Not set"}')
- logger.debug(f'{"Destination dir:":<20}{config.destination_dir if config.destination_dir else "Not set"}')
- logger.debug(f'{"Threshold:":<20}{config.collection_threshold}')
- logger.debug(f'{"Action type:":<20}{config.action_type}')
- logger.debug(f'{"Print only renames:":<20}{config.print_only_renames}')
- logger.debug(f'*' * 40 + '\n')
- if config.dry_run:
- data = [
- ["Dry Run"],
- ["NO CHANGES WILL BE MADE"]
- ]
- create_table(data, log_level="info", logger=logger)
-
- asset_files = get_assets_files(config.source_dir, config.source_overrides)
-
- instance_data = {
- 'Plex': config.plex_data,
- 'Radarr': config.radarr_data,
- 'Sonarr': config.sonarr_data
- }
- discord_output = {}
- for instance_type, instances in instance_data.items():
- for instance in instances:
- final_output = []
- instance_name = instance['name']
- url = instance['url']
- api = instance['api']
- script_name = None
- if instance_type == "Radarr" and config.radarr:
- data = next((data for data in config.radarr if data['name'] == instance_name), None)
- if data:
- script_name = data['name']
- elif instance_type == "Sonarr" and config.sonarr:
- data = next((data for data in config.sonarr if data['name'] == instance_name), None)
- if data:
- script_name = data['name']
- elif instance_type == "Plex":
- script_name = instance_name
- if script_name and instance_name == script_name:
- final_output, file_list = process_instance(instance_type, instance_name, url, api, final_output, asset_files)
- discord_output[instance_name] = file_list
- print_output(final_output)
- notification(discord_output)
-
-if __name__ == "__main__":
- main()
diff --git a/python-scripts/renamer_cleanarr.py b/python-scripts/renamer_cleanarr.py
deleted file mode 100644
index 5efe340..0000000
--- a/python-scripts/renamer_cleanarr.py
+++ /dev/null
@@ -1,299 +0,0 @@
-# _____ _____ _
-# | __ \ / ____| |
-# | |__) |___ _ __ __ _ _ __ ___ ___ _ __ ______ | | | | ___ __ _ _ __ ___ _ __
-# | _ // _ \ '_ \ / _` | '_ ` _ \ / _ \ '__|______| | | | |/ _ \/ _` | '_ \ / _ \ '__|
-# | | \ \ __/ | | | (_| | | | | | | __/ | | |____| | __/ (_| | | | | __/ |
-# |_| \_\___|_| |_|\__,_|_| |_| |_|\___|_| \_____|_|\___|\__,_|_| |_|\___|_|
-# ===========================================================================================================
-# Author: Drazzilb
-# Description: This script will remove any assets from your plex-meta-manager asset directory that are not being used by your media.
-# Note: This script will remove assets that renamer has placed in the assets directory but that no longer have a matching folder in your
-# media directory and would otherwise cause a loop. I wouldn't recommend running this script very often (weekly at most; monthly is probably enough).
-# Usage: python3 renamer_cleanarr.py
-# Requirements: requests
-# License: MIT License
-# ===========================================================================================================
-
-script_version = "2.2.1"
-
-import os
-import re
-from pathlib import Path
-from plexapi.server import PlexServer
-from plexapi.exceptions import BadRequest
-from modules.logger import setup_logger
-from modules.config import Config
-from modules.formatting import create_table
-from tqdm import tqdm
-import json
-import logging
-import sys
-import shutil
-from modules.arrpy import arrpy_py_version
-from modules.version import version
-from modules.discord import discord
-
-script_name = "renamer_cleanarr"
-config = Config(script_name)
-log_level = config.log_level
-logger = setup_logger(config.log_level, script_name)
-version(script_name, script_version, arrpy_py_version, logger, config)
-
-logging.getLogger("requests").setLevel(logging.WARNING)
-logging.getLogger('urllib3').setLevel(logging.WARNING)
-
-illegal_chars_regex = re.compile(r'[<>:"/\\|?*\x00-\x1f]+')
-year_regex = re.compile(r"(.*)\s\((\d{4})\)")
-
-season_name_info = [
- "_Season",
-]
-
-def get_assets_files(assets_paths, asset_folders):
- assets = {'movies': [], 'series': [], 'collections': []}
-
- print("Getting assets files..., this may take a while.")
- for assets_path in assets_paths:
- files = os.listdir(assets_path)
- files = sorted(files, key=lambda x: x.lower())
-
- if not asset_folders:
- for file in tqdm(files, desc=f'Sorting assets', total=len(files)):
-
- if file.startswith('.'):
- continue
- base_name, extension = os.path.splitext(file)
- if not re.search(r'\(\d{4}\)', base_name):
- assets['collections'].append({
- 'title': base_name,
- 'files': file,
- 'source': assets_path
- })
- else:
- if any(file.startswith(base_name) and any(season_name in file for season_name in season_name_info) for file in files) and not any(season_name in file for season_name in season_name_info):
- season_files = [file for file in files if file.startswith(base_name) and any(season_name in file for season_name in season_name_info)]
- season_files.append(file)
- season_files = sorted(season_files)
- assets['series'].append({
- 'title': base_name,
- 'files': season_files,
- 'source': assets_path
- })
- elif any(season_name in file for season_name in season_name_info):
- continue
- else:
- assets['movies'].append({
- 'title': base_name,
- 'files': file,
- 'source': assets_path
- })
- else:
- for root, dirs, files in os.walk(assets_path):
- title = os.path.basename(root)
- if root == assets_path:
- continue
- if not files:
- continue
- if title.startswith('.'):
- continue
- if not re.search(year_regex, title):
- assets['collections'].append({
- 'title': title,
- 'files': files,
- 'source': root
- })
- else:
- if any("Season" in file for file in files):
- assets['series'].append({
- 'title': title,
- 'files': files,
- 'source': root
- })
- else:
- assets['movies'].append({
- 'title': title,
- 'files': files,
- 'source': root
- })
- logger.debug("Assets:")
- logger.debug(json.dumps(assets, ensure_ascii=False, indent=4))
- return assets
-
-
-def get_media_folders(media_paths):
- media = {'movies':[], 'series': []}
- print("Getting media folder information..., this may take a while.")
-
- for media_path in media_paths:
- for subfolder in sorted(Path(media_path).iterdir()):
- if subfolder.is_dir():
- for sub_sub_folder in sorted(Path(subfolder).iterdir()):
- if sub_sub_folder.is_dir():
- sub_sub_folder_base_name = os.path.basename(
- os.path.normpath(sub_sub_folder))
- if not (sub_sub_folder_base_name.startswith("Season ") or sub_sub_folder_base_name == "Specials"):
- logger.debug(
- f"Skipping '{sub_sub_folder_base_name}' because it is not a season folder.")
- continue
- if any(subfolder.name in s['title'] for s in media['series']):
- media['series'][-1]['season_number'].append(
- sub_sub_folder.name)
- else:
- media['series'].append({
- 'title': subfolder.name,
- 'season_number': [],
- })
- media['series'][-1]['season_number'].append(
- sub_sub_folder.name)
- if not any(sub_sub_folder.is_dir() for sub_sub_folder in Path(subfolder).iterdir()):
- media['movies'].append({
- 'title': subfolder.name,
- })
- logger.debug("Media Directories:")
- logger.debug(json.dumps(media, ensure_ascii=False, indent=4))
- return media
-
-
-def match_assets(assets, media, dict_plex):
- asset_types = ['movies', 'series', 'collections']
- unmatched_posters = {asset_type: [] for asset_type in asset_types}
- for asset_type in asset_types:
- for asset in assets[asset_type]:
- if asset_type == 'collections':
- if not any(asset['title'] in c['title'] for c in dict_plex['collections']):
- unmatched_posters[asset_type].append(asset)
- else:
- if not any(asset['title'] in m['title'] for m in media[asset_type]):
- unmatched_posters[asset_type].append(asset)
- logger.debug("Unmatched Posters:")
- logger.debug(json.dumps(unmatched_posters, ensure_ascii=False, indent=4))
- return unmatched_posters
-
-def remove_assets(asset_folders, unmatched_assets, dry_run):
- asset_types = ['movies', 'series', 'collections']
- messages = []
- for asset_type in asset_types:
- for asset in unmatched_assets[asset_type]:
- if asset_type == 'collections':
- if asset_folders:
- for root, dirs, files in os.walk(asset['source']):
- for dir in dirs:
- if dir == asset['title']:
- logger.debug(f"Removing {dir}")
- if not dry_run:
- shutil.rmtree(os.path.join(root, dir))
- messages.append(f"Removed {dir}")
- else:
- logger.debug(f"Removing {os.path.join(asset['source'], asset['files'])}")
- if not dry_run:
- os.remove(os.path.join(asset['source'], asset['files']))
- messages.append(f"Removed Path: {os.path.join(asset['source'], asset['files'])}")
- else:
- messages.append(f"Would have removed {os.path.join(asset['source'], asset['files'])}")
- else:
- if asset_folders:
- for root, dirs, files in os.walk(asset['source']):
- for dir in dirs:
- if dir == asset['title']:
- logger.debug(f"Removing {dir}")
- if not dry_run:
- shutil.rmtree(os.path.join(root, dir))
- messages.append(f"Removed {dir}")
- else:
- messages.append(f"Would have removed {dir}")
- else:
- # if files is a list then it is a series
- if isinstance(asset['files'], list):
- for file in asset['files']:
- file_path = os.path.join(asset['source'], file)
- logger.debug(f"Removing {os.path.join(asset['source'], file)}")
- if not dry_run:
- os.remove(file_path)
- messages.append(f"Removed Path: {os.path.join(asset['source'], file)}")
- else:
- messages.append(f"Would have removed {os.path.join(asset['source'], file)}")
- else:
- logger.debug(f"Removing {os.path.join(asset['source'], asset['files'])}")
- if not dry_run:
- os.remove(os.path.join(asset['source'], asset['files']))
- messages.append(f"Removed Path: {os.path.join(asset['source'], asset['files'])}")
- else:
- messages.append(f"Would have removed {os.path.join(asset['source'], asset['files'])}")
- return messages
-
-def print_output(messages):
- count = 0
- for message in messages:
- logger.info(message)
- count += 1
- logger.info(f"Total number of assets removed: {count}")
-
-def main():
- script_data = config.script_data
- assets_paths = script_data['assets_paths']
- library_names = script_data['library_names']
- asset_folders = script_data['asset_folders']
- media_paths = script_data['media_paths']
- log_level = script_data['log_level']
- assets_paths = script_data['assets_paths']
- dry_run = script_data['dry_run']
- data = [
- ["Script Settings"]
- ]
- create_table(data, log_level="debug", logger=logger)
- logger.debug(f'{"Log level:":<20}{log_level if log_level else "Not set"}')
- logger.debug(f'{"Dry_run:":<20}{dry_run if dry_run else "False"}')
- logger.debug(f'{"Asset Folders:":<20}{asset_folders if asset_folders else "Not set"}')
- logger.debug(f'{"Assets path:":<20}{assets_paths if assets_paths else "Not set"}')
- logger.debug(f'{"Media paths:":<20}{media_paths if media_paths else "Not set"}')
- logger.debug(f'{"Library names:":<20}{library_names if library_names else "Not set"}')
- logger.debug('*' * 40 + '\n')
- if dry_run:
- data = [
- ["Dry Run"],
- ["NO CHANGES WILL BE MADE"]
- ]
- create_table(data, log_level="info", logger=logger)
- if config.plex_data:
- for data in config.plex_data:
- api_key = data.get('api', '')
- url = data.get('url', '')
- if config.library_names:
- try:
- app = PlexServer(url, api_key)
- except:
- logger.error("Unable to connect to Plex server. Please check your config.yml.")
- sys.exit()
- else:
- logger.info("No library names specified in config.yml. Skipping Plex.")
- sys.exit()
- assets = get_assets_files(assets_paths, asset_folders)
- media = get_media_folders(media_paths)
- collections = []
- if library_names and app:
- for library_name in library_names:
- try:
- library = app.library.section(library_name)
- logger.debug(library)
- collections += library.collections()
- except BadRequest:
- logger.error(f"Library {library_name} not found.")
- sys.exit()
- else:
- logger.info(
- "No library names specified in config.yml. Skipping collections.")
- collection_names = [collection.title for collection in collections if collection.smart != True]
- logger.debug(json.dumps(collection_names, indent=4))
- dict_plex = {'collections': []}
- for collection in collection_names:
- sanitized_collection = illegal_chars_regex.sub('', collection)
- dict_plex['collections'].append({'title': sanitized_collection})
- unmatched_assets = match_assets(assets, media, dict_plex)
- message = remove_assets(asset_folders, unmatched_assets, dry_run)
- print_output(message)
-
-if __name__ == "__main__":
- """
- Entry point for the script.
- """
- main()
diff --git a/python-scripts/requirements.txt b/python-scripts/requirements.txt
deleted file mode 100644
index a0637e1..0000000
--- a/python-scripts/requirements.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-fuzzywuzzy
-requests
-tqdm
-pyyaml
-python-Levenshtein
-unidecode
-qbittorrent-api
-plexapi
\ No newline at end of file
diff --git a/python-scripts/unmatched_assets.py b/python-scripts/unmatched_assets.py
deleted file mode 100644
index a47f143..0000000
--- a/python-scripts/unmatched_assets.py
+++ /dev/null
@@ -1,368 +0,0 @@
-# _ _ _ _ _ _
-# | | | | | | | | | | /\ | |
-# | | | |_ __ _ __ ___ __ _| |_ ___| |__ ___ __| |______ / \ ___ ___ ___| |_ _ __ _ _
-# | | | | '_ \| '_ ` _ \ / _` | __/ __| '_ \ / _ \/ _` |______/ /\ \ / __/ __|/ _ \ __| | '_ \| | | |
-# | |__| | | | | | | | | | (_| | || (__| | | | __/ (_| | / ____ \\__ \__ \ __/ |_ _| |_) | |_| |
-# \____/|_| |_|_| |_| |_|\__,_|\__\___|_| |_|\___|\__,_| /_/ \_\___/___/\___|\__(_) .__/ \__, |
-# | | __/ |
-# |_| |___/
-# ===========================================================================================================
-# Author: Drazzilb
-# Description: This script will check your media folders against your assets folder to see if there
-# are any folders that do not have a matching asset. It will also check your collections
-# against your assets folder to see if there are any collections that do not have a
-# matching asset. It will output the results to a file in the logs folder.
-# Usage: python3 unmatched_assets.py
-# Note: There is a limitation to how this script matches series assets: the main series poster
-# requires seasonal posters to be present. If a series does not have a seasonal poster,
-# its series poster will not be matched and the series will appear in the movies section.
-# Requirements: requests
-# License: MIT License
-# ===========================================================================================================
-
-script_version = "5.0.4"
-
-import os
-import re
-from pathlib import Path
-from plexapi.server import PlexServer
-from plexapi.exceptions import BadRequest
-from modules.logger import setup_logger
-from modules.config import Config
-from unidecode import unidecode
-from tqdm import tqdm
-import json
-import logging
-from modules.version import version
-from modules.discord import discord
-from modules.formatting import create_table
-
-script_name = "unmatched_assets"
-config = Config(script_name)
-log_level = config.log_level
-logger = setup_logger(log_level, script_name)
-version(script_name, script_version, arrpy_py_version=None, logger=logger, config=config)
-
-logging.getLogger("requests").setLevel(logging.WARNING)
-logging.getLogger('urllib3').setLevel(logging.WARNING)
-
-illegal_chars_regex = re.compile(r'[<>:"/\\|?*\x00-\x1f]+')
-year_regex = re.compile(r"\((19|20)\d{2}\).*")
-
-season_name_info = [
- "_Season",
- "Season"
-]
-
-def get_assets_files(assets_path):
- asset_folders = config.asset_folders
- asset_types = ['series', 'movies', 'collections']
- assets = {asset_type: [] for asset_type in asset_types}
- print("Getting assets files..., this may take a while.")
- files = os.listdir(assets_path)
- files = sorted(files, key=lambda x: x.lower())
- season_number = None
- def add_movies(title):
- assets['movies'].append({
- 'title': title
- })
-
- def extract_season_info(base_name):
- season_number = None
- season_number_match = re.search(r'\d{2}$', base_name)
- if season_number_match:
- if season_number_match.group(0) == '00':
- season_number = 'Specials'
- else:
- season_number = f"Season {season_number_match.group(0)}"
- return season_number
-
- def add_series(title_without_season_info, season_number):
- if any(d['title'] == title_without_season_info for d in assets['series']):
- if season_number:
- assets['series'][-1]['season_number'].append(season_number)
- else:
- assets['series'].append({
- 'title': title_without_season_info,
- 'season_number': []
- })
- if season_number:
- assets['series'][-1]['season_number'].append(season_number)
-
- if not asset_folders:
- for file in tqdm(files, desc=f'Sorting assets', total=len(files)):
- if file.startswith('.'):
- continue
- base_name, extension = os.path.splitext(file)
- if not re.search(r'\(\d{4}\)', base_name):
- assets['collections'].append({
- 'title': base_name
- })
- else:
- file_name = os.path.splitext(file)[0]
- title = base_name
- title = unidecode(title)
- title_without_season_info = title
- for season_info in season_name_info:
- title_without_season_info = re.sub(
- season_info + r'\d+', '', title_without_season_info)
- if any(file.startswith(file_name) and any(file_name + season_name in file for season_name in season_name_info) for file in files):
- season_number = extract_season_info(base_name)
- if season_number:
- add_series(title_without_season_info, season_number)
- else:
- add_movies(title)
- elif any(season_info in file for season_info in season_name_info):
- season_number = extract_season_info(base_name)
- if season_number:
- add_series(title_without_season_info, season_number)
- else:
- add_movies(title)
- else:
- assets['movies'].append({'title': title})
- else:
- for root, dirs, files in os.walk(assets_path):
- title = os.path.basename(root)
- if root == assets_path:
- continue
- if not files:
- continue
- if title.startswith('.'):
- continue
- if not re.search(year_regex, title):
- assets['collections'].append({
- 'title': title
- })
- else:
- if any(season_info in file for season_info in season_name_info for file in files):
- assets['series'].append({
- 'title': title,
- 'season_number': []
- })
- for file in files:
- if file.startswith('.'):
- continue
- base_name, extension = os.path.splitext(file)
- if any(season_info in file for season_info in season_name_info):
- season_number = extract_season_info(base_name)
- add_series(title, season_number)
- else:
- assets['movies'].append({'title': title})
- for asset_type in asset_types:
- assets[asset_type] = sorted(assets[asset_type], key=lambda x: x['title'])
- logger.debug("Assets:")
- logger.debug(json.dumps(assets, ensure_ascii=False, indent=4))
- return assets
-
-def get_media_folders(media_paths):
- asset_types = ['series', 'movies']
- media = {'movies': [], 'series': []}
- print("Getting media folder information..., this may take a while.")
- for media_path in media_paths:
- base_name = os.path.basename(os.path.normpath(media_path))
- for subfolder in sorted(Path(media_path).iterdir()):
- if subfolder.is_dir():
- for sub_sub_folder in sorted(Path(subfolder).iterdir()):
- if sub_sub_folder.is_dir():
- sub_sub_folder_base_name = os.path.basename(
- os.path.normpath(sub_sub_folder))
- if not (sub_sub_folder_base_name.startswith("Season ") or sub_sub_folder_base_name == "Specials"):
- logger.debug(
- f"Skipping '{sub_sub_folder_base_name}' because it is not a season folder.")
- continue
- if any(subfolder.name in s['title'] for s in media['series']):
- for series in media['series']:
- if subfolder.name in series['title']:
- series['season_number'].append(
- sub_sub_folder.name)
- else:
- media['series'].append({
- 'title': subfolder.name,
- 'season_number': [],
- 'path': base_name
- })
- for series in media['series']:
- if subfolder.name in series['title']:
- series['season_number'].append(
- sub_sub_folder.name)
- if not any(sub_sub_folder.is_dir() for sub_sub_folder in Path(subfolder).iterdir()):
- media['movies'].append({
- 'title': subfolder.name,
- 'path': base_name
- })
- media = dict(sorted(media.items()))
- logger.debug("Media Directories:")
- logger.debug(json.dumps(media, ensure_ascii=False, indent=4))
- return media
-
-def match_assets(assets, media, plex_collections):
- unmatched_media = {'unmatched_movies': [], 'unmatched_series': [], 'unmatched_collections': []}
- for series in tqdm(media['series'], desc='Matching series', total=len(media['series'])):
- asset_found = False
- media_title = re.sub(r'[^A-Za-z0-9]+', '', unidecode(series['title']).replace('&', 'and')).strip().lower()
- for asset in assets['series']:
- asset_title = re.sub(r'[^A-Za-z0-9]+', '', unidecode(asset['title']).replace('&', 'and')).strip().lower()
- if asset_title == media_title:
- asset_found = True
- missing_seasons = [
- season for season in series['season_number'] if season not in asset['season_number']]
- if missing_seasons:
- unmatched_media['unmatched_series'].append({
- 'title': series['title'],
- 'season_number': missing_seasons,
- 'missing_season': True,
- 'path': series['path']
- })
- break
- if not asset_found:
- unmatched_media['unmatched_series'].append({
- 'title': series['title'],
- 'season_number': series['season_number'],
- 'missing_season': False,
- 'path': series['path']
- })
- for media_movie in tqdm(media['movies'], desc='Matching movies', total=len(media['movies'])):
- asset_found = False
- media_title = re.sub(r'[^A-Za-z0-9]+', '', unidecode(media_movie['title']).replace('&', 'and')).strip().lower()
- for asset in assets['movies']:
- asset_title = re.sub(r'[^A-Za-z0-9]+', '', unidecode(asset['title']).replace('&', 'and')).strip().lower()
- if media_title == asset_title:
- asset_found = True
- break
- if not asset_found:
- unmatched_media['unmatched_movies'].append({
- 'title': media_movie['title'],
- 'path': media_movie['path']
- })
- for plex_collection in tqdm(plex_collections['collections'], desc='Matching collections', total=len(plex_collections['collections'])):
- asset_found = False
- for asset in assets['collections']:
- if unidecode(plex_collection['title']) == unidecode(asset['title']):
- asset_found = True
- break
- if not asset_found:
- unmatched_media['unmatched_collections'].append({
- 'title': plex_collection['title'],
- })
- logger.debug("Unmatched Assets:")
- logger.debug(json.dumps(unmatched_media, ensure_ascii=False, indent=4))
- return unmatched_media
-
-
-def print_output(unmatched_media, media, plex_collections):
- unmatched_movies_total = 0
- unmatched_series_total = 0
- unmatched_collections_total = 0
- unmatched_seasons = 0
- total_seasons = 0
- total_movies = len(media['movies'])
- total_series = len(media['series'])
- for series in media['series']:
- total_seasons += len(series['season_number'])
- total_collections = len(plex_collections['collections'])
- if unmatched_media['unmatched_movies']:
- logger.info("Unmatched Movies:")
- previous_path = None
- for movie in unmatched_media['unmatched_movies']:
- if movie['path'] != previous_path:
- logger.info(f"\t{movie['path'].capitalize()}")
- previous_path = movie['path']
- logger.info(f"\t\t{movie['title']}")
- unmatched_movies_total += 1
- logger.info(f"\t{unmatched_movies_total} unmatched movies found: Percent complete: ({100 - 100 * unmatched_movies_total / total_movies:.2f}% of total {total_movies}).")
- if unmatched_media['unmatched_series']:
- logger.info("Unmatched Series:")
- previous_path = None
- for series in unmatched_media['unmatched_series']:
- if series['path'] != previous_path:
- logger.info(f"\t{series['path'].capitalize()}")
- previous_path = series['path']
- if series['missing_season']:
- output = f"Series poster available but seasons listed are missing"
- logger.info(f"\t\t{series['title']}, {output}")
- for season in series['season_number']:
- logger.info(f"\t\t\t{season}")
- else:
- output = f"Series poster unavailable"
- logger.info(f"\t\t{series['title']}, {output}")
- for season in series['season_number']:
- logger.info(f"\t\t\t{season}")
- unmatched_series_total += 1
- unmatched_seasons += len(series['season_number'])
- logger.info(
- f"\t{unmatched_seasons} unmatched seasons found: Percent complete: ({100 - 100 * unmatched_seasons / total_seasons:.2f}% of total {total_seasons}).")
- logger.info(f"\t{unmatched_series_total} unmatched series found: Percent complete: ({100 - 100 * unmatched_series_total / total_series:.2f}% of total {total_series}).")
- logger.info(f"\t{unmatched_series_total} unmatched series & {unmatched_seasons} unmatched seasons. Grand percent complete: ({100 - 100 * (unmatched_series_total + unmatched_seasons) / (total_series + total_seasons):.2f}% of grand total {total_series + unmatched_seasons}).\n")
- if unmatched_media['unmatched_collections']:
- logger.info("Unmatched Collections:")
- for collection in unmatched_media['unmatched_collections']:
- logger.info(f"\t{collection['title']}")
- unmatched_collections_total += 1
- logger.info(f"\t{unmatched_collections_total} unmatched collections found: Percent complete: ({100 - 100 * unmatched_collections_total / unmatched_collections_total:.2f}% of total {total_collections}).\n")
- grand_total_movies = unmatched_movies_total
- grand_total_series = unmatched_series_total
- grand_total_seasons = unmatched_seasons
- grand_total_collections = unmatched_collections_total
- grand_total = grand_total_movies + grand_total_series + grand_total_seasons + grand_total_collections
- grand_percent_complete = max(0, 100 - 100 * (grand_total_movies + grand_total_series + grand_total_seasons + grand_total_collections) / (total_movies + total_series + total_seasons + total_collections))
-
- logger.info(f"Grand total: {grand_total_movies} unmatched movies, {grand_total_series} unmatched series, {grand_total_seasons} unmatched seasons, {grand_total_collections} unmatched collections. Grand percent complete: ({grand_percent_complete:.2f}% of grand total {grand_total}).\n")
-
-def main():
- """
- Main function for the script.
- """
- data = [
- ["Script Settings"]
- ]
- create_table(data, log_level="debug", logger=logger)
- logger.debug(f'{"Log level:":<20}{log_level if log_level else "Not set"}')
- logger.debug(f'{"Asset Folders:":<20}{config.asset_folders if config.asset_folders else "Not set"}')
- logger.debug(f'{"Assets path:":<20}{config.assets_path if config.assets_path else "Not set"}')
- logger.debug(f'{"Media paths:":<20}{config.media_paths if config.media_paths else "Not set"}')
- logger.debug(f'{"Library names:":<20}{config.library_names if config.library_names else "Not set"}')
- logger.debug(f'{"Ignore collections:":<20}{config.ignore_collections if config.ignore_collections else "Not set"}')
- logger.debug('*' * 40 + '\n')
- if config.plex_data:
- for data in config.plex_data:
- api_key = data.get('api', '')
- url = data.get('url', '')
- if config.library_names:
- app = PlexServer(url, api_key)
- else:
- logger.info("No library names specified in config.yml. Skipping Plex.")
- assets = get_assets_files(config.assets_path)
- media = get_media_folders(config.media_paths)
- collections = []
- if config.library_names and app:
- for library_name in config.library_names:
- try:
- library = app.library.section(library_name)
- logger.debug(library)
- collections += library.collections()
- except BadRequest:
- logger.error(f"Library {library_name} not found.")
- continue
- else:
- logger.info(
- "No library names specified in config.yml. Skipping collections.")
- collection_names = [
- collection.title for collection in collections if collection.smart != True]
- logger.debug(json.dumps(collection_names, indent=4))
- if config.ignore_collections:
- for collection in config.ignore_collections:
- if collection in collection_names:
- collection_names.remove(collection)
- dict_plex = {'collections': []}
- for collection in collection_names:
- sanitized_collection = illegal_chars_regex.sub('', collection)
- dict_plex['collections'].append({'title': sanitized_collection})
- unmatched_media = match_assets(assets, media, dict_plex)
- print_output(unmatched_media, media, dict_plex)
-
-if __name__ == "__main__":
- """
- Entry point for the script.
- """
- main()
diff --git a/python-scripts/upgradinatorr.py b/python-scripts/upgradinatorr.py
deleted file mode 100644
index 44fe343..0000000
--- a/python-scripts/upgradinatorr.py
+++ /dev/null
@@ -1,217 +0,0 @@
-# _ _ _ _ _ _____
-# | | | | | (_) | | | __ \
-# | | | |_ __ __ _ _ __ __ _ __| |_ _ __ __ _| |_ ___ _ __ _ __| |__) | _
-# | | | | '_ \ / _` | '__/ _` |/ _` | | '_ \ / _` | __/ _ \| '__| '__| ___/ | | |
-# | |__| | |_) | (_| | | | (_| | (_| | | | | | (_| | || (_) | | | |_ | | | |_| |
-# \____/| .__/ \__, |_| \__,_|\__,_|_|_| |_|\__,_|\__\___/|_| |_(_)|_| \__, |
-# | | __/ | __/ |
-# |_| |___/ |___/
-# ===================================================================================================
-# Author: Drazzilb
-# Description: A script to upgrade Sonarr/Radarr libraries to keep them in line with trash-guides
-# Usage: python3 /path/to/upgradinatorr.py
-# Requirements: requests, pyyaml
-# License: MIT License
-# ===================================================================================================
-
-script_version = "3.2.2"
-
-from modules.config import Config
-from modules.logger import setup_logger
-from modules.arrpy import StARR
-from modules.arrpy import arrpy_py_version
-from modules.version import version
-from modules.discord import discord
-from modules.formatting import create_table
-
-script_name = "upgradinatorr"
-config = Config(script_name)
-log_level = config.log_level
-logger = setup_logger(log_level, script_name)
-version(script_name, script_version, arrpy_py_version, logger, config)
-
-def check_all_tagged(all_media, tag_id, status, monitored):
- """
- Check if all media with a given tag is in a given status and monitored state.
- Parameters:
- all_media (list): A list of dictionaries representing all media in the Radarr/Sonarr instance.
- tag_id (int): The ID of the tag to check for
- status (str): The status to check for
- monitored (bool): Whether or not to check for monitored media
- Returns:
- True if all media with the given tag is in the given status and monitored state, False otherwise.
- """
- for media in all_media:
- if monitored != media['monitored']:
- continue
- if isinstance(status, str):
- if status != "all" and status != media['status']:
- continue
- elif isinstance(status, list):
- for stat in status:
- if stat == media['status']:
- break
- else:
- continue
- if tag_id not in media['tags']:
- return False
- return True
-
-def process_instance(instance_type, instance_name, count, tag_name, unattended, status, monitored, url, api, dry_run, reset):
- """
- Process a given instance.
- Parameters:
- instance_type (str): The type of instance to process.
- instance_name (str): The name of the instance to process.
- count (int): The number of items to process.
- tag_name (str): The name of the tag to use.
- unattended (bool): Whether or not to run the script unattended.
- status (str): The status to check for.
- monitored (bool): Whether or not to check for monitored media.
- url (str): The URL of the instance.
- api (str): The API key of the instance.
- dry_run (bool): Whether or not to run the script in dry run mode.
- reset (bool): Whether or not to reset the tag.
- """
- media_type = None
- tagged_count = 0
- untagged_count = 0
- total_count = 0
- app = StARR(url, api, logger)
- server_name = app.get_instance_name()
- data = [
- [server_name],
- ]
- create_table(data, log_level="info", logger=logger)
- data = [
- [f"{server_name} Settings"]
- ]
- create_table(data, log_level="debug", logger=logger)
- logger.debug('*' * 40)
- logger.debug(f"Script Settings for {instance_name}:")
- logger.debug(f'{"Count:":<20}{count if count else "Not Set"}')
- logger.debug(f'{"tag_name:":<20}{tag_name if tag_name else "Not Set"}')
- logger.debug(f'{"reset: {reset}":<20}{reset if reset else "Not Set"}')
- logger.debug(f'{"unattended:":<20}{unattended if unattended else "Not Set"}')
- logger.debug(f'{"URL:":<20}{url if url else "Not Set"}')
- logger.debug(f'{"API:":<20}{"*" * (len(api) - 5)}{api[-5:] if api else "Not Set"}')
- logger.debug(f'{"Instance Type:":<20}{instance_type if instance_type else "Not Set"}')
- logger.debug(f'{"ARR name:":<20}{server_name if instance_name else "Not Set"}')
- logger.debug('*' * 40 + '\n')
- media = app.get_media()
- if instance_type == "Radarr":
- media_type = "Movies"
- elif instance_type == "Sonarr":
- media_type = "Series"
- arr_tag_id = app.get_tag_id_from_name(tag_name)
- if not arr_tag_id:
- arr_tag_id = app.create_tag(tag_name)
- if arr_tag_id:
- logger.debug(f"Tag: {tag_name} | Tag ID: {arr_tag_id}")
- else:
- logger.debug(f"Tag: {tag_name} | Tag ID: {arr_tag_id}")
- all_tagged = check_all_tagged(media, arr_tag_id, status, monitored)
- all_media_ids = [item["id"] for item in media]
- if reset:
- if not dry_run:
- app.remove_tags(all_media_ids, arr_tag_id)
- logger.info(f'All of {instance_name} have had the tag {tag_name} removed.')
- all_tagged = False
- else:
- logger.info(f'All of {instance_name} would have had the tag {tag_name} removed.')
- all_tagged = False
- elif all_tagged and unattended:
- if not dry_run:
- app.remove_tags(all_media_ids, arr_tag_id)
- logger.info(f'All of {instance_name} have had the tag {tag_name} removed.')
- discord(None, logger, config, script_name, description=f"All of {instance_name} have had the tag {tag_name} removed.", color=0xFFA500, content=None)
- all_tagged = False
- else:
- logger.info(f'All of {instance_name} would have had the tag {tag_name} removed.')
- discord(None, logger, config, script_name, description=f"All of {instance_name} would have had the tag {tag_name} removed.", color=0xFFA500, content=None)
- all_tagged = False
- elif all_tagged and not unattended:
- logger.info(f'All of {instance_name} has been tagged with {tag_name}')
- logger.info("If you would like to remove the tag and re-run the script, please set reset to True or set unattended to True.")
- logger.info(f"Skipping {instance_name}...")
- discord(None, logger, config, script_name, description=f"All of {instance_name} has been tagged with {tag_name}, please set reset to True or set unattended to True to remove the tag and re-run the script, {instance_name} will be skipped.", color=0xFFA500, content=None)
- return
- if not all_tagged:
- if isinstance(status, str):
- untagged_media = [m for m in media if arr_tag_id not in m['tags'] and m['monitored'] == monitored and (status == "all" or status == m['status'])]
- elif isinstance(status, list):
- untagged_media = [m for m in media if arr_tag_id not in m['tags'] and m['monitored'] == monitored and any(stat == m['status'] for stat in status)]
- media_to_process = untagged_media[:count]
- media_ids_to_process = [item["id"] for item in media_to_process]
- if not dry_run:
- app.add_tags(media_ids_to_process, arr_tag_id)
- app.search_media(media_ids_to_process)
- for title in media_to_process:
- logger.info(f"Search request sent for '{title['title']}', this item has been tagged with '{tag_name}'")
- else:
- for title in media_to_process:
- logger.info(f"Search request would have been sent for '{title['title']}', this item would have been tagged with '{tag_name}'")
- for m in media:
- if (arr_tag_id in m["tags"]):
- tagged_count += 1
- elif (arr_tag_id not in m["tags"]):
- untagged_count += 1
- total_count = tagged_count + untagged_count
- tagged_percent = (tagged_count / total_count) * 100
- untagged_percent = (untagged_count / total_count) * 100
- logger.info(f'Total {media_type}: {total_count}, Tagged {media_type}: {tagged_count} ({tagged_percent:.2f}%), Untagged {media_type}: {untagged_count} ({untagged_percent:.2f}%)\n')
-
-def main():
- data = [
- ["Script Settings"]
- ]
- create_table(data, log_level="debug", logger=logger)
- logger.debug(f'{"Dry_run:":<20}{config.dry_run if config.dry_run else "False"}')
- logger.debug(f'{"Log level:":<20}{log_level if log_level else "INFO"}')
- logger.debug(f'*' * 40 + '\n')
- if config.dry_run:
- data = [
- ["Dry Run"],
- ["NO CHANGES WILL BE MADE"]
- ]
- create_table(data, log_level="info", logger=logger)
- instance_data = {
- 'Radarr': config.radarr_data,
- 'Sonarr': config.sonarr_data
- }
-
- for instance_type, instances in instance_data.items():
- for instance in instances:
- instance_name = instance['name']
- url = instance['url']
- api = instance['api']
- script_name = None
- if instance_type == "Radarr" and config.radarr:
- data = next((data for data in config.radarr if data['name'] == instance_name), None)
- if data:
- script_name = data['name']
- count = data.get('count', 1)
- tag_name = data.get('tag_name', 'Upgradinatorr')
- reset = data.get('reset', False)
- unattended = data.get('unattended', False)
- monitored = data.get('monitored', True)
- status = data.get('status', 'all')
- elif instance_type == "Sonarr" and config.sonarr:
- data = next((data for data in config.sonarr if data['name'] == instance_name), None)
- if data:
- script_name = data['name']
- count = data.get('count', 1)
- tag_name = data.get('tag_name', 'Upgradinatorr')
- reset = data.get('reset', False)
- unattended = data.get('unattended', False)
- monitored = data.get('monitored', True)
- status = data.get('status', 'all')
- if script_name and instance_name == script_name:
- process_instance(instance_type, instance_name, count, tag_name, unattended, status, monitored, url, api, config.dry_run, reset)
-
-if __name__ == '__main__':
- """
- Main entry point for the script.
- """
- main()
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..e9b6da1
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,22 @@
+-i https://pypi.org/simple
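+# Pinned dependency set; the environment markers look like the output of a Pipfile.lock export.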
+certifi==2024.2.2; python_version >= '3.6'
+charset-normalizer==3.3.2; python_full_version >= '3.7.0'
+croniter==2.0.2; python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'
+docker==7.0.0; python_version >= '3.8'
+idna==3.6; python_version >= '3.5'
+packaging==23.2; python_version >= '3.7'
+pillow==10.2.0; python_version >= '3.8'
+plexapi==4.15.10; python_version >= '3.8'
+prettytable==3.10.0; python_version >= '3.8'
+python-dateutil==2.9.0.post0; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'
+pytz==2024.1
+pyyaml==6.0.1; python_version >= '3.6'
+qbittorrent-api==2024.2.59; python_version >= '3.8'
+requests==2.31.0; python_version >= '3.7'
+ruamel.yaml==0.18.6; python_version >= '3.7'
+ruamel.yaml.clib==0.2.8; python_version < '3.13' and platform_python_implementation == 'CPython'
+six==1.16.0; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'
+tqdm==4.66.2; python_version >= '3.7'
+unidecode==1.3.8; python_version >= '3.5'
+urllib3==2.2.1; python_version >= '3.8'
+wcwidth==0.2.13
diff --git a/screenshots/tpdb.js.off.png b/screenshots/tpdb.js.off.png
deleted file mode 100644
index 708bf1e..0000000
Binary files a/screenshots/tpdb.js.off.png and /dev/null differ
diff --git a/screenshots/tpdb.js.on.png b/screenshots/tpdb.js.on.png
deleted file mode 100644
index 07d12ca..0000000
Binary files a/screenshots/tpdb.js.on.png and /dev/null differ
diff --git a/bash-scripts/misc-scripts/jduparr.sh b/scripts/jduparr.sh
old mode 100644
new mode 100755
similarity index 86%
rename from bash-scripts/misc-scripts/jduparr.sh
rename to scripts/jduparr.sh
index 77ea520..a7fab13
--- a/bash-scripts/misc-scripts/jduparr.sh
+++ b/scripts/jduparr.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# _ _____ _ _ _____ _____ _____
# (_) __ \| | | | __ \ /\ | __ \| __ \
@@ -15,8 +15,10 @@
# License: MIT License
# ====================================================
-data_dir='/path/to/data/dir' # This is the root directory for your media you want to check for duplicates
-log_dir='/path/to/log/dir' # This is the directory the logs gets written in
+
+data_dir='/path/to/data/dir' # Root directory of the media you want to check for duplicates
+log_dir='' # Directory the logs get written to; if not set, the script's directory is used
+
# Optional for notifications on Discord through Discord webhook or Notifiarr API.
webhook='' # Not required if you don't want to use notifications // Leave as is if not using notifications
@@ -90,44 +92,43 @@ check_config() {
response_code=$(curl --write-out "%{response_code}" --silent --output /dev/null -H "x-api-key: $apikey" "https://notifiarr.com/api/v1/user/validate")
else
if [ "$debug" == "true" ]; then
- echo "Checking webhook validity: $webhook" | tee -a "$log_dir/jduparr.log"
+ echo "Checking webhook validity: $webhook" | tee -a "$log_file"
fi
response_code=$(curl --write-out "%{response_code}" --silent --output /dev/null "$webhook")
fi
if [ "$debug" == "true" ]; then
- echo "Response: $response_code" | tee -a "$log_dir/jduparr.log"
+ echo "Response: $response_code" | tee -a "$log_file"
fi
-
if [ "$response_code" -eq 200 ]; then
echo "Webhook is valid"
else
echo "Webhook is not valid"
- echo "Backup will be created without a notification being sent"
+ echo "Please check your webhook and try again"
fi
fi
}
find_duplicates() {
+ log_file="$log_dir/jduparr/.jduparr.log"
start=$(date +%s)
- echo "Running jdupes" | tee "$log_dir/jduparr.log"
+ echo "Running jdupes pre-run" | tee "$log_file"
if [ $debug == "true" ]; then
- echo "Running jdupes for all directories" | tee -a "$log_dir/jduparr.log"
- echo -e "Media directory: ${data_dir}" | tee -a "$log_dir/jduparr.log"
- echo -e "jdupes -r -L -X onlyext:mp4,mkv,avi ${data_dir}" | tee -a "$log_dir/jduparr.log"
+ echo "Running jdupes for all directories" | tee -a "$log_file"
+ echo -e "Media directory: ${data_dir}" | tee -a "$log_file"
+ echo -e "jdupes -r -L -X onlyext:mp4,mkv,avi ${data_dir}" | tee -a "$log_file"
fi
- mkdir -p "$(dirname "$0")/../logs"
- echo "jDupes started" | tee -a "$log_dir/jduparr.log"
+ echo "Start re-linking files" | tee -a "$log_file"
results=$(jdupes -r -M -X onlyext:mp4,mkv,avi "${data_dir}")
if [[ $results != *"No duplicates found."* ]]; then
jdupes -r -L -X onlyext:mp4,mkv,avi "${data_dir}"
fi
- echo "jDupes completed" | tee -a "$log_dir/jduparr.log"
+ echo "jDupes completed" | tee -a "$log_file"
- # Call the function to parse the jduparr.log file
+ # Call the function to parse the jdupes log output
parse_jdupes_output
if [ $debug == "true" ]; then
- echo -e "jdupes output: ${results}" | tee -a "$log_dir/jduparr.log"
+ echo -e "jdupes output: ${results}" | tee -a "$log_file"
fi
end=$(date +%s)
}
@@ -141,7 +142,7 @@ parse_jdupes_output(){
parsed_log="No hardlinks created"
fi
if [ $debug == "true" ]; then
- echo -e "Parsed log: ${parsed_log}" | tee -a "$log_dir/jduparr.log"
+ echo -e "Parsed log: ${parsed_log}" | tee -a "$log_file"
fi
}
@@ -185,8 +186,8 @@ send_notification() {
discord_common_fields
payload
if [ "$debug" == "true" ]; then
- echo "$webhook" | tee -a "$log_dir/jduparr.log"
- echo "$payload" | tee -a "$log_dir/jduparr.log"
+ echo "$webhook" | tee -a "$log_file"
+ echo "$payload" | tee -a "$log_file"
fi
curl -s -H "Content-Type: application/json" -X POST -d "$payload" "$webhook"
fi
@@ -194,8 +195,8 @@ send_notification() {
notifiarr_common_fields
payload
if [ "$debug" == "true" ]; then
- echo "$webhook" | tee -a "$log_dir/jduparr.log"
- echo "$payload" | tee -a "$log_dir/jduparr.log"
+ echo "$webhook" | tee -a "$log_file"
+ echo "$payload" | tee -a "$log_file"
fi
curl -s -H "Content-Type: application/json" -X POST -d "$payload" "$webhook"
fi
@@ -350,15 +351,30 @@ unraid_notify(){
message="❌ Folder: $data_dir - Checked and validated\n➡️ Hardlinks recreated:\n$parsed_log\n\n"
fi
if [ "$debug" == "true" ]; then
- echo "$message" | tee -a "$log_dir/jduparr.log"
+ echo "$message" | tee -a "$log_file"
fi
/usr/local/emhttp/plugins/dynamix/scripts/notify -s "Script Execution Summary" -d "$message"
}
main() {
+
data_dir=${data_dir%/}
log_dir=${log_dir%/}
+
+ script_path=$(dirname "$0")
+ parent_dir=$(dirname "$script_path")
+
+ # If DOCKER_ENV is set
+ if [ -n "$DOCKER_ENV" ]; then
+ log_dir="${LOG_DIR/nohl_bash:-$parent_dir/logs/jduparr}"
+ else
+ log_dir="${log_dir:-$parent_dir/logs/jduparr}"
+ fi
+
+ echo "Log directory: $log_dir"
+
+ handle_options "$@"
check_duplicate_script
check_config
hex_to_decimal
@@ -370,42 +386,23 @@ main() {
if [ -n "$webhook" ]; then
send_notification
fi
- echo "$run_output" | tee -a "$log_dir/jduparr.log"
+ echo "$run_output" | tee -a "$log_file"
cleanup
}
-# Parse command line arguments
-TEMP=$(getopt -o w:b:n:h --long webhook:,bar-color:,bot-name:,help -n "$0" -- "$@")
-eval set -- "$TEMP"
-
-while [[ $# -gt 0 ]]; do
- case "$1" in
- -w | --webhook)
- webhook="$2"
- shift 2
- ;;
- -b | --bar-color)
- bar_color="$2"
- shift 2
- ;;
- -n | --bot-name)
- bot_name="$2"
- shift 2
- ;;
- -h | --help)
- display_help
- exit 0
- ;;
- --)
- shift
- break
- ;;
- *)
- echo "Invalid argument: $1" >&2
- display_help
- exit 1
- ;;
- esac
-done
+handle_options() {
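+ # Quick reference for the flags parsed below:
+ # -w webhook URL, -D data directory, -b bar color, -n bot name,
+ # -C channel ID, -L log directory, -h help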
+ while getopts ":w:D:b:n:h:L:C:" opt; do
+ case $opt in
+ w) webhook="$OPTARG" ;;
+ D) data_dir="$OPTARG" ;;
+ b) bar_color="$OPTARG" ;;
+ n) bot_name="$OPTARG" ;;
+ C) channel="$OPTARG" ;;
+ h) display_help ;;
+ L) log_dir="$OPTARG" ;;
+ \?) echo "Invalid option -$OPTARG" >&2 ;;
+ esac
+ done
+}
-main
\ No newline at end of file
+main "$@"
\ No newline at end of file
diff --git a/bash-scripts/misc-scripts/noHL.sh b/scripts/nohl_bash.sh
old mode 100644
new mode 100755
similarity index 80%
rename from bash-scripts/misc-scripts/noHL.sh
rename to scripts/nohl_bash.sh
index 5237b89..c0feb39
--- a/bash-scripts/misc-scripts/noHL.sh
+++ b/scripts/nohl_bash.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# _ _ _
# | | | | |
# _ __ ___ | |__| | |
@@ -12,25 +12,6 @@
# License: MIT License
# ====================================================
-# Define variables
-source_dir='/path/to/media/'
-log_dir='/path/to/log/files'
-
-# Define folders inside your source_dir to include in the search
-include=(
- #"Media directories"
- #"Movies"
- #"TV Shows"
- #"Anime"
-)
-
-# Define folders inside your source_dir to exclude from the search
-exclude=(
- #"Show Name"
- #"Show Name"
- #"Show Name"
-)
-
# Define variables for webhook notifications
webhook=false
webhook=''
@@ -90,6 +71,33 @@ check_config() {
fi
}
+log_file() {
+ # remove trailing slash from log_dir if it exists
+ log_dir=${log_dir%/}
+
+ script_path=$(dirname "$0")
+ parent_dir=$(dirname "$script_path")
+
+ # If DOCKER_ENV is set to true
+ if [ -n "$DOCKER_ENV" ]; then
+ log_dir="${LOG_DIR/nohl_bash:-$parent_dir/logs/nohl_bash}"
+ else
+ log_dir="${log_dir:-$parent_dir/logs}"
+ fi
+ # remove trailing slash from source_dir if it exists
+ source_dir=${source_dir%%/}
+
+ log_file=$log_dir/nohl_bash/nohl.log
+
+ echo "Log directory: $log_dir"
+ echo "Log file: $log_file"
+
+ # check if log file exists, if it does delete it
+ if [ -f "$log_file" ]; then
+ rm "$log_file"
+ fi
+}
+
# Function to check for hardlinks
check_hardlinks() {
exclude_folders=(
@@ -110,37 +118,40 @@ check_hardlinks() {
)
# Print starting message
echo "Starting Search..."
- # remove trailing slash from log_dir if it exists
- log_dir=${log_dir%%/}
- echo "log_dir: $log_dir"
- # check if the log_dir directory exists, if not create it
- if [ ! -d "$log_dir" ]; then
- echo "Directory doesn't exist, creating it"
- mkdir -p "$log_dir"
- else
- echo "Directory exists"
- fi
- # remove trailing slash from source_dir if it exists
- source_dir=${source_dir%%/}
-
- log_file=$log_dir/nohl.log
- # check if log file exists, if it does delete it
- if [ -f "$log_file" ]; then
- rm "$log_file"
- fi
# Iterate through the include array
for ((i = 0; i < ${#include[@]}; i++)); do
- echo "****** Searching ${include[$i]}... ******" | tee -a "$log_file"
+ echo -e "\n****** Searching ${include[$i]}... ******" | tee -a "$log_file"
# Construct the find command to search for files with hard link count of 1
# Exclude folders listed in 'exclude_folders' and 'exclude'
# Include specific file extensions listed in 'file_extensions'
- find "${source_dir}/${include[$i]}" -type d \( -name "${exclude_folders[*]}" -o -name "${exclude[*]}" \) -prune -o -type f -links 1 \( -iname "*.${file_extensions[0]}" -o -iname "*.${file_extensions[1]}" -o -iname "*.${file_extensions[2]}" -o -iname "*.${file_extensions[3]}" -o -iname "*.${file_extensions[4]}" -o -iname "*.${file_extensions[5]}" \) -printf "%P\n" | tee -a "$log_file"
- # Use awk and sed to remove unwanted characters from the file name and print it to /tmp/nohl.tmp
- find "${source_dir}/${include[$i]}" -type d \( -name "${exclude_folders[*]}" -o -name "${exclude[*]}" \) -prune -o -type f -links 1 \( -iname "*.${file_extensions[0]}" -o -iname "*.${file_extensions[1]}" -o -iname "*.${file_extensions[2]}" -o -iname "*.${file_extensions[3]}" -o -iname "*.${file_extensions[4]}" -o -iname "*.${file_extensions[5]}" \) -printf "%f\n" | awk -F"[" '{print $1}' | sed $'s/[^[:print:]\t]//g' | tee -a /tmp/nohl.tmp >/dev/null
- done
+ find_command="find \"$source_dir/${include[$i]}\" -type f -links 1 \( "
+
+ for l in "${file_extensions[@]}"; do
+ find_command+=" -name '*.$l' -o"
+ done
+
+ # Remove the trailing '-o' from the find command and close the parentheses
+ find_command=${find_command%-o}
+ find_command+=" \)"
+ for j in "${exclude_folders[@]}"; do
+ find_command+=" -not -path '*/$j/*'"
+ done
+
+ for k in "${exclude[@]}"; do
+ find_command+=" -not -path '*/$k/*'"
+ done
+
+ # Print the find command to the console
+ # echo "Find command: $find_command" | tee -a "$log_file"
+
+ # Execute the find command and store the output in a temporary file
+ eval "$find_command" > /tmp/nohl.tmp
+ base_path="$source_dir/${include[$i]}"
+ eval "$find_command" | sed "s|$base_path/||" | tee -a "$log_file"
+ done
}
# Function to send notification
@@ -299,6 +310,7 @@ payload() {
# Main function
main() {
handle_options "$@"
+ log_file
check_config
check_hardlinks
if [ -n "$webhook" ]; then
@@ -308,45 +320,25 @@ main() {
# Define function to handle options
handle_options() {
-
- # Define valid options
- valid_long_options=("webhook:" "bot-name:" "help")
- valid_short_options=("w:" "n:" "h")
-
- # Handle command-line options
- TEMP=$(getopt -o "${valid_short_options[*]}" --long "${valid_long_options[*]}" -n "$0" -- "$@")
- eval set -- "$TEMP"
- while true; do
- case "$1" in
- --webhook | -w)
- webhook="$2"
- shift 2
- ;;
- --bot-name | -n)
- bot_name=$2
- shift 2
- ;;
- -help | -h)
- display_help
- exit 0
- ;;
- --)
- shift
- break
- ;;
- *)
- echo "Internal error!"
- exit 1
- ;;
+ while getopts ":s:i:e:C:w:n:h" opt; do
+ case $opt in
+ s) source_dir="$OPTARG" ;;
+ i) IFS=',' read -r -a include <<< "$OPTARG" ;;
+ e) exclude+=("$OPTARG") ;;
+ w) webhook="$OPTARG" ;;
+ n) bot_name="$OPTARG" ;;
+ C) channel="$OPTARG" ;;
+ h) display_help ;;
+ \?) echo "Invalid option: -$OPTARG" >&2; display_help; exit 1 ;;
+ :) echo "Option -$OPTARG requires an argument." >&2; display_help; exit 1 ;;
esac
done
-
- # Check for any remaining arguments
- for arg in "$@"; do
- echo "Invalid argument: $arg" >&2
- display_help
- exit 1
- done
+ # Update exclude array with -e option
+ # Split the comma-separated string into the exclude array
+ # shellcheck disable=SC2128
+ if [ -n "$exclude" ]; then
+ IFS=',' read -r -a exclude <<< "$exclude"
+ fi
}
# Call the function
diff --git a/scripts/rclone.sh b/scripts/rclone.sh
new file mode 100755
index 0000000..3dffe37
--- /dev/null
+++ b/scripts/rclone.sh
@@ -0,0 +1,64 @@
+#!/usr/bin/env bash
+
+# Initialize variables with default values
+client_id=""
+client_secret=""
+sync_location=""
+folder_id=""
+token=""
+gdrive_sa=""
+verbose=true
+
+# Function to print usage information
+print_usage() {
+ echo "Usage: $0 -i -s -l -f -t "
+ echo "Options:"
+ echo " -i : Specify the client id."
+ echo " -s : Specify the client secret."
+ echo " -l : Specify the sync location folder."
+ echo " -f : Specify the google drive folder id."
+ echo " -t : OPTIONAL: Specify the token. not need if you have a service account."
+ echo " -g : OPTIONAL: Specify the google drive service account file location."
+}
+
+# Parse command line arguments
+while getopts ":i:s:l:f:t:g:" opt; do
+ case $opt in
+ i) client_id="$OPTARG";;
+ s) client_secret="$OPTARG";;
+ l) sync_location="$OPTARG";;
+ f) folder_id="$OPTARG";;
+ t) token="$OPTARG";;
+ g) gdrive_sa="$OPTARG";;
+ \?) echo "Invalid option: -$OPTARG" >&2; print_usage; exit 1;;
+ :) echo "Option -$OPTARG requires an argument." >&2; print_usage; exit 1;;
+ esac
+done
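+
+# Illustrative invocation (every value below is a placeholder):
+#   ./rclone.sh -i <client_id> -s <client_secret> -l /mnt/posters -f <gdrive_folder_id> -g /config/rclone/sa.json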
+
+# This creates a blank rclone Google Drive remote named "posters" that we'll use for this command.
+rclone config create posters drive config_is_local=false >&2
+
+#RCLONE SYNC COMMAND
+rclone sync \
+ --drive-client-id "$client_id" \
+ --drive-client-secret "$client_secret" \
+ --drive-token "$token" \
+ --drive-root-folder-id "$folder_id" \
+ --drive-service-account-file "$gdrive_sa" \
+ --fast-list \
+ --tpslimit=5 \
+ --no-update-modtime \
+ --drive-use-trash=false \
+ --drive-chunk-size=512M \
+ --check-first \
+ --bwlimit=80M \
+ --size-only \
+ --delete-after \
+ -v \
+ posters: "$sync_location"
+
+if [ "$verbose" = false ]; then
+ echo
+fi
+
+exit 0
\ No newline at end of file
diff --git a/start.sh b/start.sh
new file mode 100755
index 0000000..8e90ed4
--- /dev/null
+++ b/start.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+PUID=${PUID:-99}
+PGID=${PGID:-100}
+UMASK=${UMASK:-002}
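+
+# These are typically supplied by the container runtime, e.g. (illustrative):
+#   docker run -e PUID=1000 -e PGID=100 -e UMASK=002 ...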
+
+export RCLONE_CONFIG="${CONFIG_DIR}/rclone/rclone.conf"
+
+# Read the version number from the VERSION file in the current directory
+VERSION=$(cat "$(dirname "$0")/VERSION")
+
+echo "
+---------------------------------------------------------
+ _____ _____ _____
+ | __ \\ /\ | __ \\ / ____|
+ | | | | / \\ | |__) | (___
+ | | | |/ /\ \\ | ___/ \\___ \\
+ | |__| / ____ \\| | ____) |
+ |_____/_/ \_\\_| |_____/
+ (Drazzilb's Arr PMM Scripts)
+
+ PUID: ${PUID}
+ PGID: ${PGID}
+ UMASK: ${UMASK}
+ BRANCH: ${BRANCH}
+ VERSION: ${VERSION}
+ CONFIG_DIR: ${CONFIG_DIR}
+ RCLONE_CONFIG: ${RCLONE_CONFIG}
+ APPDATA_PATH: ${APPDATA_PATH}
+ LOG_DIR: ${LOG_DIR}
+---------------------------------------------------------
+"
+
+# Set umask
+umask "$UMASK"
+
+groupmod -o -g "$PGID" dockeruser
+usermod -o -u "$PUID" dockeruser
+
+
+# Download latest config files if they don't exist or are different
+file="config.sample.yml"
+local_file="$CONFIG_DIR/$file"
+if [ ! -f "$local_file" ] || [ "$(curl -s "https://raw.githubusercontent.com/Drazzilb08/userScripts/${BRANCH}/config/$file" | diff -q - "$local_file")" ]; then
+ echo "Downloading latest $file"
+ curl -s "https://raw.githubusercontent.com/Drazzilb08/userScripts/${BRANCH}/config/$file" -o "$local_file"
+else
+ echo "File $file is up to date"
+fi
+
+
+echo "Starting userScripts as $(whoami) running userscripts with UID: $PUID and GID: $PGID"
+
+chown -R ${PUID}:${PGID} /${CONFIG_DIR} /app > /dev/null 2>&1
+chmod -R 777 /${CONFIG_DIR} > /dev/null 2>&1
+
+# Run main.py as the dockeruser
+exec su -s /bin/bash -c "python3 /app/main.py" dockeruser
\ No newline at end of file
diff --git a/util/__init__.py b/util/__init__.py
new file mode 100755
index 0000000..e69de29
diff --git a/python-scripts/modules/arrpy.py b/util/arrpy.py
old mode 100644
new mode 100755
similarity index 58%
rename from python-scripts/modules/arrpy.py
rename to util/arrpy.py
index 7ed538f..1d96253
--- a/python-scripts/modules/arrpy.py
+++ b/util/arrpy.py
@@ -1,19 +1,22 @@
import sys
-import requests
-import logging
+import time
import json
+import logging
-arrpy_py_version = "1.2.7"
+try:
+ import requests
+except ImportError as e:
+ print(f"ImportError: {e}")
+ print("Please install the required modules with 'pip install -r requirements.txt'")
+ exit(1)
-logging.getLogger("qbittorrentapi").setLevel(logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
-logging.getLogger('urllib3').setLevel(logging.WARNING)
class StARR:
def __init__(self, url, api, logger):
"""
Initialize the StARR class.
- Parameters:
+ Args:
url (str): The URL of the ARR instance.
api (str): The API key to use to connect to the ARR instance.
logger (logging.Logger): a logger object for logging debug messages.
@@ -22,7 +25,7 @@ def __init__(self, url, api, logger):
"""
self.logger = logger
self.max_retries = 5
- self.timeout = 30
+ self.timeout = 60
self.url = url
self.api = api
self.headers = {
@@ -43,8 +46,8 @@ def __init__(self, url, api, logger):
self.logger.debug(f"Connected to {app_name} v{app_version} at {self.url}")
except requests.exceptions.ConnectionError as e:
self.logger.error(f"Could not connect to {self.url}: {e}")
- self.logger.error("Exiting script")
- sys.exit(1)
+ return None
+
def get_instance_name(self):
"""
@@ -67,9 +70,9 @@ def get_system_status(self):
def make_get_request(self, endpoint, headers=None):
"""
Make a GET request to the ARR instance.
- Parameters:
+ Args:
url (str): The URL to make the GET request to.
- params (dict): The parameters to pass to the GET request.
+ params (dict): The Args to pass to the GET request.
Returns:
dict: The JSON response from the GET request.
Raises:
@@ -83,16 +86,15 @@ def make_get_request(self, endpoint, headers=None):
return response.json()
except (requests.exceptions.Timeout, requests.exceptions.HTTPError) as ex:
self.logger.warning(f'GET request failed ({ex}), retrying ({i+1}/{self.max_retries})...')
- self.logger.error(f'GET request failed after {self.max_retries} retries with response: {response.text}')
+ self.logger.error(f'GET request failed after {self.max_retries} retries.')
self.logger.error(f"endpoint: {endpoint}")
- self.logger.error(f"response: {response}")
- self.logger.error(f"exiting script")
- sys.exit(1)
+ self.logger.error(f"response: {response.text}")
+ return None
def make_post_request(self, endpoint, headers=None, json=None):
"""
Make a POST request to the ARR instance.
- Parameters:
+ Args:
url (str): The URL to make the POST request to.
headers (dict): The headers to pass to the POST request.
json (dict): The JSON data to pass to the POST request.
@@ -109,18 +111,17 @@ def make_post_request(self, endpoint, headers=None, json=None):
return response.json()
except (requests.exceptions.Timeout, requests.exceptions.HTTPError) as ex:
self.logger.warning(f'POST request failed ({ex}), retrying ({i+1}/{self.max_retries})...')
- self.logger.error(f'GET request failed after {self.max_retries} retries with response: {response.text}')
+ self.logger.error(f'GET request failed after {self.max_retries} retries.')
self.logger.error(f"endpoint: {endpoint}")
self.logger.error(f"Payload: {json}")
- self.logger.error(f"response: {response}")
- self.logger.error(f"exiting script")
- sys.exit(1)
+ self.logger.error(f"response: {response.text}")
+ return None
def make_put_request(self, endpoint, headers=None, json=None):
"""
Make a PUT request to the ARR instance.
- Parameters:
+ Args:
url (str): The URL to make the PUT request to.
headers (dict): The headers to pass to the PUT request.
json (dict): The JSON data to pass to the PUT request.
@@ -137,17 +138,16 @@ def make_put_request(self, endpoint, headers=None, json=None):
return response.json()
except (requests.exceptions.Timeout, requests.exceptions.HTTPError) as ex:
self.logger.warning(f'PUT request failed ({ex}), retrying ({i+1}/{self.max_retries})...')
- self.logger.error(f'GET request failed after {self.max_retries} retries with response: {response.text}')
+ self.logger.error(f'GET request failed after {self.max_retries} retries.')
self.logger.error(f"endpoint: {endpoint}")
self.logger.error(f"Payload: {json}")
- self.logger.error(f"response: {response}")
- self.logger.error(f"exiting script")
- sys.exit(1)
+ self.logger.error(f"response: {response.text}")
+ return None
def make_delete_request(self, endpoint, json=None, headers=None):
"""
Make a DELETE request to the ARR instance.
- Parameters:
+ Args:
url (str): The URL to make the DELETE request to.
headers (dict): The headers to pass to the DELETE request.
Returns:
@@ -163,27 +163,12 @@ def make_delete_request(self, endpoint, json=None, headers=None):
return response
except (requests.exceptions.Timeout, requests.exceptions.HTTPError) as ex:
self.logger.warning(f'DELETE request failed ({ex}), retrying ({i+1}/{self.max_retries})...')
- self.logger.error(f'GET request failed after {self.max_retries} retries with response: {response.text}')
+ self.logger.error(f'GET request failed after {self.max_retries} retries.')
self.logger.error(f"endpoint: {endpoint}")
self.logger.error(f"Payload: {json}")
- self.logger.error(f"response: {response}")
- self.logger.error(f"exiting script")
- sys.exit(1)
+ self.logger.error(f"response: {response.text}")
+ return None
- def get_movie_fileid(self, movie_id):
- """
- Get the file for a movie.
- Parameters:
- movie_id (int): The ID of the movie.
- Returns:
- dict: The JSON response from the GET request.
- """
- endpoint = f"{self.url}/api/v3/moviefile/{movie_id}"
- response = self.make_get_request(endpoint)
- for r in response:
- if r['movieId'] == movie_id:
- print(f"Found file ID {r['id']} for movie ID {movie_id}")
- exit()
def get_media(self):
"""
@@ -211,7 +196,7 @@ def get_all_tags(self):
def create_tag(self, tag):
"""
Create a tag on the ARR instance.
- Parameters:
+ Args:
tag (dict): The tag to create.
Returns:
int: The ID of the created tag.
@@ -227,7 +212,7 @@ def create_tag(self, tag):
def add_tags(self, media_id, tag_id):
"""
Add a tag to a media item.
- Parameters:
+ Args:
media_id (int): The ID of the media item to add the tag to.
tag_id (int): The ID of the tag to add to the media item.
Returns:
@@ -238,10 +223,10 @@ def add_tags(self, media_id, tag_id):
if isinstance(media_id, int):
media_id = [media_id]
if self.instance_type == 'Sonarr':
- media = "series"
+ media_type = "series"
id_type = "seriesIds"
elif self.instance_type == 'Radarr':
- media = "movie"
+ media_type = "movie"
id_type = "movieIds"
payload = {
id_type: media_id,
@@ -249,23 +234,25 @@ def add_tags(self, media_id, tag_id):
"applyTags": "add"
}
self.logger.debug(f"Add tag payload: {payload}")
- endpoint = f"{self.url}/api/v3/{media}/editor"
+ endpoint = f"{self.url}/api/v3/{media_type}/editor"
return self.make_put_request(endpoint, json=payload)
def remove_tags(self, media_ids, tag_id):
"""
Remove a tag from all media.
- Parameters:
+ Args:
media_ids (list): A list of media IDs to remove the tag from.
tag_id (int): The ID of the tag to remove from the media.
+ Returns:
+ dict: The JSON response from the PUT request.
"""
id_type = None
media = None
if self.instance_type == 'Sonarr':
- media = "series"
+ media_type = "series"
id_type = "seriesIds"
elif self.instance_type == 'Radarr':
- media = "movie"
+ media_type = "movie"
id_type = "movieIds"
payload = {
id_type: media_ids,
@@ -273,13 +260,13 @@ def remove_tags(self, media_ids, tag_id):
"applyTags": "remove"
}
self.logger.debug(f"Remove tag payload: {payload}")
- endpoint = f"{self.url}/api/v3/{media}/editor"
+ endpoint = f"{self.url}/api/v3/{media_type}/editor"
return self.make_put_request(endpoint, json=payload)
def get_rename_list(self, media_id):
"""
Get a list of media items to rename.
- Parameters:
+ Args:
all_media (list): A list of media objects.
Returns:
list: A list of media items to rename.
@@ -295,7 +282,7 @@ def get_rename_list(self, media_id):
def rename_media(self, media_ids):
"""
Rename a media item.
- Parameters:
+ Args:
media_ids (list): A list of media IDs to attempt rename.
"""
id_type = None
@@ -313,81 +300,125 @@ def rename_media(self, media_ids):
self.logger.debug(f"Rename payload: {payload}")
endpoint = f"{self.url}/api/v3/command"
return self.make_post_request(endpoint, json=payload)
+
+ def rename_folders(self, media_ids, root_folder_path):
+ """
+ Move media items to a new root folder and rename their folders.
+ Args:
+ media_ids (list): A list of media IDs to move/rename.
+ root_folder_path (str): The root folder path to move the media to.
+ """
+ id_type = None
+ if self.instance_type == 'Sonarr':
+ media_type = "series"
+ id_type = "seriesIds"
+ elif self.instance_type == 'Radarr':
+ media_type = "movie"
+ id_type = "movieIds"
+ payload = {
+ id_type: media_ids,
+ "moveFiles": True,
+ "rootFolderPath": root_folder_path,
+ }
+ self.logger.debug(f"Payload: {payload}")
+ endpoint = f"{self.url}/api/v3/{media_type}/editor"
+ return self.make_put_request(endpoint, json=payload)
+
+ def wait_for_command(self, command_id):
+ """
+ Wait for a command to complete.
+ Args:
+ command_id (int): The ID of the command to wait for.
+ Returns:
+ bool: True if the command completed successfully, False otherwise.
+ """
+ print(f"Waiting for command to complete...")
+ while True:
+ endpoint = f"{self.url}/api/v3/command/{command_id}"
+ response = self.make_get_request(endpoint)
+ if response['status'] == 'completed':
+ return True
+ elif response['status'] == 'failed':
+ return False
+ time.sleep(5)
- def refresh_media(self, media_ids):
+ def refresh_items(self, media_ids):
"""
Refresh a media item.
- Parameters:
+ Args:
media_id (int): The ID of the media item to refresh.
"""
- name_type = None
- id_type = None
+ if isinstance(media_ids, int):
+ media_ids = [media_ids]
if self.instance_type == 'Sonarr':
- if isinstance(media_ids, list) and len(media_ids) == 1:
- id_type = "seriesId"
- media_ids = int(media_ids[0])
- elif isinstance(media_ids, int):
- id_type = "seriesId"
- media_ids = int(media_ids)
- else:
- id_type = "seriesIds"
name_type = "RefreshSeries"
+ media_type = "seriesIds"
elif self.instance_type == 'Radarr':
- if isinstance(media_ids, list) and len(media_ids) == 1:
- id_type = "movieId"
- media_ids = int(media_ids[0])
- elif isinstance(media_ids, int):
- id_type = "movieId"
- media_ids = int(media_ids)
- else:
- id_type = "movieIds"
name_type = "RefreshMovie"
+ media_type = "movieIds"
payload = {
"name": name_type,
- id_type: media_ids
+ media_type: media_ids
}
self.logger.debug(f"Refresh payload: {payload}")
endpoint = f"{self.url}/api/v3/command"
return self.make_post_request(endpoint, headers=self.headers, json=payload)
- def search_media(self, media_id):
+ def refresh_media(self):
+ """
+ Refresh all media on the instance.
+ """
+ if self.instance_type == 'Sonarr':
+ name_type = "RefreshSeries"
+ elif self.instance_type == 'Radarr':
+ name_type = "RefreshMovie"
+ payload = {
+ "name": name_type,
+ }
+ self.logger.debug(f"Refresh payload: {payload}")
+ endpoint = f"{self.url}/api/v3/command"
+ return self.make_post_request(endpoint, headers=self.headers, json=payload)
+
+ def search_media(self, media_ids):
"""
Search for a media item.
- Parameters:
+ Args:
media_id (int): The ID of the media item to search for.
"""
name_type = None
id_type = None
- self.logger.debug(f"Media ID: {media_id}")
+ self.logger.debug(f"Media ID: {media_ids}")
endpoint = f"{self.url}/api/v3/command"
+ if isinstance(media_ids, int):
+ media_ids = [media_ids]
if self.instance_type == 'Sonarr':
- for id in media_id:
- name_type = "SeriesSearch"
- id_type = "seriesId"
+ name_type = "SeriesSearch"
+ id_type = "seriesId"
+ for id in media_ids:
payload = {
"name": name_type,
id_type: id
}
- self.logger.debug(f"Search payload: {payload}")
- self.make_post_request(endpoint, json=payload)
elif self.instance_type == 'Radarr':
name_type = "MoviesSearch"
id_type = "movieIds"
- id = media_id
- # convert to list if not already
- if isinstance(id, int):
- id = [id]
payload = {
"name": name_type,
- id_type: id
+ id_type: media_ids
}
- self.logger.debug(f"Search payload: {payload}")
- self.make_post_request(endpoint, json=payload)
+ self.logger.debug(f"Search payload: {payload}")
+ result = self.make_post_request(endpoint, headers=self.headers, json=payload)
+ if result:
+ return result
+ else:
+ self.logger.error(f"Search failed for media ID: {media_ids}")
+ return None
def search_season(self, media_id, season_number):
"""
Search for a series by ID.
- Parameters:
+ Args:
media_id (int): The ID of the series to search for
Raises:
Exception: If the API call to search for the series fails
@@ -400,27 +431,58 @@ def search_season(self, media_id, season_number):
endpoint = f"{self.url}/api/v3/command"
return self.make_post_request(endpoint, json=payload)
+ def get_episode_data(self, media_id):
+ """
+ Get data for an episode.
+ Args:
+ media_id (int): The ID of the series to get data for
+ Returns:
+ list: A list of dictionaries representing the episodes for the series
+ """
+ endpoint = f"{self.url}/api/v3/episodefile?seriesId={media_id}"
+ return self.make_get_request(endpoint, headers=self.headers)
+
+ def get_episode_data_by_season(self, media_id, season_number):
+ """
+ Get episode data for a specific season of a series.
+ Args:
+ media_id (int): The ID of the series to get data for
+ season_number (int): The season number to get data for
+ Returns:
+ list: A list of dictionaries representing the episodes for the season
+ """
+ endpoint = f"{self.url}/api/v3/episode?seriesId={media_id}&seasonNumber={season_number}"
+ return self.make_get_request(endpoint, headers=self.headers)
+
def get_season_data(self, media_id):
"""
Get data for a season.
- Parameters:
+ Args:
media_id (int): The ID of the series to get data for
Returns:
list: A list of dictionaries representing the episodes for the season
"""
endpoint = f"{self.url}/api/v3/episode?seriesId={media_id}"
return self.make_get_request(endpoint, headers=self.headers)
+
+ def delete_episode_file(self, episode_file_id):
+ """
+ Delete an episode file.
+ Args:
+ episode_file_id (int): The ID of the episode file to delete.
+ """
+ endpoint = f"{self.url}/api/v3/episodefile/{episode_file_id}"
+ return self.make_delete_request(endpoint)
- def delete_episode_files(self, media_id):
+ def delete_episode_files(self, episode_file_ids):
"""
Delete all episode files for a series.
- Parameters:
+ Args:
media_id (int): The ID of the series to delete episode files for
"""
- if isinstance(media_id, int):
- media_id = [media_id]
+ if isinstance(episode_file_ids, int):
+ episode_file_ids = [episode_file_ids]
payload = {
- "episodeFileIds": media_id
+ "episodeFileIds": episode_file_ids
}
self.logger.debug(f"Delete episode files payload: {payload}")
endpoint = f"{self.url}/api/v3/episodefile/bulk"
@@ -429,7 +491,7 @@ def delete_episode_files(self, media_id):
def delete_movie_file(self, media_id):
"""
Delete a media item.
- Parameters:
+ Args:
media_id (int): The ID of the media item to delete.
"""
endpoint = f"{self.url}/api/v3/moviefile/{media_id}"
@@ -438,7 +500,7 @@ def delete_movie_file(self, media_id):
def search_episodes(self, episode_ids):
"""
Search for an episode.
- Parameters:
+ Args:
media_id (int): The ID of the series to search for
fileIds (int): The episode number to search for
"""
@@ -449,17 +511,121 @@ def search_episodes(self, episode_ids):
}
self.logger.debug(f"Search payload: {payload}")
return self.make_post_request(endpoint, json=payload)
+
+ def get_movie_data(self, media_id):
+ """
+ Get data for a movie.
+ Args:
+ media_id (int): The ID of the movie to get data for
+ Returns:
+ dict: A dictionary representing the movie
+ """
+ endpoint = f"{self.url}/api/v3/moviefile?movieId={media_id}"
+ return self.make_get_request(endpoint, headers=self.headers)
+
+ def get_grab_history(self, media_id, instance_type):
+ """
+ Get grab history.
+ Args:
+ media_id (int): The ID of the media item to get the history for.
+ instance_type (str): The type of instance to get the history for.
- def get_queue(self):
+ Returns:
+ dict: A dictionary representing the history.
+ """
+ if instance_type == 'radarr':
+ url_addon = f"movie?movieId={media_id}&eventType=grabbed&includeMovie=false"
+ elif instance_type == 'sonarr':
+ url_addon = f"series?seriesId={media_id}&eventType=grabbed&includeSeries=false&includeEpisode=false"
+ endpoint = f"{self.url}/api/v3/history/{url_addon}"
+ return self.make_get_request(endpoint, headers=self.headers)
+
+ def get_import_history(self, media_id, instance_type):
+ """
+ Get import history.
+ Args:
+ media_id (int): The ID of the media item to get the history for.
+ instance_type (str): The type of instance to get the history for.
+
+ Returns:
+ dict: A dictionary representing the history.
+ """
+ if instance_type == 'radarr':
+ url_addon = f"movie?movieId={media_id}&eventType=downloadFolderImported&includeMovie=false"
+ elif instance_type == 'sonarr':
+ url_addon = f"series?seriesId={media_id}&eventType=downloadFolderImported&includeSeries=false&includeEpisode=false"
+ endpoint = f"{self.url}/api/v3/history/{url_addon}"
+ return self.make_get_request(endpoint, headers=self.headers)
+
+ def get_season_grab_history(self, media_id, season):
+ """
+ Get season grab history.
+ Args:
+ media_id (int): The ID of the media item to get the history for.
+ season (int): The season to get the history for.
+
+ Returns:
+ dict: A dictionary representing the history.
+ """
+ url_addon = f"series?seriesId={media_id}&seasonNumber={season}&eventType=grabbed&includeSeries=false&includeEpisode=false"
+ endpoint = f"{self.url}/api/v3/history/{url_addon}"
+ return self.make_get_request(endpoint, headers=self.headers)
+
+ def get_season_import_history(self, media_id, season):
+ """
+ Get season import history.
+ Args:
+ media_id (int): The ID of the media item to get the history for.
+ season (int): The season to get the history for.
+
+ Returns:
+ dict: A dictionary representing the history.
+ """
+ url_addon = f"series?seriesId={media_id}&seasonNumber={season}&eventType=downloadFolderImported&includeSeries=false&includeEpisode=false"
+ endpoint = f"{self.url}/api/v3/history/{url_addon}"
+ return self.make_get_request(endpoint, headers=self.headers)
+
+ def get_queue(self, instance_type):
"""
Get the queue.
+ Args:
+ instance_type (str): The type of instance to get the queue for.
+ Returns:
+ dict: A dictionary representing the queue.
"""
- endpoint = f"{self.url}/api/v3/queue"
+ if instance_type == 'radarr':
+ url_addon = "page=1&pageSize=200&includeMovie=true"
+ elif instance_type == 'sonarr':
+ url_addon = "page=1&pageSize=200&includeSeries=true"
+
+ endpoint = f"{self.url}/api/v3/queue?{url_addon}"
return self.make_get_request(endpoint, headers=self.headers)
def get_quality_profile_names(self):
"""
Get the names of all quality profiles.
+ Returns:
+ dict: A dictionary of quality profile names and IDs.
"""
dict_of_names_and_ids = {}
endpoint = f"{self.url}/api/v3/qualityprofile"
@@ -472,6 +638,9 @@ def get_quality_profile_names(self):
def refresh_queue(self):
"""
Refresh the queue.
+
+ Returns:
+ dict: A dictionary representing the queue.
"""
endpoint = f"{self.url}/api/v3/command"
payload = {
@@ -483,6 +652,8 @@ def refresh_queue(self):
def get_health(self):
"""
Get the health status.
+ Returns:
+ dict: A dictionary representing the health status.
"""
endpoint = f"{self.url}/api/v3/health"
return self.make_get_request(endpoint, headers=self.headers)
@@ -490,7 +661,7 @@ def get_health(self):
def delete_media(self, media_id, instance_type):
"""
Delete a media item.
- Parameters:
+ Args:
media_id (int): The ID of the media item to delete.
"""
endpoint = None
@@ -502,25 +673,36 @@ def delete_media(self, media_id, instance_type):
def get_tag_id_from_name(self, tag_name):
"""
- Get the ID of a tag from its name.
- Parameters:
+ Get the ID of a tag from its name. If the tag does not yet
+ exist, it will be created.
+ Args:
tag_name (str): The name of the tag to get the ID for.
Returns:
int: The ID of the tag.
"""
- all_tags = self.get_all_tags()
+ all_tags = self.get_all_tags() or []
tag_name = tag_name.lower()
+
for tag in all_tags:
if tag["label"] == tag_name:
tag_id = tag["id"]
return tag_id
- return None
+
+ # If the tag doesn't already exist, create it.
+ tag_id = self.create_tag(tag_name)
+ return tag_id
- def remove_item_from_queue(self, queue_id, blocklist):
+ def remove_item_from_queue(self, queue_ids):
"""
Remove an item from the queue.
- Parameters:
+ Args:
queue_id (int): The ID of the queue item to remove.
"""
- endpoint = f"{self.url}/api/v3/queue/{queue_id}?removeFromClient=false&blocklist={blocklist}&skipRedownload=true"
- return self.make_delete_request(endpoint)
\ No newline at end of file
+ # If a single queue ID was passed, convert it to a list
+ if isinstance(queue_ids, int):
+ queue_ids = [queue_ids]
+ payload = {
+ "ids": queue_ids
+ }
+ endpoint = f"{self.url}/api/v3/queue/bulk?removeFromClient=false&blocklist=false&skipRedownload=false&changeCategory=false"
+ return self.make_delete_request(endpoint, payload)
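+
+# Example usage (illustrative; the URL, API key and logger are placeholders):
+#   radarr = StARR("http://localhost:7878", "<api_key>", logger)
+#   tag_id = radarr.get_tag_id_from_name("daps")
+#   radarr.add_tags([123, 456], tag_id)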
diff --git a/util/call_script.py b/util/call_script.py
new file mode 100755
index 0000000..17eda43
--- /dev/null
+++ b/util/call_script.py
@@ -0,0 +1,42 @@
+from subprocess import PIPE, STDOUT, CalledProcessError, CompletedProcess, Popen
+
+from util.utility import redact_sensitive_info
+
+def call_script(command, logger):
+ """
+ Run a bash script
+
+ Args:
+ command (list): The command to run
+ logger (logger): The logger to use for logging output
+
+ Returns:
+ CompletedProcess: The completed process
+ """
+ # Join the command into a single string and redact any secrets before printing it
+ redacted_command = str(' '.join(command))
+ redacted_command = redact_sensitive_info(redacted_command)
+
+ print(f"\nRunning command:\n\n{redacted_command}\n\n")
+
+ # Execute the command and capture the output
+ with Popen(command, text=True, stdout=PIPE, stderr=STDOUT) as process:
+ for line in process.stdout:
+ # Log each line of the output using the provided logger
+ logger.info(line[:-1]) # Exclude the newline character
+
+ # Retrieve the return code of the process
+ retcode = process.poll()
+
+ # If there is an error in the process, raise a CalledProcessError
+ if retcode:
+ raise CalledProcessError(retcode, process.args)
+
+ # Return the CompletedProcess object
+ return CompletedProcess(process.args, retcode)
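+
+# Example usage (illustrative; the command and logger are placeholders):
+#   call_script(["bash", "scripts/rclone.sh", "-l", "/mnt/posters"], logger)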
diff --git a/util/config.py b/util/config.py
new file mode 100755
index 0000000..932736d
--- /dev/null
+++ b/util/config.py
@@ -0,0 +1,177 @@
+import pathlib
+import yaml
+import os
+from pathlib import Path
+from util.utility import *
+import time
+from ruamel.yaml import YAML
+
+
+# Set the config file path
+if os.environ.get('DOCKER_ENV'):
+ # Set the config path
+ config_path = os.getenv('CONFIG_DIR', '/config')
+ # Set the config file path
+ config_file_path = os.path.join(config_path, "config.yml")
+else:
+ # Set the config file path
+ config_file_path = os.path.join(pathlib.Path(__file__).parents[1], "config/config.yml")
+
+
+# Wait for the config file to be created
+while not os.path.isfile(config_file_path):
+ print(f"Config file not found. Retrying in 60 seconds...")
+ time.sleep(60)
+
+
+class Config:
+ """
+ A class to represent the config file
+ """
+ def __init__(self, script_name):
+ """
+ Initialize the config file
+ """
+ self.config_path = config_file_path
+ self.script_name = script_name
+ self.load_config()
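+
+ # Example (illustrative; the script name is a placeholder):
+ #   config = Config(script_name="poster_renamerr")
+ #   level = config.log_level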
+
+ def load_config(self):
+ """
+ Load the config file
+
+ Args:
+ None
+
+ Returns:
+ None
+ """
+ # Open the YAML config file and load its contents
+ try:
+ with open(self.config_path, "r") as file:
+ config = yaml.safe_load(file)
+ except FileNotFoundError:
+ print(f"Config file not found at {self.config_path}")
+ return
+ except yaml.parser.ParserError as e:
+ print(f"Error parsing config file: {e}")
+ return
+
+ # Set various attributes from the loaded config
+ self.instances_config = config['instances'] # Instance configurations
+ self.bash_config = config['bash_scripts'] # Bash script configurations
+ self.scheduler = config['schedule'] # Scheduler configurations
+ self.discord = config.get('discord', {}) # Discord configurations, if available
+
+ # If the script_name attribute exists, set script-specific configurations
+ if self.script_name:
+ self.script_config = config.get(f'{self.script_name}', None) # Script-specific config
+ try:
+ self.log_level = self.script_config.get('log_level', 'info').lower() # Log level
+ except AttributeError:
+ print(f"Invalid log level '{self.script_config.get('log_level', 'info')}', defaulting to 'info'")
+ self.log_level = 'info'
+ self.dry_run = self.script_config.get('dry_run', False) # Dry run setting
+ self.sync_gdrive = self.script_config.get('sync_gdrive', False) # Google Drive sync setting
+
+ # Set specific configurations for different services
+ self.radarr_config = self.instances_config.get('radarr', {}) # Radarr configurations
+ self.sonarr_config = self.instances_config.get('sonarr', {}) # Sonarr configurations
+ self.qbit_config = self.instances_config.get('qbittorrent', {}) # qBittorrent configurations
+ self.plex_config = self.instances_config.get('plex', {}) # Plex configurations
+
+
+ # Add data to config file
+ def add_to_config(self, add_type, container, logger, message=None):
+ """
+ Add data to the config file for the backup_appdata key
+
+ Args:
+ add_type (str): "stop", "no_stop", or "exclude"
+ container: The container object whose name will be added to the config file
+ logger (logger): The logger to use for logging output
+ message (str, optional): An optional note stored alongside the container name
+
+ Returns:
+ None
+ """
+ yaml = YAML()
+
+ # Load the config file
+ with open(self.config_path, "r") as file:
+ config = yaml.load(file)
+
+ container_name = container.name
+ container_name_message = f"{container_name}\t\t\t{message}" if message else f"{container_name}"
+ stop_list = config['backup_appdata']['stop_list']
+ no_stop_list = config['backup_appdata']['no_stop_list']
+ exclusion_list = config['backup_appdata']['exclusion_list']
+
+ logger.debug(f"Adding {container_name} to {add_type} list")
+ # Add the container to the stop_list or no_stop_list
+ if add_type == "stop":
+ if not stop_list:
+ stop_list = [container_name_message]
+ elif container_name_message not in stop_list:
+ stop_list.append(container_name_message)
+ elif add_type == "no_stop":
+ if not no_stop_list:
+ no_stop_list = [container_name_message]
+ elif container_name_message not in no_stop_list:
+ no_stop_list.append(container_name_message)
+ elif add_type == "exclude":
+ if not exclusion_list:
+ exclusion_list = [container_name_message]
+ elif container_name_message not in exclusion_list:
+ exclusion_list.append(container_name_message)
+
+ # Add the new data to the config file
+ config['backup_appdata']['stop_list'] = stop_list
+ config['backup_appdata']['no_stop_list'] = no_stop_list
+ config['backup_appdata']['exclusion_list'] = exclusion_list
+
+ with open(self.config_path, "w") as file:
+ yaml.dump(config, file)
+
+ def remove_from_config(self, containers_to_remove, logger):
+ """
+ Removes container names from the backup_appdata stop_list, no_stop_list, or
+ exclusion_list if the container has been removed from the system
+
+ Args:
+ containers_to_remove (list): Names of containers to remove from the config lists
+ logger (logger): The logger to use for logging output
+
+ Returns:
+ None
+ """
+ yaml = YAML()
+
+ # Load the config file
+ with open(self.config_path, "r") as file:
+ config = yaml.load(file)
+
+ stop_list = config['backup_appdata']['stop_list']
+ no_stop_list = config['backup_appdata']['no_stop_list']
+ exclusion_list = config['backup_appdata']['exclusion_list']
+
+ for container in containers_to_remove:
+ if container in stop_list:
+ logger.debug(f"Removing {container} from stop_list")
+ stop_list.remove(container)
+ if container in no_stop_list:
+ logger.debug(f"Removing {container} from no_stop_list")
+ no_stop_list.remove(container)
+ if container in exclusion_list:
+ logger.debug(f"Removing {container} from exclusion_list")
+ exclusion_list.remove(container)
+
+ # Add the new data to the config file
+ config['backup_appdata']['stop_list'] = stop_list
+ config['backup_appdata']['no_stop_list'] = no_stop_list
+ config['backup_appdata']['exclusion_list'] = exclusion_list
+
+ with open(self.config_path, "w") as file:
+ yaml.dump(config, file)
\ No newline at end of file
diff --git a/util/discord.py b/util/discord.py
new file mode 100755
index 0000000..a69f821
--- /dev/null
+++ b/util/discord.py
@@ -0,0 +1,229 @@
+try:
+ import requests
+except ImportError as e:
+ print(f"ImportError: {e}")
+ print("Please install the required modules with 'pip install -r requirements.txt'")
+ exit(1)
+
+import random
+import json
+from datetime import datetime
+from util.config import Config
+
+config = Config(script_name="discord")
+
+def get_discord_data(script_name, logger):
+ """
+ Gather discord data from config file
+
+ Args:
+ config (dict): The config file
+ script_name (str): The name of the script
+ logger (logger): The logger to use for logging output
+
+ Returns:
+ webhook (str): The webhook to send the notification to
+ channel_id (str): The channel ID to send the notification to
+ """
+ # Access the 'discord' section in the config
+ discord = config.discord
+
+ # Get the 'notifiarr_webhook' from the config
+ notifiarr_webhook = discord.get('notifiarr_webhook', None)
+
+ # Get the script-specific notification info from the config based on the script name
+ script_notification_info = discord.get(script_name, {})
+
+ # Get the 'channel_id' and 'discord_webhook' from the script-specific notification info
+ channel_id = script_notification_info.get('channel_id', None)
+
+ # Get the 'discord_webhook' from the script-specific notification info
+ discord_webhook = script_notification_info.get('discord_webhook', None)
+
+ # If notifiarr_webhook is missing, log an error and return
+ if notifiarr_webhook:
+ # If channel_id is missing, log an error and return
+ if not channel_id:
+ logger.error("Discord channel ID is missing. Cannot send Discord notification.")
+ return None, None
+ else:
+ return notifiarr_webhook, channel_id
+ else:
+ return discord_webhook, None
+
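+# The lookup above assumes a config layout roughly like this (illustrative values):
+#   discord:
+#     notifiarr_webhook: https://notifiarr.com/api/v1/notification/passthrough/<key>
+#     some_script_name:
+#       channel_id: 123456789
+#       discord_webhook: https://discord.com/api/webhooks/...
+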
+def get_message_data(logger):
+ """
+ Gather message data from GitHub
+
+ Args:
+ logger (logger): The logger to use for logging output
+
+ Returns:
+ random_joke (str): A random joke from the jokes.txt file
+ timestamp (str): The timestamp when the joke was retrieved
+ """
+ # Send a GET request to GitHub to retrieve the jokes.txt file
+ response = requests.get("https://raw.githubusercontent.com/Drazzilb08/userScripts/master/jokes.txt")
+
+ # Check if the request was successful (status code 200)
+ if response.status_code == 200:
+ # Split the received text into individual lines
+ jokes = response.text.splitlines()
+
+ # Choose a random joke from the lines
+ random_joke = random.choice(jokes)
+
+ # Get the current timestamp in ISO format
+ timestamp = datetime.utcnow().isoformat()
+ else:
+ # Log an error if the request failed
+ logger.error(f"Failed to get jokes from GitHub. Status code: {response.status_code} with response: {response.text}")
+ random_joke = "Error: Failed to get jokes from GitHub."
+
+ return random_joke, timestamp
+
+def discord_check(script_name):
+ """
+ Check if Discord notifications are enabled for the script
+
+ Args:
+ config (dict): The config file
+ script_name (str): The name of the script
+
+ Returns:
+ enabled (bool): Whether or not Discord notifications are enabled for the script
+ """
+ # Get the 'discord' section from the config
+ discord = config.discord
+
+ notifiarr_webhook = discord.get('notifiarr_webhook', None)
+
+ # Get the script-specific notification info from the config based on the script name
+ script_notification_info = discord.get(script_name, {})
+
+ # Get the 'channel_id' and 'discord_webhook' from the script-specific notification info
+ channel_id = script_notification_info.get('channel_id', None)
+
+ # Get the 'discord_webhook' from the script-specific notification info
+ discord_webhook = script_notification_info.get('discord_webhook', None)
+
+ if discord_webhook or (channel_id and notifiarr_webhook):
+ # If enabled is True, return True
+ return True
+ else:
+ # If enabled is False, return False
+ return False
+
+def discord(fields, logger, script_name, description, color, content):
+ """
+ Send a Discord notification
+
+ Args:
+ fields (list): The fields to include in the notification
+ logger (logger): The logger to use for logging output
+ config (dict): The config file
+ script_name (str): The name of the script
+ description (str): The description of the notification
+ color (str): The color of the notification
+ content (str): The content of the notification
+
+ Returns:
+ None
+ """
+ # Get the webhook and channel_id from the config
+ webhook, channel_id = get_discord_data(script_name, logger)
+ script_name = script_name.replace("_", " ").title()
+ if webhook:
+ # Get the random joke and timestamp
+ random_joke, timestamp = get_message_data(logger)
+ try:
+ # Check if the webhook is a Notifiarr webhook
+ if fields and webhook:
+ # Check if the webhook is a Notifiarr webhook
+ if webhook.startswith("https://notifiarr.com/api/v1/notification/passthrough"):
+ # Create the payload to send to Notifiarr
+ if channel_id:
+ # Format the color as a 6-digit hex string
+ color = f"{color:06x}"
+ payload = {
+ "notification": {
+ "update": False,
+ "name": f"{script_name}",
+ },
+ "discord": {
+ "color": f"{color}",
+ "text": {
+ "description": f"{description}",
+ "content": f"{content}",
+ "fields": fields,
+ "footer": f"Powered by: Drazzilb | {random_joke}",
+ },
+ "ids": {
+ "channel": f"{channel_id}",
+ }
+ }
+ }
+ # If channel_id is missing, log an error and return
+ else:
+ logger.error("Discord channel ID is missing. Cannot send Discord notification.")
+ logger.error(f"Discord channel ID: {channel_id}")
+ return
+ # if description is empty then remove it from the payload
+ if not description:
+ del payload["discord"]["text"]["description"]
+ # if content is empty then remove it from the payload
+ if not content:
+ del payload["discord"]["text"]["content"]
+ # if fields is empty then remove it from the payload
+ if not fields:
+ del payload["discord"]["text"]["fields"]
+ # If the webhook is not a Notifiarr webhook, create the payload to send to Discord
+ else:
+ payload = {
+ "username": "Notification Bot",
+ "embeds": [
+ {
+ "title": f"{script_name.capitalize()}",
+ "description": f"{description}",
+ "color": color,
+ "content": f"{content}",
+ "fields": fields,
+ "footer": {
+ "text": f"Powered by: Drazzilb | {random_joke}"
+ },
+ "timestamp": timestamp,
+ }
+ ]
+ }
+ # if description is empty then remove it from the payload
+ if not description:
+ del payload["embeds"][0]["description"]
+ # if fields is empty then remove it from the payload
+ if not fields:
+ del payload["embeds"][0]["fields"]
+ # if content is empty then remove it from the payload
+ if not content:
+ del payload["embeds"][0]["content"]
+ logger.debug(json.dumps(payload, indent=4))
+ response = requests.post(webhook, json=payload)
+ # Check if the request was successful (status code 204 or 200)
+ if response.status_code == 204 or response.status_code == 200:
+ logger.debug("Discord notification sent.")
+ return
+ # If the request failed, log an error
+ else:
+ logger.error(f"Payload: {json.dumps(payload, indent=4)})")
+ logger.error(f"Webhook failed: {webhook}")
+ logger.error(f"Discord notification failed with status code: {response.status_code} with response: {response.text}")
+ # If the payload is empty, log an error
+ else:
+ logger.error("Payload is empty. Cannot send Discord notification.")
+ # If there is an error, log it
+ except Exception as e:
+ logger.error(f"Error: {e}")
+ return
+ # If the webhook is missing, log an error
+ else:
+ logger.debug(f"Discord webhook is missing. Cannot send Discord notification.")
+ return
\ No newline at end of file
diff --git a/util/logger.py b/util/logger.py
new file mode 100644
index 0000000..136ea22
--- /dev/null
+++ b/util/logger.py
@@ -0,0 +1,98 @@
+import os
+import time
+import logging
+import pathlib
+from logging.handlers import RotatingFileHandler
+from util.version import get_version
+from util.utility import create_bar
+
+def setup_logger(log_level, script_name, max_logs=9):
+ """
+ Setup the logger.
+
+ Parameters:
+ log_level (str): The log level to use
+ script_name (str): The name of the script
+ max_logs (int): Maximum number of log files to keep
+
+ Returns:
+ A logger object for logging messages.
+ """
+
+ if os.environ.get('DOCKER_ENV'):
+ config_dir = os.getenv('CONFIG_DIR', '/config')
+ log_dir = f"{config_dir}/logs/{script_name}"
+ else:
+ log_dir = f"{os.path.join(pathlib.Path(__file__).parents[1], 'logs', script_name)}"
+
+ if log_level not in ['debug', 'info', 'critical']:
+ log_level = 'info'
+ print(f"Invalid log level '{log_level}', defaulting to 'info'")
+
+ # Create the log directory if it doesn't exist
+ if not os.path.exists(log_dir):
+ os.makedirs(log_dir)
+
+
+ # Define the log file path with the current date
+ log_file = f"{log_dir}/{script_name}.log"
+
+ # Check if log file already exists
+ if os.path.isfile(log_file):
+ for i in range(max_logs - 1, 0, -1):
+ old_log = f"{log_dir}/{script_name}.log.{i}"
+ new_log = f"{log_dir}/{script_name}.log.{i + 1}"
+ if os.path.exists(old_log):
+ os.rename(old_log, new_log)
+ os.rename(log_file, f"{log_dir}/{script_name}.log.1")
+
+ # Create a logger object with the script name
+ logger = logging.getLogger(script_name)
+ logger.propagate = False
+
+ # Set the log level based on the provided parameter
+ log_level = log_level.upper()
+ if log_level == 'DEBUG':
+ logger.setLevel(logging.DEBUG)
+ elif log_level == 'INFO':
+ logger.setLevel(logging.INFO)
+ elif log_level == 'CRITICAL':
+ logger.setLevel(logging.CRITICAL)
+ else:
+ logger.critical(f"Invalid log level '{log_level}', defaulting to 'INFO'")
+ logger.setLevel(logging.INFO)
+
+ # Define the log message format
+ formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s: %(message)s', datefmt='%m/%d/%y %I:%M %p')
+
+ # Create a RotatingFileHandler for log files
+ handler = RotatingFileHandler(log_file, delay=True, mode="w", backupCount=max_logs)
+ handler.setFormatter(formatter)
+
+ # Add the file handler to the logger
+ logger.addHandler(handler)
+
+ # Configure console logging with the specified log level
+ console_handler = logging.StreamHandler()
+ if log_level == 'DEBUG':
+ console_handler.setLevel(logging.DEBUG)
+ elif log_level == 'INFO':
+ console_handler.setLevel(logging.INFO)
+ elif log_level == 'CRITICAL':
+ console_handler.setLevel(logging.CRITICAL)
+
+ # Add the console handler to the logger
+ logger.addHandler(console_handler)
+
+ # Overwrite previous logger if exists
+ logging.getLogger(script_name).handlers.clear()
+ logging.getLogger(script_name).addHandler(handler)
+ logging.getLogger(script_name).addHandler(console_handler)
+
+ # Insert version number at the head of every log file
+ version = get_version()
+ name = script_name.replace("_", " ").upper()
+ logger.info(create_bar(f"{name} Version: {version}"))
+
+
+ return logger
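+
+# Example (illustrative; the script name is a placeholder):
+#   logger = setup_logger("info", "poster_renamerr")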
diff --git a/util/scheduler.py b/util/scheduler.py
new file mode 100755
index 0000000..5e78472
--- /dev/null
+++ b/util/scheduler.py
@@ -0,0 +1,94 @@
+from datetime import datetime
+from croniter import croniter
+from dateutil import tz
+
+next_run_times = {}
+
+def check_schedule(script_name, schedule, logger):
+ """
+ Checks if a given name is currently active based on its schedule.
+
+ Args:
+ name: Name of the schedule.
+ script_name: Name of the script. (Used for cron schedule)
+ schedule: Schedule string in the format "frequency(data)".
+ - frequency: Can be "hourly", "daily", "weekly", or "monthly".
+ - data: Depends on the frequency:
+ - hourly: Hour of the day (e.g., "10").
+ - daily: Time of the day (e.g., "11:05"). Can be multiple times separated by commas.
+ - weekly: Day of the week and time of the day (e.g., "monday@12:00", "tuesday@12:30"). Can be multiple times separated by commas.
+ - monthly: Day of the month and time of the day (e.g., "15@12:00").
+ - range: Date range (e.g., "01/01-12/31"). Can be multiple ranges separated by pipes.
+ - cron: Cron expression (e.g., "0 0 * * *").
+
+ Returns:
+ bool: True if the schedule is active, False otherwise.
+ """
+
+ try:
+ now = datetime.now()
+ frequency, data = schedule.split("(")
+ data = data[:-1]
+ if frequency == "hourly":
+ return int(data) == now.minute
+ elif frequency == "daily":
+ times = data.split("|")
+ for time in times:
+ hour, minute = map(int, time.split(":"))
+ if now.hour == hour and now.minute == minute:
+ return True
+ elif frequency == "weekly":
+ days = [day.split("@")[0] for day in data.split("|")]
+ times = [day.split("@")[1] for day in data.split("|")]
+ if now.strftime("%A").lower() in days:
+ for time, day in zip(times, days):
+ hour, minute = map(int, time.split(":"))
+ if now.hour == hour and now.minute == minute and (now.strftime("%A").lower() == day or
+ (now.strftime("%A").lower() == "sunday" and day == "saturday")):
+ return True
+ elif frequency == "monthly":
+ day, time = data.split("@")
+ if now.day == int(day) and now.hour == int(time.split(":")[0]) and now.minute == int(time.split(":")[1]):
+ return True
+ elif frequency == "range":
+ ranges = data.split("|")
+ for start_end in ranges:
+ start, end = start_end.split("-")
+ start_month, start_day = map(int, start.split("/"))
+ end_month, end_day = map(int, end.split("/"))
+ current_month, current_day = map(int, now.strftime("%m/%d").split("/"))
+
+ if start_month <= current_month <= end_month and start_day <= current_day <= end_day:
+ return True
+ elif frequency == "cron":
+ local_tz = tz.tzlocal()
+ local_date = datetime.now(local_tz)
+
+ current_time = datetime.now(local_tz).replace(second=0, microsecond=0)
+
+ logger.debug(f"Local time: {current_time}")
+
+ next_run = next_run_times.get(script_name)
+ if next_run is None:
+ next_run = croniter(data, local_date).get_next(datetime)
+ next_run_times[script_name] = next_run
+
+ logger.debug(f"Next run for {script_name}: {next_run}")
+
+ if next_run <= current_time:
+ next_run = croniter(data, local_date).get_next(datetime)
+ next_run_times[script_name] = next_run
+
+ logger.debug(f"Next run for {script_name}: {next_run}\n")
+
+ return True
+ else:
+ logger.debug(f"Next run time for script {script_name}: {next_run} is in the future\n")
+ return False
+
+
+
+ except ValueError as e:
+ logger.error(f"Invalid schedule: {schedule} for script: {script_name}")
+ logger.error (f"Error: {e}")
+ return False
\ No newline at end of file
diff --git a/util/utility.py b/util/utility.py
new file mode 100755
index 0000000..e41f930
--- /dev/null
+++ b/util/utility.py
@@ -0,0 +1,880 @@
+import re
+import os
+import json
+from pathlib import Path
+import subprocess
+import math
+import pathlib
+
+try:
+ import html
+ from unidecode import unidecode
+ from tqdm import tqdm
+ from plexapi.exceptions import NotFound
+except ImportError as e:
+ print(f"ImportError: {e}")
+ print("Please install the required modules with 'pip install -r requirements.txt'")
+ exit(1)
+
+# Regex to remove illegal characters from file names
+illegal_chars_regex = re.compile(r'[<>:"/\\|?*\x00-\x1f]+')
+# Regex to extract the year from parentheses in the title
+year_regex = re.compile(r"\s?\((\d{4})\)(?!.*Collection).*")
+# Regex to extract the year from parentheses in the folder name
+folder_year_regex = re.compile(r"(.*)\s\((\d{4})\)")
+# Regex to remove special characters from the title
+remove_special_chars = re.compile(r'[^a-zA-Z0-9\s]+')
+# Season number regex
+season_number_regex = re.compile(r'[-_]\s*Season\s*(\d+)')
+
+# List of season name info to match against
+season_name_info = [
+ "_Season",
+ " - Season ",
+ " - Specials"
+]
+
+# List of words to remove from titles
+words_to_remove = [
+ "(US)",
+ "(UK)",
+ "(AU)",
+ "(CA)",
+ "(NZ)",
+ "(FR)",
+]
+
+# List of prefixes and suffixes to remove from titles for comparison
+prefixes = [
+ "The",
+ "A",
+ "An"
+]
+
+# List of prefixes and suffixes to remove from titles for comparison
+suffixes = [
+ "Collection",
+]
+
+def normalize_file_names(file_name):
+ """
+ Normalize file names for comparison
+
+ Args:
+ file_name (str): The file name to normalize
+
+ Returns:
+ str: The normalized file name
+ """
+
+ # remove extension
+ file_name, extension = os.path.splitext(file_name)
+
+ # Remove specific words from the title
+ for word in words_to_remove:
+ file_name = file_name.replace(word, '')
+
+ # Remove illegal characters from the file name using regex
+ file_name = illegal_chars_regex.sub('', file_name)
+
+ # Convert special characters to ASCII equivalent
+ file_name = unidecode(html.unescape(file_name))
+
+ # Remove trailing whitespaces
+ file_name = file_name.rstrip()
+
+ # Remove leading whitespaces
+ file_name = file_name.lstrip()
+
+ # Replace '&' with 'and'
+ file_name = file_name.replace('&', 'and')
+
+ # Remove special characters using regex
+ file_name = re.sub(remove_special_chars, '', file_name).lower()
+
+ # Remove spaces in the file name
+ file_name = file_name.replace(' ', '')
+
+ return file_name
+
+def normalize_titles(title):
+ """
+ Normalize titles for comparison
+
+ Args:
+ title (str): The title to normalize
+
+ Returns:
+ str: The normalized title
+ """
+
+ # Initialize the normalized title as the original title
+ normalized_title = title
+
+ # Remove specific words from the title
+ for word in words_to_remove:
+ normalized_title = normalized_title.replace(word, '')
+
+ # Extract the year from parentheses in the title
+ normalized_title = year_regex.sub('', normalized_title)
+
+ # Remove illegal characters from the title using regex
+ normalized_title = illegal_chars_regex.sub('', normalized_title)
+
+ # Convert special characters to ASCII equivalent
+ normalized_title = unidecode(html.unescape(normalized_title))
+
+ # Remove trailing whitespaces
+ normalized_title = normalized_title.rstrip()
+
+ # Remove leading whitespaces
+ normalized_title = normalized_title.lstrip()
+
+ # Replace '&' with 'and'
+ normalized_title = normalized_title.replace('&', 'and')
+
+ # Remove special characters using regex
+ normalized_title = re.sub(remove_special_chars, '', normalized_title).lower()
+
+ # Remove spaces in the title
+ normalized_title = normalized_title.replace(' ', '')
+
+ return normalized_title
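+
+# Illustrative example (hypothetical title): normalize_titles("The Office (US) (2005)")
+# strips the region tag, the year, punctuation and case so that titles from different
+# sources can be compared directly.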
+
+def _is_asset_folders(folder_path):
+ """
+ Check if the folder contains asset folders
+
+ Args:
+ folder_path (str): The path to the folder to check
+
+ Returns:
+ bool: True if the folder contains asset folders, False otherwise
+ """
+ if not os.path.exists(folder_path):
+ return False
+ else:
+ for item in os.listdir(folder_path):
+ if item.startswith('.') or item.startswith('@') or item == "tmp":
+ continue
+ if os.path.isdir(os.path.join(folder_path, item)):
+ return True
+ return False
+
+def categorize_files(folder_path):
+ """
+ Categorize files into movies, collections, and series
+
+ Args:
+ folder_path (str): The path to the folder to sort
+ asset_folders (bool): Whether or not to sort by folders
+
+ Returns:
+ list: A list of dictionaries containing the sorted files
+ """
+
+ asset_folders = _is_asset_folders(folder_path)
+
+ assets_dict = []
+
+ # Define asset types to categorize
+ folder_path = folder_path.rstrip('/') # Remove trailing slash from folder path
+ base_name = os.path.basename(folder_path) # Get the base folder name
+
+ # If asset_folders is False, categorize files within the folder
+ if not asset_folders:
+ # Get list of files in the folder
+ try:
+ files = [f.name for f in os.scandir(folder_path) if f.is_file()]
+ except FileNotFoundError:
+ return None
+ files = sorted(files, key=lambda x: x.lower()) # Sort files alphabetically
+ if files:
+ # Loop through each file in the folder
+ for file in tqdm(files, desc=f"Processing '{base_name}' folder", total=len(files), disable=None, leave=True):
+ if file.startswith('.') or "(N-A)" in file:
+ continue # Skip hidden files or files with "(N-A)" in the name
+
+ # Extract information from the file name
+ base_name, extension = os.path.splitext(file)
+ title = re.sub(year_regex, '', base_name)
+ normalize_title = normalize_titles(base_name)
+
+ try:
+ year = int(year_regex.search(base_name).group(1))
+ except:
+ year = None
+
+ file_path = f"{folder_path}/{file}" # Full file path
+
+ if not year: # If year is not found in the file name
+ # Categorize as a collection
+ # Additional processing for collection items
+ no_prefix = [re.sub(r'\b{}\b'.format(prefix), '', title).strip() for prefix in prefixes if title.startswith(prefix) and re.sub(r'\b{}\b'.format(prefix), '', title).strip() != title]
+ no_suffix = [re.sub(r'\b{}\b'.format(suffix), '', title).strip() for suffix in suffixes if title.endswith(suffix) and re.sub(r'\b{}\b'.format(suffix), '', title).strip() != title]
+ no_prefix_normalized = [normalize_titles(re.sub(r'\b{}\b'.format(prefix), '', title).strip()) for prefix in prefixes if title.startswith(prefix) and normalize_titles(re.sub(r'\b{}\b'.format(prefix), '', title).strip()) != normalize_title]
+ no_suffix_normalized = [normalize_titles(re.sub(r'\b{}\b'.format(suffix), '', title).strip()) for suffix in suffixes if title.endswith(suffix) and normalize_titles(re.sub(r'\b{}\b'.format(suffix), '', title).strip()) != normalize_title]
+ assets_dict.append({
+ 'title': title,
+ 'year': year,
+ 'normalized_title': normalize_title,
+ 'no_prefix': no_prefix,
+ 'no_suffix': no_suffix,
+ 'no_prefix_normalized': no_prefix_normalized,
+ 'no_suffix_normalized': no_suffix_normalized,
+ 'path': None,
+ 'files': [file_path],
+ })
+ else:
+ # Categorize as a series
+ if any(file.startswith(base_name) and any(base_name + season_name in file for season_name in season_name_info) for file in files):
+ # Check if the series entry already exists in the assets dictionary
+ series_entry = next((d for d in assets_dict if d['normalized_title'] == normalize_title and d['year'] == year), None)
+ if series_entry is None:
+ # If not, add a new series entry
+ series_entry = {
+ 'title': title,
+ 'year': year,
+ 'normalized_title': normalize_title,
+ 'files': [file_path],
+ 'season_numbers': []
+ }
+ assets_dict.append(series_entry)
+ else:
+ # Add the file path to the current series entry
+ if file_path not in series_entry['files']:
+ if normalize_file_names(file_path) not in [normalize_file_names(f) for f in series_entry['files']]:
+ series_entry['files'].append(file_path)
+
+ elif any(word in file for word in season_name_info):
+ # Check if the series entry already exists in the assets dictionary
+ series_entry = next((d for d in assets_dict if d['normalized_title'] == normalize_title and d['year'] == year), None)
+ if series_entry is None:
+ # If not, add a new series entry
+ series_entry = {
+ 'title': title,
+ 'year': year,
+ 'normalized_title': normalize_title,
+ 'files': [file_path],
+ 'season_numbers': []
+ }
+ assets_dict.append(series_entry)
+ else:
+ # Add the file path to the current series entry
+ if file_path not in series_entry['files']:
+ if normalize_file_names(file_path) not in [normalize_file_names(f) for f in series_entry['files']]:
+ series_entry['files'].append(file_path)
+
+ # Categorize as a movie
+ else:
+ assets_dict.append({
+ 'title': title,
+ 'year': year,
+ 'normalized_title': normalize_title,
+ 'path': None,
+ 'files': [file_path],
+ })
+ else:
+ return None
+
+ # Add Season number information to the series entries
+ if assets_dict:
+ # Get Season numbers from each series entry
+ series = [d for d in assets_dict if 'season_numbers' in d]
+ if series:
+ for series_entry in series:
+ for file in series_entry['files']:
+ if " - Specials" in file:
+ series_entry['season_numbers'].append(0)
+
+ # Check for season numbers in the file name using regex
+ elif re.search(season_number_regex, file):
+ match = re.search(season_number_regex, file)
+ if match:
+ series_entry['season_numbers'].append(int(match.group(1)))
+ # Sort the season numbers and file paths for the current series entry
+ if series_entry is not None:
+ # Remove duplicates
+ series_entry['season_numbers'] = list(set(map(int, series_entry['season_numbers'])))
+ series_entry['season_numbers'].sort()
+ # Remove duplicates
+ series_entry['files'] = list(set(series_entry['files']))
+ series_entry['files'].sort()
+ else: # If asset_folders is True, sort assets based on folders
+ try:
+ for dir_entry in tqdm(os.scandir(folder_path), desc='Processing posters', total=len(os.listdir(folder_path)), disable=None):
+ if dir_entry.is_dir():
+ dir = dir_entry.path
+ files = [f.name for f in os.scandir(dir) if f.is_file()]
+ if dir == folder_path or dir.endswith("tmp"):
+ continue # Skip root folder and temporary folders
+
+ base_name = os.path.basename(dir)
+ title = re.sub(year_regex, '', base_name)
+ normalize_title = normalize_titles(base_name)
+
+ try:
+ year = int(year_regex.search(base_name).group(1))
+ except (AttributeError, ValueError): # No year found in the folder name, or it is not numeric
+ year = None
+
+ if not year: # If year is not found in the folder name
+ # Categorize as a collection
+ # Process files within the folder and add to the collection
+ files = []
+ for file in os.listdir(dir):
+ if file.startswith('.'):
+ continue
+ files.append(f"{dir}/{file}")
+ assets_dict.append({
+ 'title': title,
+ 'year': year,
+ 'normalized_title': normalize_title,
+ 'no_prefix': [title.replace(prefix, '').strip() for prefix in prefixes if title.startswith(prefix)],
+ 'no_suffix': [title.replace(suffix, '').strip() for suffix in suffixes if title.endswith(suffix)],
+ 'no_prefix_normalized': [normalize_titles(title.replace(prefix, '').strip()) for prefix in prefixes if title.startswith(prefix)],
+ 'no_suffix_normalized': [normalize_titles(title.replace(suffix, '').strip()) for suffix in suffixes if title.endswith(suffix)],
+ 'path': dir,
+ 'files': files,
+ })
+ else:
+ # If year is found in the folder name
+ # Check if the folder contains series or movies based on certain criteria
+ # (presence of Season information for series, etc. - specific to the context)
+ if any("Season" in file for file in files):
+ list_of_season_numbers = []
+ list_of_files = []
+ for file in files:
+ if file.startswith('.'):
+ continue
+ if "season" in file.lower():
+ # Match the season number case-insensitively so names like "season 01" do not raise
+ season_numbers = int(re.search(r'Season\s*(\d+)', file, re.IGNORECASE).group(1))
+ if season_numbers not in list_of_season_numbers:
+ list_of_season_numbers.append(season_numbers)
+ if file not in list_of_files:
+ list_of_files.append(f"{dir}/{file}")
+ if "poster" in file.lower():
+ list_of_files.append(f"{dir}/{file}")
+
+ # sort the season numbers and files
+ list_of_season_numbers.sort()
+ list_of_files.sort()
+
+ # Add series data to the assets dictionary
+ assets_dict.append({
+ 'title': title,
+ 'year': year,
+ 'normalized_title': normalize_title,
+ 'season_numbers': list_of_season_numbers,
+ 'path': dir,
+ 'files': list_of_files,
+ })
+
+ else:
+ files = []
+ for file in os.listdir(dir):
+ if file.startswith('.'):
+ continue
+ files.append(f"{dir}/{file}")
+ assets_dict.append({
+ 'title': title,
+ 'year': year,
+ 'normalized_title': normalize_title,
+ 'path': dir,
+ 'files': files,
+ })
+ except FileNotFoundError:
+ return None
+
+ return assets_dict
+
+def create_table(data):
+ """
+ Create a table from the provided data
+
+ Args:
+ data (list): The data to create the table from
+
+ Returns:
+ str: The formatted table string
+ """
+
+ if not data:
+ return "No data provided."
+
+ num_rows = len(data)
+ num_cols = len(data[0])
+
+ # Calculate column widths
+ col_widths = [max(len(str(data[row][col])) for row in range(num_rows)) for col in range(num_cols)]
+
+ # Add two spaces padding to each cell
+ col_widths = [max(width + 2, 5) for width in col_widths] # Set minimum width of 5 for each column
+
+ # Calculate the total table width, counting the separators between columns
+ total_width = sum(col_widths) + num_cols - 1 # Separator widths between columns
+
+ width = 76
+
+ # Ensure a minimum total width of 76
+ if total_width < width:
+ additional_width = width - total_width
+ extra_width_per_col = additional_width // num_cols
+ remainder = additional_width % num_cols
+
+ for i in range(num_cols):
+ col_widths[i] += extra_width_per_col
+ if remainder > 0:
+ col_widths[i] += 1
+ remainder -= 1
+
+ # Recalculate total table width
+ total_width = sum(col_widths) + num_cols - 1
+
+ # Create the table
+ table = "\n"
+
+ # Top border
+ table += "_" * (total_width + 2) + "\n"
+
+ for row in range(num_rows):
+ table += "|"
+ for col in range(num_cols):
+ cell_content = str(data[row][col])
+ padding = col_widths[col] - len(cell_content)
+ left_padding = padding // 2
+ right_padding = padding - left_padding
+
+ # Separator after each cell
+ separator = '|'
+
+ table += f"{' ' * left_padding}{cell_content}{' ' * right_padding}{separator}"
+ table += "\n"
+ if row < num_rows - 1:
+ table += "|" + "-" * (total_width) + "|\n"
+
+ # Bottom border
+ table += "‾" * (total_width + 2) + ""
+
+ return table
+
+def get_media_folders(paths, logger):
+ """
+ Get media folders from the provided paths
+
+ Args:
+ paths (list): The paths to get media folders from
+ logger (logger): The logger to use for logging output
+
+ Returns:
+ dict: A dictionary containing the media folders
+ """
+
+ media_dict = {} # Initialize an empty dictionary to hold media folders
+ if isinstance(paths, str): # Ensure paths is a list
+ paths = [paths]
+
+ media_dict = {'movies': [], 'series': []} # Initialize dictionaries for movies and series
+
+ for path in paths: # Loop through each path provided
+ base_name = os.path.basename(os.path.normpath(path)) # Get the base folder name
+
+ # Iterate through items in the directory
+ for item in tqdm(os.listdir(path), desc=f"Getting media folders for '{base_name}'", disable=None, leave=True):
+ if item.startswith('.') or item.startswith('@'):
+ continue # Skip hidden files/folders
+
+ try:
+ # Extract title and year information from item name using regex
+ title = str(re.sub(year_regex, '', item))
+ year = int(year_regex.search(item).group(1))
+ except Exception as e:
+ logger.warning(f"Warning: {e} - Skipping '{item}' in '{base_name}'")
+ continue # Continue to the next item
+
+ normalized_title = normalize_titles(item) # Normalize the item's title
+
+ if os.path.isdir(os.path.join(path, item)) and any(os.path.isdir(os.path.join(path, item, sub_folder)) for sub_folder in os.listdir(os.path.join(path, item))):
+ # If the item is a directory and contains sub folders
+ sub_folders = [sub_folder for sub_folder in os.listdir(os.path.join(path, item)) if os.path.isdir(os.path.join(path, item, sub_folder)) and not sub_folder.startswith('.')]
+ sub_folders.sort() # Sort the sub folders
+
+ season_numbers = []
+ for folder in sub_folders:
+ if folder == "Specials":
+ season_data = '00'
+ season_numbers.append(season_data)
+ elif folder.startswith("Season"):
+ season_data = folder.replace("Season", "").strip().zfill(2)
+ season_numbers.append(season_data)
+
+ # Add series data to the media dictionary
+ media_dict['series'].append({
+ 'title': title,
+ 'year': year,
+ 'normalized_title': normalized_title,
+ 'season_numbers': season_numbers,
+ 'path': os.path.join(path, item),
+ 'location': base_name,
+ })
+ else:
+ # Add movie data to the media dictionary
+ media_dict['movies'].append({
+ 'title': title,
+ 'year': year,
+ 'normalized_title': normalized_title,
+ 'path': os.path.join(path, item),
+ 'location': base_name,
+ })
+
+ return media_dict
+
+def handle_starr_data(app, server_name, instance_type, include_episode=False):
+ """
+ Get data from Radarr or Sonarr
+
+ Args:
+ app (Radarr or Sonarr): The Radarr or Sonarr instance
+ server_name (str): The name of the instance (used for progress output)
+ instance_type (str): The type of instance (Radarr or Sonarr)
+ include_episode (bool, optional): Whether to fetch per-episode data for Sonarr seasons. Defaults to False.
+
+ Returns:
+ list: A list of dictionaries containing the data from Radarr or Sonarr
+ """
+
+ media_dict = [] # Initialize an empty list to hold media data
+ media = app.get_media() # Fetch media data from the Radarr or Sonarr instance
+ if media:
+ for item in tqdm(media, desc=f"Getting {server_name.capitalize()} data", total=len(media), disable=None, leave=True):
+ # Fetch relevant data based on the instance type (Radarr or Sonarr)
+ if instance_type == "radarr":
+ file_id = item.get('movieFile', {}).get('id', None) # Fetch file ID for Radarr
+ elif instance_type == "sonarr":
+ season_data = item.get('seasons', []) # Fetch season data for Sonarr
+ season_list = [] # Initialize a list to hold season data
+ for season in season_data:
+ if include_episode:
+ episode_data = app.get_episode_data_by_season(item['id'], season['seasonNumber']) # Fetch episode data for each season
+ episode_list = [] # Initialize a list to hold episode data
+ for episode in episode_data:
+ episode_list.append({
+ 'episode_number': episode['episodeNumber'],
+ 'monitored': episode['monitored'],
+ 'episode_file_id': episode['episodeFileId'],
+ 'episode_id': episode['id'],
+ 'has_file': episode['hasFile'],
+ }) # Append episode data to the episode dictionary
+ season_list.append({
+ 'season_number': season['seasonNumber'],
+ 'monitored': season['monitored'],
+ 'season_pack': season['statistics']['episodeCount'] == season['statistics']['totalEpisodeCount'],
+ 'season_has_episodes': season['statistics']['episodeCount'] > 0,
+ 'episode_data': episode_list if include_episode else [],
+ }) # Append season data to the season dictionary
+
+ alternate_titles = []
+ normalized_alternate_titles = []
+ if item['alternateTitles']:
+ for alternate_title in item['alternateTitles']:
+ alternate_titles.append(alternate_title['title']) # Collect alternate titles
+ normalized_alternate_titles.append(normalize_titles(alternate_title['title'])) # Normalize alternate titles
+ # If year is in the name extract name and year
+ if re.search(r"\s?\(\d{4}\)", item['title']):
+ title = re.sub(r"\s?\(\d{4}\)", "", item['title'])
+ year = int(re.search(r"\s?\((\d{4})\)", item['title']).group(1))
+ else:
+ title = item['title']
+ year = item['year']
+ # Construct a dictionary for each item and append it to media_dict
+ media_dict.append({
+ 'title': title,
+ 'year': year,
+ 'media_id': item['id'],
+ 'db_id': item['tmdbId'] if instance_type == "radarr" else item['tvdbId'],
+ 'monitored': item['monitored'],
+ 'status': item['status'],
+ 'root_folder': item['rootFolderPath'],
+ 'quality_profile': item['qualityProfileId'],
+ 'normalized_title': normalize_titles(item['title']),
+ 'path_name': os.path.basename(item['path']),
+ 'original_title': item.get('originalTitle', None),
+ 'secondary_year': item.get('secondaryYear', None),
+ 'alternate_titles': alternate_titles,
+ 'normalized_alternate_titles': normalized_alternate_titles,
+ 'file_id': file_id if instance_type == "radarr" else None,
+ 'folder': os.path.basename(os.path.normpath(item['path'])),
+ 'has_file': item['hasFile'] if instance_type == "radarr" else None,
+ 'tags': item['tags'],
+ 'seasons': season_list if instance_type == "sonarr" else None, # Add season_list for Sonarr items
+ 'season_numbers': [season['season_number'] for season in season_list] if instance_type == "sonarr" else None,
+ }) # Append the constructed dictionary to media_dict
+ else:
+ return None
+
+ return media_dict
+
+def get_plex_data(plex, library_names, logger, include_smart, collections_only):
+ """
+ Get data from Plex
+
+ Args:
+ plex (Plex): The Plex instance
+ library_names (list): The names of the libraries to get data from
+ logger (logger): The logger to use for logging output
+ include_smart (bool): Whether or not to include smart collections
+ collections_only (bool): Whether or not to only get collection data
+
+ Returns:
+ list: A list of dictionaries containing the data from Plex
+ """
+
+ plex_dict = [] # Initialize an empty list to hold Plex data
+ collection_names = {} # Initialize an empty dictionary to hold raw collection data
+ library_data = {} # Initialize an empty dictionary to hold library data
+ # Loop through each library name provided
+ for library_name in library_names:
+ try:
+ library = plex.library.section(library_name) # Get the library instance
+ except NotFound:
+ logger.error(f"Error: Library '{library_name}' not found, check your settings and try again.")
+ continue
+
+ if collections_only:
+ if include_smart:
+ collection_names[library_name] = [collection.title for collection in library.search(libtype="collection")]
+ else:
+ collection_names[library_name] = [collection.title for collection in library.search(libtype="collection") if not collection.smart]
+ else:
+ library_data[library_name] = library.all() # Get all items from the library
+
+ if collections_only:
+ # Process collection data
+ for library_name, collection_names in collection_names.items():
+ for collection in tqdm(collection_names, desc=f"Processing Plex collection data for '{library_name}'", total=len(collection_names), disable=None, leave=True):
+ plex_dict.append({
+ 'title': collection,
+ 'normalized_title': normalize_titles(collection),
+ 'location': library_name,
+ 'year': None,
+ 'folder': collection,
+ }) # Append collection information to plex_dict
+ else:
+ # Process library item data
+ for library_name, library_data in library_data.items():
+ for item in tqdm(library_data, desc=f"Processing {library_name} data", total=len(library_data), disable=None, leave=True):
+ labels = [str(label).lower() for label in item.labels] # Get lowercase labels
+ plex_dict.append({
+ 'title': item.title,
+ 'normalized_title': normalize_titles(item.title),
+ 'year': item.year,
+ 'labels': labels,
+ }) # Append item information to plex_dict
+
+ return plex_dict # Return the constructed Plex data dictionary
+
+
+def validate(config, script_config, logger):
+ """
+ Validate the config file
+
+ Args:
+ config (Config): The Config instance
+ script_config (dict): The script-specific config
+ logger (logger): The logger to use for logging output
+
+ Returns:
+ bool: True if the config is valid, False otherwise
+ """
+
+ instances = script_config.get('instances', [])
+ # validate instances
+ list_of_instance_keys = [sub_key for key in config.instances_config.keys() for sub_key in config.instances_config[key].keys()]
+ for instance in instances:
+ if instance not in list_of_instance_keys:
+ logger.error(f"Instance '{instance}' not found in config.yml.")
+ return False
+ return True
+
+def get_current_git_branch():
+ """
+ Get the current git branch
+
+ Returns:
+ str: The current git branch
+ """
+ if os.environ.get('DOCKER_ENV'):
+ branch = os.getenv('BRANCH', "master")
+ return branch
+ else:
+ try:
+ root_dir = pathlib.Path(__file__).parents[1]
+ # Run the git rev-parse command to get the current branch
+ result = subprocess.run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
+ cwd=root_dir,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ check=True,
+ text=True)
+ # Capture the output and return the current branch
+ return result.stdout.strip()
+ except (FileNotFoundError, subprocess.CalledProcessError) as e:
+ # Handle any errors if the command fails
+ print(f"Error: {e}")
+ return None
+
+def create_bar(middle_text):
+ """
+ Creates a separation bar with provided text in the center
+
+ Args:
+ middle_text (str): The text to place in the center of the separation bar
+
+ Returns:
+ str: The formatted separation bar
+ """
+ total_length = 80
+ if len(middle_text) == 1:
+ remaining_length = total_length - len(middle_text) - 2
+ left_side_length = 0
+ right_side_length = remaining_length
+ return f"\n{middle_text * left_side_length}{middle_text}{middle_text * right_side_length}\n"
+ else:
+ remaining_length = total_length - len(middle_text) - 4
+ left_side_length = math.floor(remaining_length / 2)
+ right_side_length = remaining_length - left_side_length
+ return f"\n{'*' * left_side_length} {middle_text} {'*' * right_side_length}\n"
+
+def redact_sensitive_info(text):
+ """
+ Redact sensitive information from the provided text
+
+ Args:
+ text (str): The text to redact sensitive information from
+
+ Returns:
+ str: The text with sensitive information redacted
+ """
+ # Redact Discord webhook URLs
+ text = re.sub(r'https://discord\.com/api/webhooks/[^/]+/\S+', r'https://discord.com/api/webhooks/[redacted]', text)
+
+ # Redact Google OAuth client IDs
+ text = re.sub(r'\b(\w{24})-[a-zA-Z0-9_-]{24}\.apps\.googleusercontent\.com\b', r'[redacted].apps.googleusercontent.com', text)
+
+ # Redact Google OAuth refresh tokens
+ text = re.sub(r'(?<=refresh_token": ")([^"]+)(?=")', r'[redacted]', text)
+
+ # Redact Google Drive file IDs
+ text = re.sub(r'(\b[A-Za-z0-9_-]{33}\b)', r'[redacted]', text)
+
+ # Redact Discord access tokens
+ text = re.sub(r'(?<=access_token": ")([^"]+)(?=")', r'[redacted]', text)
+
+ # redact GOCSPX-8765434567654 to GOCSPX-[redacted]
+ text = re.sub(r'GOCSPX-\S+', r'GOCSPX-[redacted]', text)
+
+ pattern = r'(-i).*?(\.apps\.googleusercontent\.com)'
+ text = re.sub(pattern, r'\1 [redacted]\2', text, flags=re.DOTALL | re.IGNORECASE)
+
+ pattern = r'(-f)\s\S+'
+ text = re.sub(pattern, r'\1 [redacted]', text, flags=re.DOTALL | re.IGNORECASE)
+
+ return text
+
+def sort_assets(assets_list):
+ """
+ Sort assets into movies, series, and collections
+
+ Args:
+ assets_list (list): The assets to sort
+
+ Returns:
+ Dict: A dictionary containing the sorted assets
+ """
+ assets_dict = {
+ 'movies': [],
+ 'series': [],
+ 'collections': []
+ }
+ for item in tqdm(assets_list, desc="Categorizing assets", total=len(assets_list), disable=None, leave=True):
+ if not item['year']:
+ assets_dict['collections'].append(item)
+ else:
+ if item.get('season_numbers', None):
+ assets_dict['series'].append(item)
+ else:
+ assets_dict['movies'].append(item)
+
+ return assets_dict
+
+def compare_strings(string1, string2):
+ """
+ Compare two strings for equality
+
+ Args:
+ string1 (str): The first string to compare
+ string2 (str): The second string to compare
+
+ Returns:
+ bool: True if the strings are equal, False otherwise
+ """
+ string1 = re.sub(r'\W+', '', string1)
+ string2 = re.sub(r'\W+', '', string2)
+
+ return string1.lower() == string2.lower()
+
+def is_match(asset, media):
+ """
+ Check if the asset matches the media
+
+ Args:
+ asset (dict): The asset to check
+ media (dict): The media to check
+
+ Returns:
+ bool: True if the asset matches the media, False otherwise
+ """
+ no_prefix = asset.get('no_prefix', [])
+ no_suffix = asset.get('no_suffix', [])
+ no_prefix_normalized = asset.get('no_prefix_normalized', [])
+ no_suffix_normalized = asset.get('no_suffix_normalized', [])
+ alternate_titles = media.get('alternate_titles', [])
+ normalized_alternate_titles = media.get('normalized_alternate_titles', [])
+ secondary_year = media.get('secondary_year', None)
+ original_title = media.get('original_title', None)
+ folder = media.get('folder', None)
+ folder_title = None
+ folder_year = None
+ normalized_folder_title = None
+ if folder:
+ folder_base_name = os.path.basename(folder)
+ match = re.search(folder_year_regex, folder_base_name)
+ if match:
+ folder_title, folder_year = match.groups()
+ folder_year = int(folder_year)
+ normalized_folder_title = normalize_titles(folder_title)
+
+ # Matching criteria for media and asset
+ if (
+ asset['title'] == media['title'] or
+ asset['normalized_title'] == media['normalized_title'] or
+ asset['title'] in alternate_titles or
+ asset['normalized_title'] in normalized_alternate_titles or
+ asset['title'] == original_title or
+ asset['title'] == folder_title or
+ asset['normalized_title'] == normalized_folder_title or
+ (media['title'] in no_prefix) or
+ (media['title'] in no_suffix) or
+ (media['normalized_title'] in no_prefix_normalized) or
+ (media['normalized_title'] in no_suffix_normalized) or
+ compare_strings(asset['title'], media['title']) or
+ compare_strings(asset['normalized_title'], media['normalized_title'])
+ ) and (
+ asset['year'] == media['year'] or
+ asset['year'] == secondary_year or
+ asset['year'] == folder_year
+ ):
+ return True
+ else:
+ return False
diff --git a/util/version.py b/util/version.py
new file mode 100755
index 0000000..1a89ee8
--- /dev/null
+++ b/util/version.py
@@ -0,0 +1,100 @@
+import os
+import pathlib
+from util.discord import discord
+
+
+try:
+ import requests
+except ImportError:
+ print("ImportError: requests")
+ print("Please install the required modules with 'pip install -r requirements.txt'")
+ exit(1)
+
+
+script_dir = pathlib.Path(__file__).parents[1] # Path to the script directory (one level up from this file)
+
+def version_check(logger, branch):
+ """
+ Check for a new version of the script
+
+ Args:
+ logger (logger): The logger to use for logging output
+ branch (str): The git branch whose VERSION file on GitHub is compared against
+
+ Returns:
+ None
+ """
+
+ # Read version from a local VERSION file
+ version_file = os.path.join(script_dir, "VERSION")
+
+ with open(version_file, "r") as f:
+ script_version = f.read().strip()
+
+ # Construct the URL for the GitHub raw file containing the version
+ github_url = f"https://raw.githubusercontent.com/Drazzilb08/userScripts/{branch}/VERSION"
+
+ # Send a GET request to the GitHub URL to fetch the script's version from GitHub
+ response = requests.get(github_url)
+
+ # Check if the request was successful
+ if response.status_code == 200:
+ github_script_version = response.text.strip() # Get the GitHub version
+ else:
+ logger.error(f"Error: {response.status_code} while retrieving version from {github_url}")
+ return
+
+ # Function to compare versions
+ def compare_versions(script_version, github_script_version):
+ script_version_parts = script_version.split('.')
+ github_script_version_parts = github_script_version.split('.')
+ # Compare only the parts both version strings have, to avoid an index error on mismatched lengths
+ for i in range(min(len(script_version_parts), len(github_script_version_parts))):
+ script_version_part = int(script_version_parts[i])
+ github_script_version_part = int(github_script_version_parts[i])
+
+ if script_version_part < github_script_version_part:
+ return True # There is a newer version available
+ elif script_version_part > github_script_version_part:
+ return False # The local version is newer
+ return False # The versions are the same or local version is newer
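+ # Example: compare_versions("1.0.5", "1.1.0") is True (an update is available), while
+ # compare_versions("1.2.0", "1.1.9") is False.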
+
+ # Compare the local script version with the version on GitHub
+ if compare_versions(script_version, github_script_version):
+ logger.warning("New version available")
+ logger.warning(f"Current Version: {script_version}")
+ logger.warning(f"GitHub Version: {github_script_version}")
+
+ # Prepare data for Discord notification
+ field = [{
+ "name": "New Version Available",
+ "value": f""
+ },{
+ "name": "Current Version",
+ "value": f"```{script_version}```"
+ }, {
+ "name": "GitHub Version",
+ "value": f"```{github_script_version}```"
+ }]
+
+ print("Sending Discord notification")
+ # Call function to send Discord notification with script details
+ discord(field, logger=logger, script_name="main", description=f"__**Drazzilb's Scripts**__", color=0xff0000, content=None)
+
+def get_version():
+ """
+ Get the version number from the VERSION file
+
+ Args:
+ None
+
+ Returns:
+ str: The version number
+ """
+
+ version_file = os.path.join(script_dir, "VERSION")
+
+ with open(version_file, "r") as f:
+ script_version = f.read().strip()
+
+ return script_version