From 7ce95a3ecf58d01902a58f965101ef539af0b361 Mon Sep 17 00:00:00 2001 From: eduardocerqueira Date: Wed, 18 Sep 2024 17:12:23 +0000 Subject: [PATCH] 2024-09-18 17:12:23.129943 new snippets --- seeker/report.txt | 29 ++ .../Insert Shapes in Word using Java.java | 37 ++ seeker/snippet/angular.Dockerfile | 29 ++ seeker/snippet/aws_enum.sh | 61 ++++ seeker/snippet/base_conversions.py | 110 ------ ...check-artifact-registry-batch-deletions.py | 101 ++++++ seeker/snippet/generate_signed_urls.py | 52 --- seeker/snippet/install-awscli.sh | 33 ++ .../ipynb_importParentDir_pyCodeTemplate.py | 26 -- seeker/snippet/kafka-move-leadership.sh | 323 ++++++++++++++++++ seeker/snippet/main.java | 36 -- seeker/snippet/main.py | 187 +--------- seeker/snippet/module_5_2.py | 39 --- seeker/snippet/program.java | 115 ------- seeker/snippet/spring.Dockerfile | 25 ++ 15 files changed, 643 insertions(+), 560 deletions(-) create mode 100644 seeker/snippet/Insert Shapes in Word using Java.java create mode 100644 seeker/snippet/angular.Dockerfile create mode 100644 seeker/snippet/aws_enum.sh delete mode 100644 seeker/snippet/base_conversions.py create mode 100644 seeker/snippet/check-artifact-registry-batch-deletions.py delete mode 100644 seeker/snippet/generate_signed_urls.py create mode 100644 seeker/snippet/install-awscli.sh delete mode 100644 seeker/snippet/ipynb_importParentDir_pyCodeTemplate.py create mode 100644 seeker/snippet/kafka-move-leadership.sh delete mode 100644 seeker/snippet/main.java delete mode 100644 seeker/snippet/module_5_2.py delete mode 100644 seeker/snippet/program.java create mode 100644 seeker/snippet/spring.Dockerfile diff --git a/seeker/report.txt b/seeker/report.txt index a5528eb5..fb047e1d 100644 --- a/seeker/report.txt +++ b/seeker/report.txt @@ -1,3 +1,32 @@ +-------------------------------------------------------------------------------- + 2024-09-18 17:12:23.129943 +-------------------------------------------------------------------------------- + On branch main +Your branch is up to date with 'origin/main'. + +Changes not staged for commit: + (use "git add/rm ..." to update what will be committed) + (use "git restore ..." to discard changes in working directory) + deleted: snippet/base_conversions.py + deleted: snippet/generate_signed_urls.py + deleted: snippet/ipynb_importParentDir_pyCodeTemplate.py + deleted: snippet/main.java + modified: snippet/main.py + deleted: snippet/module_5_2.py + deleted: snippet/program.java + +Untracked files: + (use "git add ..." 
to include in what will be committed) + snippet/Insert Shapes in Word using Java.java + snippet/angular.Dockerfile + snippet/aws_enum.sh + snippet/check-artifact-registry-batch-deletions.py + snippet/install-awscli.sh + snippet/kafka-move-leadership.sh + snippet/spring.Dockerfile + +no changes added to commit (use "git add" and/or "git commit -a") + -------------------------------------------------------------------------------- 2024-09-17 17:12:22.213604 -------------------------------------------------------------------------------- diff --git a/seeker/snippet/Insert Shapes in Word using Java.java b/seeker/snippet/Insert Shapes in Word using Java.java new file mode 100644 index 00000000..d8c50215 --- /dev/null +++ b/seeker/snippet/Insert Shapes in Word using Java.java @@ -0,0 +1,37 @@ +//date: 2024-09-18T16:54:15Z +//url: https://api.github.com/gists/133d9815f8bd9b36601b1925952eeb4c +//owner: https://api.github.com/users/aspose-com-kb + +import com.aspose.words.*; + +public class Main +{ + public static void main(String[] args) throws Exception // Adding shapes in Java + { + // Set the licenses + new License().setLicense("License.lic"); + + Document doc = new Document(); + DocumentBuilder builder = new DocumentBuilder(doc); + + //Inline shape + Shape shape = builder.insertShape(ShapeType.LINE, 200, 200); + shape.setRotation(35.0); + + //Free-floating shape + shape = builder.insertShape + ( ShapeType.ARROW,RelativeHorizontalPosition.PAGE,250, + RelativeVerticalPosition.PAGE,150,150,150,WrapType.INLINE); + shape.setRotation(40.0); + builder.writeln(); + OoxmlSaveOptions saveOptions = new OoxmlSaveOptions(SaveFormat.DOCX); + + // Save shapes as DML + saveOptions.setCompliance(OoxmlCompliance.ISO_29500_2008_TRANSITIONAL); + + // Save the document + doc.save("output.docx", saveOptions); + + System.out.println("Shapes added successfully"); + } +} \ No newline at end of file diff --git a/seeker/snippet/angular.Dockerfile b/seeker/snippet/angular.Dockerfile new file mode 100644 index 00000000..009c9158 --- /dev/null +++ b/seeker/snippet/angular.Dockerfile @@ -0,0 +1,29 @@ +#date: 2024-09-18T17:05:04Z +#url: https://api.github.com/gists/124d04536cdeca0cc709c6b43ffd9871 +#owner: https://api.github.com/users/Riko07br + +# Build angular------------------ +FROM node:lts-alpine3.20 as build + +RUN npm install -g @angular/cli + +WORKDIR /app + +COPY package.json . + +RUN npm install + +COPY . . 
+
+RUN npm run build
+
+# NGINX runner---------------
+FROM nginx:1.21-alpine
+
+COPY --from=build /app/dist/frontend/browser /usr/share/nginx/html
+
+COPY ./nginx.conf /etc/nginx/conf.d/default.conf
+
+EXPOSE 80 4200
+
+CMD ["nginx", "-g", "daemon off;"]
\ No newline at end of file
diff --git a/seeker/snippet/aws_enum.sh b/seeker/snippet/aws_enum.sh
new file mode 100644
index 00000000..dfad1da4
--- /dev/null
+++ b/seeker/snippet/aws_enum.sh
@@ -0,0 +1,61 @@
+#date: 2024-09-18T16:53:38Z
+#url: https://api.github.com/gists/04d0e48c523b9b73b2f1d14d81e2ba3f
+#owner: https://api.github.com/users/h00die
+
+EC2_TOKEN=$(curl -X PUT "http: "**********": 21600" 2>/dev/null || wget -q -O - --method PUT "http://169.254.169.254/latest/api/token" --header "X-aws-ec2-metadata-token-ttl-seconds: 21600" 2>/dev/null)
+HEADER="X-aws-ec2-metadata-token: "**********"
+URL="http://169.254.169.254/latest/meta-data"
+
+aws_req=""
+if [ "$(command -v curl)" ]; then
+  aws_req="curl -s -f -H '$HEADER'"
+elif [ "$(command -v wget)" ]; then
+  aws_req="wget -q -O - -H '$HEADER'"
+else
+  echo "Neither curl nor wget were found, I can't enumerate the metadata service :("
+fi
+
+printf "ami-id: "; eval $aws_req "$URL/ami-id"; echo ""
+printf "instance-action: "; eval $aws_req "$URL/instance-action"; echo ""
+printf "instance-id: "; eval $aws_req "$URL/instance-id"; echo ""
+printf "instance-life-cycle: "; eval $aws_req "$URL/instance-life-cycle"; echo ""
+printf "instance-type: "; eval $aws_req "$URL/instance-type"; echo ""
+printf "region: "; eval $aws_req "$URL/placement/region"; echo ""
+
+echo ""
+echo "Account Info"
+eval $aws_req "$URL/identity-credentials/ec2/info"; echo ""
+eval $aws_req "http://169.254.169.254/latest/dynamic/instance-identity/document"; echo ""
+
+echo ""
+echo "Network Info"
+for mac in $(eval $aws_req "$URL/network/interfaces/macs/" 2>/dev/null); do
+  echo "Mac: $mac"
+  printf "Owner ID: "; eval $aws_req "$URL/network/interfaces/macs/$mac/owner-id"; echo ""
+  printf "Public Hostname: "; eval $aws_req "$URL/network/interfaces/macs/$mac/public-hostname"; echo ""
+  printf "Security Groups: "; eval $aws_req "$URL/network/interfaces/macs/$mac/security-groups"; echo ""
+  echo "Private IPv4s:"; eval $aws_req "$URL/network/interfaces/macs/$mac/ipv4-associations/"; echo ""
+  printf "Subnet IPv4: "; eval $aws_req "$URL/network/interfaces/macs/$mac/subnet-ipv4-cidr-block"; echo ""
+  echo "PrivateIPv6s:"; eval $aws_req "$URL/network/interfaces/macs/$mac/ipv6s"; echo ""
+  printf "Subnet IPv6: "; eval $aws_req "$URL/network/interfaces/macs/$mac/subnet-ipv6-cidr-blocks"; echo ""
+  echo "Public IPv4s:"; eval $aws_req "$URL/network/interfaces/macs/$mac/public-ipv4s"; echo ""
+  echo ""
+done
+
+echo ""
+echo "IAM Role"
+eval $aws_req "$URL/iam/info"
+for role in $(eval $aws_req "$URL/iam/security-credentials/" 2>/dev/null); do
+  echo "Role: $role"
+  eval $aws_req "$URL/iam/security-credentials/$role"; echo ""
+  echo ""
+done
+
+echo ""
+echo "User Data"
+# Search hardcoded credentials
+eval $aws_req "http://169.254.169.254/latest/user-data"
+
+echo ""
+echo "EC2 Security Credentials"
+eval $aws_req "$URL/identity-credentials/ec2/security-credentials/ec2-instance"; echo ""
\ No newline at end of file
diff --git a/seeker/snippet/base_conversions.py b/seeker/snippet/base_conversions.py
deleted file mode 100644
index 34485fc7..00000000
--- a/seeker/snippet/base_conversions.py
+++ /dev/null
@@ -1,110 +0,0 @@
-#date: 2024-09-16T17:00:30Z
-#url:
https://api.github.com/gists/dd83ecd985ad1817d72ae92764b4921c -#owner: https://api.github.com/users/Marcus5408 - -# conversion.py -# ------------- -# Description: -# A simple program that converts a number from one base to another using either -# the successive division method or the weighted multiplication method. -# ------------- -# Usage: -# In a terminal, run the following command: -# python3 conversion.py -# -# selects the conversion method using one of the following: -# - divide: successive division method -# - multiply: weighted multiplication method -# is the number to convert. -# is the target base (for successive division) -# or the base of the number (for weighted multiplication). -# ------------- -# (c) Issac Liu, 2024 - -from typing import Union, Literal -import sys - - -def success_div(n, base): - remainder = 0 - result = 0 - charset = "0123456789" - if base > 10: - if base == 16: - charset = "0123456789ABCDEF" - else: - print( - "You have entered a base greater than 10. Please enter every digit of your base from least to greatest." - ) - values = input("") - charset = values if len(values) == base else "0123456789ABCDEF" - if base < 10: - while n != 0 or n > base: - remainder = n % base - quotient = n // base - print(f"{n}/{base} = {quotient}r{remainder}") - result = result * 10 + remainder - n = quotient - # reverse the result - result = int(str(result)[::-1]) - print(f"\n{result}") - else: - result = "" - while n != 0: - remainder = n % base - quotient = n // base - if base > 10 and remainder > 9: - hex_value = f" ({remainder} -> {charset[remainder]})" - print(f"{n}/{base} = {quotient}r{remainder}{hex_value}") - else: - print(f"{n}/{base} = {quotient}r{remainder}") - result = charset[remainder] + result - n = quotient - print(f"\n{result}") - - return result - - -def weighted_multiply(n: Union[int, str], base: int) -> int: - if isinstance(n, str): - n = n.upper() - charset = "0123456789ABCDEF" - list = [charset.index(x) for x in n] - else: - list = [int(x) for x in str(n)] - - weights = [base**i for i in range(len(list) - 1, -1, -1)] - result = [a * b for a, b in zip(list, weights)] - - for i in range(len(result)): - if base > 10 and list[i] > 9: - hex_value = f" ({charset[list[i]]} -> {list[i]})" - print( - f"{list[i]}{hex_value} * {base}^{len(list) - i - 1} = {list[i]} * {weights[i]} = {result[i]}" - ) - else: - print( - f"{list[i]} * {base}^{len(list) - i - 1} = {list[i]} * {weights[i]} = {result[i]}" - ) - - print(f"\n{' + '.join([str(x) for x in result])} = {sum(result)}") - return sum(result) - - -if __name__ == "__main__": - if len(sys.argv) != 4: - print("Usage: python conversion.py ") - sys.exit(1) - - method = sys.argv[1] - n = int(sys.argv[2]) if sys.argv[2].isdigit() else sys.argv[2] - base = int(sys.argv[3]) - - if method == "divide": - success_div(n, base) - elif method == "multiply": - weighted_multiply(n, base) - else: - print( - "Invalid method. Use 1 for division method or 2 for weighted multiply method." 
- ) diff --git a/seeker/snippet/check-artifact-registry-batch-deletions.py b/seeker/snippet/check-artifact-registry-batch-deletions.py new file mode 100644 index 00000000..e995d435 --- /dev/null +++ b/seeker/snippet/check-artifact-registry-batch-deletions.py @@ -0,0 +1,101 @@ +#date: 2024-09-18T16:58:56Z +#url: https://api.github.com/gists/cc77d7c98cd4c0c02f40f0e777ec3c0c +#owner: https://api.github.com/users/philwhiteuk + +from collections import defaultdict +from datetime import date, datetime, timedelta, UTC +import os +import textwrap + +from dotenv import load_dotenv +from google.cloud import artifactregistry_v1 as artifactregistry, logging +import yaml + +load_dotenv() # take environment variables from .env. + +project_id = os.getenv('PROJECT_ID') +location = os.getenv('LOCATION') + +def get_log(log_date: date = date.today()): + log_file_path = os.path.join(os.path.dirname(__file__), '../var/log', f'{log_date.isoformat()}.yaml') + + if not os.path.exists(log_file_path): + print(f'Fetching log for {log_date.isoformat()}...') + logging_client = logging.Client(project=project_id) + log_query = f''' + protoPayload.methodName="google.devtools.artifactregistry.v1.ArtifactRegistry.BatchDeleteVersions" + AND protoPayload.authenticationInfo.principalEmail:"@gcp-sa-artifactregistry.iam.gserviceaccount.com" + AND protoPayload.request.parent:"projects/{project_id}/locations/{location}" + AND protoPayload.request.validateOnly=true + AND protoPayload.serviceName="artifactregistry.googleapis.com" + AND + (timestamp > {log_date.isoformat()} AND timestamp <= {(log_date + timedelta(days=1)).isoformat()}) + ''' + with open(log_file_path, 'a') as f: + for entry in logging_client.list_entries(filter_=log_query): + f.write('---\n') # Separate documents with '---' + yaml.dump(entry.to_api_repr(), f) # Write entry to file + + with open(log_file_path, 'r') as f: + yaml_docs = yaml.safe_load_all(f) + return list(yaml_docs) + +def get_current_inventory(repository: str, package: str): + inventory_path = os.path.join(os.path.dirname(__file__), '../var/inventory', f'{date.today().isoformat()}-{repository}-{package}.yaml') + + if not os.path.exists(inventory_path): + print(f'Fetching most recent inventory for {repository}: {package}...') + artifactregistry_client = artifactregistry.ArtifactRegistryClient() + parent = f'projects/{project_id}/locations/{location}/repositories/{repository}/packages/{package}' + request = artifactregistry.ListVersionsRequest(parent=parent) + with open(inventory_path, 'a') as f: + for entry in artifactregistry_client.list_versions(request=request): + f.write('---\n') # Separate documents with '---' + yaml.dump({'name': entry.name, 'create_time': entry.create_time.isoformat(), 'update_time': entry.update_time.isoformat()}, f) # Write entry to file + + with open(inventory_path, 'r') as f: + yaml_docs = yaml.safe_load_all(f) + return list(yaml_docs) + +def main(): + today = date.today() + print(f'Checking log for {today.isoformat()}...') + log_entries = get_log(log_date=today) + + versions_marked_for_deletion = set() + for log_entry in log_entries: + if log_entry['protoPayload']['request']['names']: + versions_marked_for_deletion.update(log_entry['protoPayload']['request']['names']) + + versions_by_package = defaultdict(list) + for tag in versions_marked_for_deletion: + repository = tag.split('/')[5] + package = tag.split('/')[7] + versions_by_package[f'{repository}/{package}'].append(tag) + + now = datetime.now(UTC) + for package, versions_marked_for_deletion in 
versions_by_package.items():
+        repository = package.split('/')[0]
+        package = package.split('/')[1]
+
+        all_versions = get_current_inventory(repository=repository, package=package)
+        assert len(versions_marked_for_deletion) <= len(all_versions) - 100, f'{repository}/{package} is keeping fewer than the minimum 100 versions!'
+        summary = f'''
+        Resource: projects/{project_id}/locations/{location}/repositories/{repository}/package/{package}
+        Total files: {len(all_versions)}
+        Marked for deletion: {len(versions_marked_for_deletion)}
+        '''
+        print(textwrap.dedent(summary))
+
+        inventory_lookup = dict()
+        for item in all_versions:
+            inventory_lookup[item['name']] = datetime.fromisoformat(item['update_time'])
+
+        tag_counter = 0
+        for tag in versions_marked_for_deletion:
+            if tag in inventory_lookup:
+                age = now - inventory_lookup[tag]
+                assert age.days >= 5, f'Version {tag} is newer than 5 days!'
+                print(f"- ✅ {tag.split('/')[-1]} {age.days} days old")
+                tag_counter += 1
+
diff --git a/seeker/snippet/generate_signed_urls.py b/seeker/snippet/generate_signed_urls.py
deleted file mode 100644
index 0ad90d08..00000000
--- a/seeker/snippet/generate_signed_urls.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#date: 2024-09-16T16:57:20Z
-#url: https://api.github.com/gists/05da91b9b34799ff6fd4254cffba7d3e
-#owner: https://api.github.com/users/rlank
-
-from google.cloud import storage
-from datetime import timedelta
-import os
-
-# Set the path to your service account key file
-os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '/path/to/key/json'
-
-def generate_signed_urls(bucket_name, prefix, expiration_time):
-    """
-    Generates signed URLs for files in the given bucket and prefix.
-
-    :param bucket_name: Name of the GCS bucket.
-    :param prefix: Prefix of the files in the GCS bucket.
-    :param expiration_time: Time in minutes for which the signed URL should be valid.
-    :return: List of tuples containing the file name and signed URL.
-    """
-    # Initialize the client
-    # This uses the default credentials. Make sure that the GOOGLE_APPLICATION_CREDENTIALS environment variable is set.
-    storage_client = storage.Client()
-
-    # Get the bucket
-    bucket = storage_client.bucket(bucket_name)
-
-    # Get blobs (files) with the given prefix
-    blobs = bucket.list_blobs(prefix=prefix)
-
-    signed_urls = []
-    for blob in blobs:
-        # Generate a signed URL for each blob
-        url = blob.generate_signed_url(
-            expiration=expiration_time,
-            version='v4'  # Use V4 signing
-        )
-        signed_urls.append((blob.name, url))
-
-    return signed_urls
-# Usage
-bucket_name = 'fuelcast-data'
-prefix = 'fuel/rapid-2024-conus/'
-
-# Longest allowable time is one week
-exp_time = timedelta(days=7)
-
-signed_urls = generate_signed_urls(bucket_name, prefix, expiration_time=exp_time)
-
-# Print signed URLs
-for file_name, url in signed_urls:
-    print(f"File: {file_name} - Signed URL: {url}")
\ No newline at end of file
diff --git a/seeker/snippet/install-awscli.sh b/seeker/snippet/install-awscli.sh
new file mode 100644
index 00000000..73241aac
--- /dev/null
+++ b/seeker/snippet/install-awscli.sh
@@ -0,0 +1,33 @@
+#date: 2024-09-18T17:00:55Z
+#url: https://api.github.com/gists/b7c4ed2118fe9954b85eda3d411150fb
+#owner: https://api.github.com/users/salutgeek
+
+#!/usr/bin/env bash
+# see: https://docs.aws.amazon.com/cli/latest/userguide/getting-started-source-install.html
+# This script will install aws-cli libraries to /usr/local/lib/aws-cli/
+# This script will install aws-cli executable to /usr/local/bin/
+
+set -e
+WORK_DIR=$(mktemp -d)
+
+# download source package and un-tar
+curl -fsSL https://awscli.amazonaws.com/awscli.tar.gz | \
+  tar -xzf - --strip-components=1 -C "$WORK_DIR"
+
+# cleanup
+trap 'sudo rm -rf "$WORK_DIR"' EXIT
+
+pushd "$WORK_DIR"
+
+# remove existing installed aws-cli
+sudo rm -rf /usr/local/lib/aws-cli
+
+# configure deps
+./configure --with-download-deps
+
+# install
+make
+sudo make install
+popd
+
+aws --version
\ No newline at end of file
diff --git a/seeker/snippet/ipynb_importParentDir_pyCodeTemplate.py b/seeker/snippet/ipynb_importParentDir_pyCodeTemplate.py
deleted file mode 100644
index fe87e3f7..00000000
--- a/seeker/snippet/ipynb_importParentDir_pyCodeTemplate.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#date: 2024-09-16T16:51:30Z
-#url: https://api.github.com/gists/c4028cf4e3de861d0dda7c7edf552b57
-#owner: https://api.github.com/users/birdflyi
-
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Python 3.7  # Your python version
-
-# @Time   : ${DATE} ${TIME}
-# @Author : 'Lou Zehua'  # Your name
-# @File   : ${NAME}.py
-
-import os
-import sys
-
-if '__file__' not in globals():
-    # !pip install ipynbname  # Remove comment symbols to solve the ModuleNotFoundError
-    import ipynbname
-
-    nb_path = ipynbname.path()
-    __file__ = str(nb_path)
-cur_dir = os.path.dirname(__file__)
-pkg_rootdir = os.path.dirname(cur_dir)  # Should be the root directory of your project.
-if pkg_rootdir not in sys.path:  # To resolve the ModuleNotFoundError
-    sys.path.append(pkg_rootdir)
-    print('-- Add root directory "{}" to system path.'.format(pkg_rootdir))
diff --git a/seeker/snippet/kafka-move-leadership.sh b/seeker/snippet/kafka-move-leadership.sh
new file mode 100644
index 00000000..372d72f8
--- /dev/null
+++ b/seeker/snippet/kafka-move-leadership.sh
@@ -0,0 +1,323 @@
+#date: 2024-09-18T16:49:56Z
+#url: https://api.github.com/gists/97732ef53ee3c2f6ea421db8d22f85a2
+#owner: https://api.github.com/users/agrezende
+
+#!/usr/bin/env bash
+#
+# File: kafka-move-leadership.sh
+#
+# Description
+# ===========
+#
+# Generates a Kafka partition reassignment JSON snippet to STDOUT to move the leadership
+# of any replicas away from the provided "source" broker to different, randomly selected
+# "target" brokers. Run this script with `-h` to show detailed usage instructions.
+#
+#
+# Requirements
+# ============
+# - Kafka 0.8.1.1 (later versions may work, too)
+#
+#
+# Usage
+# =====
+#
+# To show usage instructions run this script with `-h` or `--help`.
+#
+#
+# Full workflow
+# =============
+#
+# High-level overview
+# -------------------
+#
+# 1. Use this script to generate a partition reassignment JSON file.
+# 2. Start the actual reassignment operation via Kafka's `kafka-reassign-partitions.sh` script and this JSON file.
+# 3. Monitor the progress of the reassignment operation with Kafka's `kafka-reassign-partitions.sh` script.
+#
+# Example
+# -------
+#
+# Step 1 (generate reassignment JSON):
+#
+#    $ kafka-move-leadership.sh --broker-id 4 --first-broker-id 0 --last-broker-id 8 --zookeeper zookeeper1:2181 > partitions-to-move-4.json
+#
+# Step 2 (start reassignment process):
+#
+#    $ kafka-reassign-partitions.sh --zookeeper zookeeper1:2181 --reassignment-json-file partitions-to-move-4.json --execute
+#
+# Step 3 (monitor progress of reassignment process):
+#
+#    $ kafka-reassign-partitions.sh --zookeeper zookeeper1:2181 --reassignment-json-file partitions-to-move-4.json --verify
+
+
+declare -r MYSELF=`basename $0`
+
+print_usage() {
+  echo "$MYSELF - generates a Kafka partition reassignment JSON snippet to move partition leadership away from a broker (details below)"
+  echo
+  echo "Usage: $MYSELF [OPTION]..."
+  echo
+  echo "    --broker-id            Move leadership of all replicas, if any, from this broker"
+  echo "                           to different, randomly selected brokers. Example: 4"
+  echo "    --first-broker-id      First (= lowest) Kafka broker ID in the cluster. Used as"
+  echo "                           the start index for the range of broker IDs from which"
+  echo "                           replacement brokers will be randomly selected. Example: 0"
+  echo "    --last-broker-id       Last (= highest) Kafka broker ID in the cluster. Used as"
+  echo "                           the end index for the range of broker IDs from which"
+  echo "                           replacement brokers will be randomly selected. Example: 8"
+  echo "    --zookeeper            Comma-separated list of ZK servers with which the brokers"
+  echo "                           are registered. Example: zookeeper1:2181,zookeeper2:2181"
+  echo "    -h, --help             Print this help message and exit."
+  echo
+  echo "Example"
+  echo "-------"
+  echo
+  echo "The following example moves leadership from broker with ID 4 to brokers randomly selected from"
+  echo "the ID range 0,1,2,3,4,5,6,7,8 (though 4 itself will be excluded from the range automatically):"
+  echo
+  echo "    $ $MYSELF --broker-id 4 --first-broker-id 0 --last-broker-id 8 --zookeeper zookeeper1:2181"
+  echo
+  echo "Use cases include:"
+  echo "------------------"
+  echo "  1. Safely restarting a broker while minimizing risk of data loss."
+ echo " 2. Replacing a broker." + echo " 3. Preparing a broker for maintenance." + echo + echo "Detailed description" + echo "--------------------" + echo "Generates a Kafka partition reassignment JSON snippet to STDOUT" + echo "to move the leadership of any replicas from the provided broker ID to" + echo "different, randomly selected broker IDs." + echo + echo "This JSON snippet can be saved to a file and then be used as an argument for:" + echo + echo " $ kafka-reassign-partitions.sh --reassignment-json-file my.json" + echo + echo "Further information" + echo "-------------------" + echo "- http://kafka.apache.org/documentation.html#basic_ops_cluster_expansion" + echo "- https://cwiki.apache.org/confluence/display/KAFKA/Replication+tools#Replicationtools-6.ReassignPartitionsTool" +} + +if [[ $# -eq 0 ]]; then + print_usage + exit 97 +fi + +while [[ $# -gt 0 ]]; do + case "$1" in + --broker-id) + shift + declare -r BROKER="$1" + shift + ;; + --zookeeper) + shift + declare -r ZOOKEEPER_CONNECT="$1" + shift + ;; + --first-broker-id) + shift + declare -r KAFKA_FIRST_BROKER_ID="$1" + shift + ;; + --last-broker-id) + shift + declare -r KAFKA_LAST_BROKER_ID="$1" + shift + ;; + -h|--help) + print_usage + exit 98 + ;; + *) + echo "ERROR: Unexpected option ${1}" + echo + print_usage + exit 99 + ;; + esac +done + + +# Input validation +if [ -z "$BROKER" ]; then + echo "ERROR: You must set the parameter --broker-id" + exit 80 +fi + +if [ -z "$ZOOKEEPER_CONNECT" ]; then + echo "ERROR: You must set the parameter --zookeeper" + exit 81 +fi + +if [ -z "$KAFKA_FIRST_BROKER_ID" ]; then + echo "ERROR: You must set the parameter --first-broker-id" + exit 82 +fi + +if [ -z "$KAFKA_LAST_BROKER_ID" ]; then + echo "ERROR: You must set the parameter --last-broker-id" + exit 83 +fi + + +############################################################################### +### DEPENDENCIES +############################################################################### + +declare -r KAFKA_TOPICS_SCRIPT_NAME="kafka-topics.sh" +declare -r FALLBACK_PATH="/opt/kafka/bin" + +which "$KAFKA_TOPICS_SCRIPT_NAME" &>/dev/null +if [ $? -ne 0 ]; then + declare -r FALLBACK_BIN="$FALLBACK_PATH/$KAFKA_TOPICS_SCRIPT_NAME" + which "$FALLBACK_BIN" &>/dev/null + if [ $? -ne 0 ]; then + echo "ERROR: $KAFKA_TOPICS_SCRIPT_NAME (ships with Kafka) not found in PATH." + exit 70 + else + declare -r KAFKA_TOPICS_BIN="$FALLBACK_BIN" + fi +else + declare -r KAFKA_TOPICS_BIN="$KAFKA_TOPICS_SCRIPT_NAME" +fi + + +############################################################################### +### MISC CONFIGURATION - DO NOT TOUCH UNLESS YOU KNOW WHAT YOU ARE DOING +############################################################################### + +declare -r OLD_IFS="$IFS" + + +############################################################################### +### UTILITY FUNCTIONS +############################################################################### + +# Checks whether an array (first param) contains an element (second param). +# Returns 0 if the array contains the element, and 1 if it does not. +# +# Usage: array_contains myArray myElement +function array_contains { + local array="$1[@]" + local seeking=$2 + local in=1 + for element in "${!array}"; do + if [[ $element == $seeking ]]; then + in=0 + break + fi + done + return $in +} + +# Randomly selects a broker ID in the range specified by +# KAFKA_FIRST_BROKER_ID (including) and KAFKA_LAST_BROKER_ID (including). +# +# Usage: random_broker => may return e.g. 
"6" +function random_broker { + shuf -i ${KAFKA_FIRST_BROKER_ID}-${KAFKA_LAST_BROKER_ID} -n 1 +} + +# Randomly selects, from the list of available brokers (range specified by +# KAFKA_FIRST_BROKER_ID and KAFKA_LAST_BROKER_ID), a broker ID that is not +# already listed in the provided brokers (first param). +# +# Usage: other_broker "1,4,6" => may return e.g. "2" +# +# Note: Do NOT put spaces in the string. "1,2" is ok, "1, 2" is not. +function other_broker { + local brokers_string=$1 + IFS=$',' read -a brokers <<< "$brokers_string" + local new_broker=`random_broker` + while array_contains brokers $new_broker; do + new_broker=`random_broker` + done + echo $new_broker +} + +# Returns a list of broker IDs by removing the provided broker ID (second param) +# from the provided list of original broker IDs (first param). If the original +# broker list does not contain the provided broker, the list is returned as is. +# +# The list of broker IDs must be a comma-separated list of numbers, e.g. "1,2". +# +# Usage: all_but_broker "1,2,3" "3" => returns "1,2" +# +# Note: Do NOT put spaces in the string. "1,2" is ok, "1, 2" is not. +function all_but_broker { + local brokers_string=$1 + local broker=$2 + IFS=$',' read -a brokers <<< "$brokers_string" + local new_brokers="" + for curr_broker in "${brokers[@]}"; do + if [ "$curr_broker" != "$broker" ]; then + new_brokers="$new_brokers,$curr_broker" + fi + done + # Remove leading comma, if any. + new_brokers=${new_brokers#","} + echo $new_brokers +} + +# Returns a list of broker IDs based on a provided list of broker IDs (first +# param), where the provided broker ID (second param) is replaced by a +# randomly selected broker ID that is not already in the original list. +# +# Usage: replace_broker "1,2,3" "2" => may return e.g. "1,3,4" +# +# Note: Do NOT put spaces in the string. "1,2" is ok, "1, 2" is not. +function replace_broker { + local brokers_string=$1 + local broker=$2 + local remaining_brokers=`all_but_broker $brokers_string $broker` + local replacement_broker=`other_broker $brokers_string $broker` + new_brokers="$remaining_brokers,$replacement_broker" + # Remove leading comma, if any. + new_brokers=${new_brokers#","} + # Remove trailing comma, if any. + new_brokers=${new_brokers%","} + echo $new_brokers +} + + +############################################################################### +### MAIN +############################################################################### + +# "Header" of JSON file for Kafka partition reassignment +json="{\n" +json="$json \"partitions\": [\n" + +# Actual partition reassignments +for topicPartitionReplicas in `$KAFKA_TOPICS_BIN --zookeeper $ZOOKEEPER_CONNECT --describe | grep "Leader: $BROKER" | awk '{ print $2"#"$4"#"$8 }'`; do + # Note: We use '#' as field separator in awk (see above) and here + # because it is not a valid character for a Kafka topic name. + IFS=$'#' read -a array <<< "$topicPartitionReplicas" + topic="${array[0]}" # e.g. "zerg.hydra" + partition="${array[1]}" # e.g. "4" + replicas="${array[2]}" # e.g. "0,8" (= comma-separated list of broker IDs) + new_replicas=`replace_broker $replicas $BROKER` + json="$json {\"topic\": \"${topic}\", \"partition\": ${partition}, \"replicas\": [${new_replicas}] },\n" +done + +# Remove tailing comma, if any. 
+json=${json%",\n"} +json="${json}\n" + +# "Footer" of JSON file +json="$json ],\n" +json="$json \"version\": 1\n" +json="${json}}\n" + +# Print JSON to STDOUT +echo -e $json + + +############################################################################### +### CLEANUP +############################################################################### + +IFS="$OLD_IFS" \ No newline at end of file diff --git a/seeker/snippet/main.java b/seeker/snippet/main.java deleted file mode 100644 index dd3473c3..00000000 --- a/seeker/snippet/main.java +++ /dev/null @@ -1,36 +0,0 @@ -//date: 2024-09-16T17:10:28Z -//url: https://api.github.com/gists/12ac859a0f6d500e52d8ae7999e2b395 -//owner: https://api.github.com/users/qren0neu - -class Solution { - public long maxScore(int[] nums1, int[] nums2, int k) { - // if use priority queue, we can have: - // 1. when we poll in the queue, we remove the min - // so the sum of nums1 should be larger - // but, we have to calculate the minimum dynamically in nums2 - // if we can combine nums1 and nums2 somehow together, we can solve the problem - int[][] arr = new int[nums1.length][2]; - for (int i = 0; i < nums1.length; i++) { - arr[i][0] = nums1[i]; - arr[i][1] = nums2[i]; - } - Arrays.sort(arr, (int[] arr1, int[] arr2) -> arr2[1] - arr1[1]); - PriorityQueue pq = new PriorityQueue(k, (a,b) -> a - b); - long score = 0; - long sum = 0; - for (int[] pair : arr) { - // pair: nums1, nums2 - int min = pair[1]; - pq.offer(pair[0]); - sum += pair[0]; - if (pq.size() > k) { - int removed = pq.poll(); - sum -= removed; - } - if (pq.size() == k) { - score = Math.max(score, sum * min); - } - } - return score; - } -} \ No newline at end of file diff --git a/seeker/snippet/main.py b/seeker/snippet/main.py index cfde251f..3bf676a9 100644 --- a/seeker/snippet/main.py +++ b/seeker/snippet/main.py @@ -1,183 +1,6 @@ -#date: 2024-09-17T16:55:05Z -#url: https://api.github.com/gists/18888e23fee5a7788305f1aa35a1df3b -#owner: https://api.github.com/users/zitterbewegung +#date: 2024-09-18T17:06:49Z +#url: https://api.github.com/gists/00ca864491f7851330dd40ea0b33db42 +#owner: https://api.github.com/users/mypy-play -#!/usr/bin/env python3 - -import subprocess -import os -import git -import requests -import re -import json -from langgraph import LangGraph, RAGCodeAssistant, Tool -from pygdbmi.gdbcontroller import GdbController # GDB Python interface -from ghidra_bridge import GhidraBridge # Ghidra Python bridge -from litellm import LiteLLMClient # Import LiteLLM client -import vowpalwabbit -from dotenv import load_dotenv -import argparse - -# Load environment variables from .env file -load_dotenv() - -# Retrieve paths and tokens from environment variables -GDB_PATH = os.getenv('GDB_PATH') -GHIDRA_PATH = os.getenv('GHIDRA_PATH') -DTRACE_PATH = os.getenv('DTRACE_PATH') -LITELLM_API_KEY = os.getenv('LITELLM_API_KEY') -ADVISORY_DB_API_TOKEN = "**********" -ADVISORY_DB_URL = "https://api.github.com/repos/github/advisory-database/contents/advisories" - -# Initialize LiteLLM client with the API key from the .env file -llm_client = LiteLLMClient(api_key=LITELLM_API_KEY) - -class LiteLLMAgent: - """Agent to interact with LiteLLM for generating code explanations and other prompts.""" - - def __init__(self, model="gpt-3.5"): - self.model = model - self.client = llm_client - - def generate_response(self, prompt): - """Generate response using LiteLLM.""" - response = self.client.complete( - prompt=prompt, - model=self.model, - max_tokens= "**********" - ) - return response.text - -class 
CodeExplanationTool(Tool): - """A tool that uses LiteLLM to explain the functionality of C/C++ source code.""" - - def __init__(self): - super().__init__(name="code_explanation", description="Generates explanations for source code using LiteLLM.") - self.llm_agent = LiteLLMAgent() - - def explain_code(self, code): - """Generate a description of what the code does using LiteLLM.""" - explanation_prompt = f"Analyze the following C/C++ code and provide a detailed explanation of what it does:\n\n{code}\n\nExplanation:" - response = self.llm_agent.generate_response(explanation_prompt) - return response - -class AdvisoryScanTool(Tool): - """A tool that scans a GitHub repository for known vulnerabilities based on the GitHub Advisory Database.""" - - def __init__(self): - super().__init__(name="advisory_scan", description="Scans dependencies for known vulnerabilities.") - self.advisory_data = self.load_advisory_database() - - def load_advisory_database(self): - """Loads advisories from the GitHub Advisory Database.""" - headers = {'Authorization': "**********" - response = requests.get(ADVISORY_DB_URL, headers=headers) - if response.status_code == 200: - advisories = response.json() - print("Advisory Database Loaded Successfully") - return advisories - else: - print("Failed to load Advisory Database") - return [] - - def parse_advisory_entries(self, advisory): - """Parse advisories to extract relevant information.""" - details = { - "package_name": advisory.get("package_name", ""), - "vulnerable_versions": advisory.get("vulnerable_versions", ""), - "description": advisory.get("description", ""), - "severity": advisory.get("severity", ""), - "identifiers": advisory.get("identifiers", []), - } - return details - - def scan_repository(self, repo_path): - """Scans the given GitHub repository path for dependencies and checks against known advisories.""" - repo = git.Repo(repo_path) - dependencies = self.extract_dependencies(repo) - vulnerabilities = self.match_advisories(dependencies) - return vulnerabilities - - def extract_dependencies(self, repo): - """Extracts dependencies from the repository (example for C/C++ projects).""" - dependencies = [] - files = repo.git.ls_files('*.txt', '*.json', '*.yaml', '*.lock').splitlines() - for file_path in files: - with open(os.path.join(repo.working_dir, file_path), 'r') as file: - content = file.read() - dependencies.extend(self.parse_dependencies_from_file(content)) - return dependencies - - def parse_dependencies_from_file(self, content): - """Parses dependencies from a given file content.""" - dependencies = [] - for line in content.splitlines(): - if "==" in line: - package = line.split("==")[0].strip() - dependencies.append(package) - return dependencies - - def match_advisories(self, dependencies): - """Matches dependencies against advisories from the GitHub Advisory Database.""" - matched_vulnerabilities = [] - for advisory in self.advisory_data: - advisory_details = self.parse_advisory_entries(advisory) - for dependency in dependencies: - if advisory_details["package_name"].lower() == dependency.lower(): - matched_vulnerabilities.append(advisory_details) - print(f"Vulnerability found for {dependency}: {advisory_details}") - return matched_vulnerabilities - -# Main LangGraph application setup -class CodeAnalysisLangGraph: - def __init__(self): - self.langgraph = LangGraph() - self.vulnerability_tool = VulnerabilityDetectionTool() - self.advisory_scan_tool = AdvisoryScanTool() - self.code_explanation_tool = CodeExplanationTool() - - def 
analyze_repository(self, repo_path): - """Main function to analyze the repository and program behavior.""" - # Initialize GitPython repository - repo = git.Repo(repo_path) - - # Scan and explain each C/C++ source file - source_files = repo.git.ls_files('*.c', '*.cpp').splitlines() - for file_path in source_files: - full_path = os.path.join(repo_path, file_path) - with open(full_path, 'r') as file: - source_code = file.read() - - # Vulnerability Detection - vulnerabilities = self.vulnerability_tool.scan_code(source_code) - if vulnerabilities: - print(f"Vulnerabilities in {file_path}:") - for vulnerability in vulnerabilities: - print("-", vulnerability) - - # Code Explanation - explanation = self.code_explanation_tool.explain_code(source_code) - print(f"Explanation for {file_path}:\n{explanation}\n") - - # Advisory Scan Tool - vulnerabilities = self.advisory_scan_tool.scan_repository(repo_path) - if vulnerabilities: - print("Dependency Vulnerabilities Detected:") - for vulnerability in vulnerabilities: - print(json.dumps(vulnerability, indent=4)) - -def main(): - parser = argparse.ArgumentParser(description="Analyze a GitHub repository for vulnerabilities and code functionality.") - parser.add_argument('--repo', type=str, help='Path to the GitHub repository', default=os.getenv('REPO_PATH')) - args = parser.parse_args() - - # Run the analysis with the provided or default repository path - analyzer = CodeAnalysisLangGraph() - analyzer.analyze_repository(args.repo) - -if __name__ == "__main__": - main() - - -if __name__ == "__main__": - main() +for i in range(1000): + print(i:int) \ No newline at end of file diff --git a/seeker/snippet/module_5_2.py b/seeker/snippet/module_5_2.py deleted file mode 100644 index 2b538ee9..00000000 --- a/seeker/snippet/module_5_2.py +++ /dev/null @@ -1,39 +0,0 @@ -#date: 2024-09-16T17:08:45Z -#url: https://api.github.com/gists/f4b2d14e1c2552e58d2373ba74014c2b -#owner: https://api.github.com/users/zabelloalexandr - -from pygments.formatters import other - - -class House: - def __init__(self, name, number_of_floors): - self.name = name - self.number_of_floors = number_of_floors - self.current_floor = 1 - - def __eq__(self, other): - return self.number_of_floors == other.new_floor - - def __str__(self): - return f'{self.name} {self.number_of_floors}' - - def __lt__(self): - return self.number_of_floors < other.new_floor - def __len__(self): - return self.number_of_floors - - - - -h1 = House('ЖК Эльбрус', 10) -h2 = House('ЖК Акация', 20) - -# __str__ -print(h1) -print(h2) - -# __len__ -print(len(h1)) -print(len(h2)) - - diff --git a/seeker/snippet/program.java b/seeker/snippet/program.java deleted file mode 100644 index dc77c45a..00000000 --- a/seeker/snippet/program.java +++ /dev/null @@ -1,115 +0,0 @@ -//date: 2024-09-16T16:48:13Z -//url: https://api.github.com/gists/a8b1ca4dfd2a3170ad6e9193a5f39de5 -//owner: https://api.github.com/users/Nivasnvz - -import java.util.*; - -public class program -{ - public static void main(String[] ar) - { - - Scanner s=new Scanner(System.in); - - //define array for getting values - List a=new ArrayList<>(); - - // define two seperate array for greater and smaller string - - List big=new ArrayList<>(); - List small=new ArrayList<>(); - - int i=0,l=0,k=0; - - //to getting the String arrays until user stops - - for(;i<100;i++) - { - String a1=s.nextLine(); - a.add(a1); - if(a1.isEmpty())break; - } - - - - for(;k') - { - small.add(a.get(k+1)); - big.add(a.get(k)); - - } - } - - //combine both big and small string arrays - - List co=new 
ArrayList<>(big); - co.addAll(small); - - - - for(;l un=new LinkedHashSet<>(co); - - for(String so:un) - { - System.out.print(so+" "); - } - - - } - - static boolean isequal(String e,String e1,List a, List co) - { - int i; - for(i=0;i a) - { - String temp=a.get(s); - a.set(s,a.get(s1)); - a.set(s1,temp); - } -} \ No newline at end of file diff --git a/seeker/snippet/spring.Dockerfile b/seeker/snippet/spring.Dockerfile new file mode 100644 index 00000000..bfae59a6 --- /dev/null +++ b/seeker/snippet/spring.Dockerfile @@ -0,0 +1,25 @@ +#date: 2024-09-18T17:05:04Z +#url: https://api.github.com/gists/124d04536cdeca0cc709c6b43ffd9871 +#owner: https://api.github.com/users/Riko07br + +# Build maven----------------------- +FROM maven:3.8.4-openjdk-17 AS build + +WORKDIR /app + +COPY src/main ./src/main + +COPY pom.xml ./ + +RUN mvn clean "-Dmaven.test.skip" package + +# openJDK runner-------------------- +FROM openjdk:17-jdk-alpine + +WORKDIR /app + +COPY --from=build /app/target/backend-0.0.1-SNAPSHOT.jar ./app.jar + +EXPOSE 8080 + +ENTRYPOINT ["java","-jar","/app/app.jar"] \ No newline at end of file