Merge pull request #34 from ajcwebdev/next
Configure Docker Setup for TypeScript Compatibility
ajcwebdev authored Oct 11, 2024
2 parents efb5b0d + 7dd3aa4 commit 276abff
Showing 17 changed files with 170 additions and 98 deletions.
3 changes: 3 additions & 0 deletions Dockerfile
@@ -10,6 +10,9 @@ WORKDIR /usr/src/app
RUN curl -L https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp -o /usr/local/bin/yt-dlp \
&& chmod a+rx /usr/local/bin/yt-dlp

# Install tsx globally
RUN npm install -g tsx

# Copy package.json, package-lock.json, and install dependencies
COPY package*.json ./
RUN npm ci
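
The global `tsx` install is what lets `docker-entrypoint.sh` exec the TypeScript entry point directly. A quick sanity check, sketched under the assumption that the service is named `autoshow` as in `docker-compose.yml`:

```bash
# Rebuild the image, then confirm the globally installed tsx is on PATH
docker compose build autoshow
docker compose run --rm --entrypoint tsx autoshow --version
```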
2 changes: 0 additions & 2 deletions README.md
@@ -48,8 +48,6 @@ See [`docs/roadmap.md`](/docs/roadmap.md) for details about current development
cp .env.example .env
```

This sets a default model for Llama.cpp which ensures `--llama` doesn't fail if you haven't downloaded a model yet. Before trying to run local LLM inference with Llama.cpp, `callLlama` checks for a model and downloads one if none is detected.

### Install Local Dependencies

Install `yt-dlp` and `ffmpeg`, then run `npm i`.
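
For example, on macOS with Homebrew (other platforms should follow each tool's own install docs):

```bash
brew install yt-dlp ffmpeg
npm i
```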
36 changes: 18 additions & 18 deletions docker-compose.yml
@@ -11,10 +11,10 @@ services:
- /var/run/docker.sock:/var/run/docker.sock
depends_on:
- whisper
- ollama
environment:
- OLLAMA_HOST=ollama
- OLLAMA_PORT=11434
# - ollama
# environment:
# - OLLAMA_HOST=ollama
# - OLLAMA_PORT=11434
networks:
- autoshownet
whisper:
@@ -29,20 +29,20 @@
stdin_open: true
networks:
- autoshownet
ollama:
image: ollama/ollama
command: ["ollama", "serve", "--address", "0.0.0.0"] # Listen on all interfaces
ports:
- "11434:11434"
volumes:
- ./ollama:/root/.ollama
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:11434/healthz"]
interval: 10s
timeout: 5s
retries: 5
networks:
- autoshownet
# ollama:
# image: ollama/ollama
# command: ["ollama", "serve", "--address", "0.0.0.0"] # Listen on all interfaces
# ports:
# - "11434:11434"
# volumes:
# - ./ollama:/root/.ollama
# healthcheck:
# test: ["CMD", "curl", "-f", "http://localhost:11434/healthz"]
# interval: 10s
# timeout: 5s
# retries: 5
# networks:
# - autoshownet
networks:
autoshownet:
driver: bridge
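
With the `ollama` service commented out, only the CLI and `whisper` containers run, talking to each other over the `autoshownet` bridge network. A sketch for checking which containers are attached; the `autoshow_` prefix assumes the default Compose project name:

```bash
# Start the services, then list containers on the shared bridge network
docker compose up -d
docker network inspect autoshow_autoshownet --format '{{range .Containers}}{{.Name}} {{end}}'
```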
2 changes: 1 addition & 1 deletion docker-entrypoint.sh
@@ -2,4 +2,4 @@

# docker-entrypoint.sh
# Run the autoshow command with all arguments passed to the container
exec node --env-file=.env --no-warnings src/autoshow.js "$@"
exec tsx --env-file=.env --no-warnings src/autoshow.ts "$@"
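
Because `"$@"` forwards every argument passed to the container, a Compose invocation maps one-to-one onto a local CLI run. A sketch using the example video from `docs/examples.md`:

```bash
# Local run and containerized run with the same flags
npm run as -- --video "https://www.youtube.com/watch?v=MORMZXEaONk"
docker compose run --rm autoshow --video "https://www.youtube.com/watch?v=MORMZXEaONk"
```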
19 changes: 16 additions & 3 deletions docs/examples.md
@@ -388,13 +388,26 @@ npm run as -- --video "https://www.youtube.com/watch?v=MORMZXEaONk" --prompt tit

### Docker Compose

This will run both `whisper.cpp` and the AutoShow Commander CLI in their own Docker containers.
This will start both `whisper.cpp` and the AutoShow Commander CLI in their own Docker containers.

```bash
docker compose run autoshow --video "https://www.youtube.com/watch?v=MORMZXEaONk" --whisperDocker base
npm run docker-up
```

Currently working on the `llama.cpp` Docker integration so the entire project can be encapsulated in one local Docker Compose file.
Replace `as` with `docker` to run most other commands explained in this document.

- Does not support all commands at this time, notably `--llama` and `--ollama`.
- Currently working on the `llama.cpp` Docker integration so the entire project can be encapsulated in one local Docker Compose file.

```bash
npm run docker -- --video "https://www.youtube.com/watch?v=MORMZXEaONk" --whisperDocker base
```

To reset your Docker images and containers, run:

```bash
docker system prune -af --volumes
```

### Bun

45 changes: 24 additions & 21 deletions package.json
@@ -18,24 +18,26 @@
"autoshow": "./dist/autoshow.js"
},
"scripts": {
"setup": "bash ./setup.sh",
"build": "npx tsc",
"autoshow": "npm run build && node --env-file=.env --no-warnings dist/autoshow.js",
"as": "npm run build && node --env-file=.env --no-warnings dist/autoshow.js",
"bun-as": "npm run build && bun --env-file=.env --no-warnings dist/autoshow.js",
"deno-as": "npm run build && deno run --allow-sys --allow-read --allow-run --allow-write --allow-env dist/autoshow.js",
"v": "npm run build && node --env-file=.env --no-warnings dist/autoshow.js --whisper large-v2 --video",
"u": "npm run build && node --env-file=.env --no-warnings dist/autoshow.js --whisper large-v2 --urls",
"p": "npm run build && node --env-file=.env --no-warnings dist/autoshow.js --whisper large-v2 --playlist",
"f": "npm run build && node --env-file=.env --no-warnings dist/autoshow.js --whisper large-v2 --file",
"r": "npm run build && node --env-file=.env --no-warnings dist/autoshow.js --whisper large-v2 --rss",
"last3": "npm run build && node --env-file=.env --no-warnings dist/autoshow.js --whisper large-v2 --last 3 --rss",
"serve": "npm run build && node --env-file=.env --no-warnings --watch packages/server/index.js",
"fetch-local": "npm run build && node --env-file=.env --no-warnings packages/server/tests/fetch-local.js",
"fetch-all": "npm run build && node --env-file=.env --no-warnings packages/server/tests/fetch-all.js",
"t": "npm run build && node --test test/local.test.js",
"test-local": "npm run build && node --test test/local.test.js",
"test-all": "npm run build && node --test test/all.test.js"
"setup": "bash ./scripts/setup.sh",
"autoshow": "tsx --env-file=.env --no-warnings src/autoshow.ts",
"as": "tsx --env-file=.env --no-warnings src/autoshow.ts",
"docker": "docker compose run --remove-orphans autoshow",
"docker-up": "docker compose up --build -d --remove-orphans --no-start",
"ds": "docker compose images && docker compose ls",
"v": "node --env-file=.env --no-warnings src/autoshow.ts --whisper large-v2 --video",
"u": "node --env-file=.env --no-warnings src/autoshow.ts --whisper large-v2 --urls",
"p": "node --env-file=.env --no-warnings src/autoshow.ts --whisper large-v2 --playlist",
"f": "node --env-file=.env --no-warnings src/autoshow.ts --whisper large-v2 --file",
"r": "node --env-file=.env --no-warnings src/autoshow.ts --whisper large-v2 --rss",
"last3": "node --env-file=.env --no-warnings src/autoshow.ts --whisper large-v2 --last 3 --rss",
"serve": "node --env-file=.env --no-warnings --watch packages/server/index.ts",
"fetch-local": "node --env-file=.env --no-warnings packages/server/tests/fetch-local.ts",
"fetch-all": "node --env-file=.env --no-warnings packages/server/tests/fetch-all.ts",
"t": "npm run test-local",
"test-local": "node --test test/local.test.js",
"test-all": "node --test test/all.test.js",
"bun-as": "bun --env-file=.env --no-warnings src/autoshow.ts",
"deno-as": "deno run --allow-sys --allow-read --allow-run --allow-write --allow-env src/autoshow.ts"
},
"dependencies": {
"@anthropic-ai/sdk": "^0.26.0",
@@ -44,7 +46,6 @@
"@google/generative-ai": "^0.17.1",
"@mistralai/mistralai": "^1.0.2",
"@octoai/sdk": "^1.5.1",
"@types/inquirer": "^9.0.7",
"assemblyai": "^4.6.1",
"chalk": "^5.3.0",
"cohere-ai": "^7.12.0",
@@ -56,10 +57,12 @@
"inquirer": "^10.2.2",
"node-llama-cpp": "^3.1.0",
"ollama": "^0.5.9",
"openai": "^4.55.7",
"typescript": "^5.6.2"
"openai": "^4.55.7"
},
"devDependencies": {
"@types/inquirer": "^9.0.7",
"@types/node": "^22.7.5",
"tsx": "^4.19.1",
"typescript": "^5.6.2"
}
}
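
A sketch of how the reworked scripts are meant to compose, using the example video URL from `docs/examples.md`:

```bash
npm run docker-up   # build the images and create the containers
npm run docker -- --video "https://www.youtube.com/watch?v=MORMZXEaONk" --whisperDocker base
npm run ds          # list Compose images and stacks
```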
19 changes: 19 additions & 0 deletions scripts/setup-colima.sh
@@ -0,0 +1,19 @@
#!/bin/bash

# Function to check if a command exists
command_exists() {
  command -v "$1" >/dev/null 2>&1
}

# Check if Docker is running
if ! docker info &>/dev/null; then
  echo "Docker is not running. Checking for Colima..."
  if command_exists colima; then
    echo "Colima is installed. Attempting to start Colima..."
    colima start &>/dev/null
    if [ $? -eq 0 ]; then
      echo "Colima started successfully."
    else
      echo "Failed to start Colima. Please start Docker manually or check your Docker installation."
    fi
  else
    echo "Colima is not installed. Please start Docker manually or check your Docker installation."
  fi
else
  echo "Docker is running."
fi
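
On macOS hosts that run Docker through Colima instead of Docker Desktop, a plausible pre-flight sequence would be:

```bash
bash ./scripts/setup-colima.sh && npm run docker-up
```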
65 changes: 65 additions & 0 deletions scripts/setup.sh
@@ -0,0 +1,65 @@
#!/bin/bash

# Function to check if a command exists
command_exists() {
  command -v "$1" >/dev/null 2>&1
}

# Check if .env file exists
if [ -f ".env" ]; then
  echo ".env file already exists. Skipping copy of .env.example."
else
  echo ".env file does not exist. Copying .env.example to .env."
  cp .env.example .env
fi

# Check if yt-dlp is installed, if not, provide installation instructions
if ! command_exists yt-dlp; then
  echo "yt-dlp could not be found, refer to installation instructions here:"
  echo "https://github.com/yt-dlp/yt-dlp/wiki/Installation"
else
  echo "yt-dlp is already installed."
fi

# Check if Ollama is installed
if ! command_exists ollama; then
  echo "Ollama is not installed, refer to installation instructions here:"
  echo "https://github.com/ollama/ollama"
else
  echo "Ollama is installed."
fi

# Check if Ollama server is running
if ! curl -s "http://127.0.0.1:11434" &> /dev/null; then
  echo "Ollama server is not running. Starting Ollama server..."
  ollama serve > ollama.log 2>&1 &
  OLLAMA_PID=$!
  echo "Ollama server started with PID $OLLAMA_PID"
  sleep 5
else
  echo "Ollama server is already running."
fi

# Install npm dependencies
npm i

# Clone whisper.cpp repository
git clone https://github.com/ggerganov/whisper.cpp.git

# Download whisper models
bash ./whisper.cpp/models/download-ggml-model.sh base
bash ./whisper.cpp/models/download-ggml-model.sh large-v2

# Compile whisper.cpp
make -C whisper.cpp

# Copy Dockerfile
cp .github/whisper.Dockerfile whisper.cpp/Dockerfile

# Download Qwen 2.5 1.5B model for Llama.cpp
curl -L "https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct-GGUF/resolve/main/qwen2.5-1.5b-instruct-q6_k.gguf" -o "./src/llms/models/qwen2.5-1.5b-instruct-q6_k.gguf"

# Pull Llama 3.2 1B model using Ollama
ollama pull llama3.2:1b

echo "Setup completed successfully!"
34 changes: 0 additions & 34 deletions setup.sh

This file was deleted.

4 changes: 2 additions & 2 deletions src/autoshow.ts
@@ -145,9 +145,9 @@ program.action(async (options: ProcessingOptions) => {
llmServices,
finalTranscriptServices
)
log(final(`\n==================================================`))
log(final(`\n================================================================================================`))
log(final(` ${action} Processing Completed Successfully.`))
log(final(`==================================================\n`))
log(final(`================================================================================================\n`))
exit(0)
} catch (error) {
console.error(`Error processing ${action}:`, (error as Error).message)
4 changes: 2 additions & 2 deletions src/commands/processPlaylist.ts
@@ -67,9 +67,9 @@ export async function processPlaylist(

// Process each video in the playlist
for (const [index, url] of urls.entries()) {
log(opts(`\n==============================================================`))
log(opts(`\n================================================================================================`))
log(opts(` Processing video ${index + 1}/${urls.length}: ${url}`))
log(opts(`==============================================================\n`))
log(opts(`================================================================================================\n`))
try {
await processVideo(options, url, llmServices, transcriptServices)
} catch (error) {
8 changes: 4 additions & 4 deletions src/commands/processRSS.ts
@@ -211,12 +211,12 @@ export async function processRSS(
// Process each item in the feed
for (const [index, item] of itemsToProcess.entries()) {
log(opts(`\n========================================================================================`))
log(opts(` Item ${index + 1}/${itemsToProcess.length} processing:\n\n${item.title}`))
log(opts(` Item ${index + 1}/${itemsToProcess.length} processing: ${item.title}`))
log(opts(`========================================================================================\n`))
await processItem(options, item, llmServices, transcriptServices)
log(final(`\n========================================================================================`))
log(final(` ${index + 1}/${itemsToProcess.length} item processing completed successfully`))
log(final(`========================================================================================\n`))
log(opts(`\n========================================================================================`))
log(opts(` ${index + 1}/${itemsToProcess.length} item processing completed successfully`))
log(opts(`========================================================================================\n`))
}
} catch (error) {
console.error(`Error processing RSS feed: ${(error as Error).message}`)
6 changes: 3 additions & 3 deletions src/commands/processURLs.ts
@@ -36,7 +36,7 @@ export async function processURLs(
console.error('Error: No URLs found in the file.')
process.exit(1)
}
log(opts(`\n=== Found ${urls.length} URLs in the file... ===`))
log(opts(`\nFound ${urls.length} URLs in the file...`))

// Extract metadata for all videos
const metadataPromises = urls.map(extractVideoMetadata)
@@ -54,9 +54,9 @@

// Process each URL
for (const [index, url] of urls.entries()) {
log(opts(`\n============================================================`))
log(opts(`\n================================================================================================`))
log(opts(` Processing URL ${index + 1}/${urls.length}: ${url}`))
log(opts(`============================================================\n`))
log(opts(`================================================================================================\n`))
try {
await processVideo(options, url, llmServices, transcriptServices)
} catch (error) {
10 changes: 5 additions & 5 deletions src/llms/llama.ts
@@ -27,8 +27,8 @@ export const callLlama: LLMFunction = async (
modelName?: string
) => {
try {
// Get the model object from LLAMA_MODELS using the provided model name or default to QWEN_2_5_3B
const selectedModel = LLAMA_MODELS[modelName as LlamaModelType] || LLAMA_MODELS.QWEN_2_5_3B
// Get the model object from LLAMA_MODELS using the provided model name or default to QWEN_2_5_1B
const selectedModel = LLAMA_MODELS[modelName as LlamaModelType] || LLAMA_MODELS.QWEN_2_5_1B
log(wait(` - filename: ${selectedModel.filename}\n - url: ${selectedModel.url}\n`))

// If no valid model is found, throw an error
Expand All @@ -42,14 +42,14 @@ export const callLlama: LLMFunction = async (

// Check if the model file already exists, if not, download it
if (!existsSync(modelPath)) {
log(success(`\nDownloading ${selectedModel.filename}...`))
log(wait(`\n No model detected, downloading ${selectedModel.filename}...`))
try {
const downloader = await createModelDownloader({
modelUri: selectedModel.url,
dirPath: modelDir
})
await downloader.download()
log(success('Download completed'))
log(success(' Download completed'))
} catch (err) {
console.error(`Download failed: ${err instanceof Error ? err.message : String(err)}`)
throw new Error('Failed to download the model')
@@ -80,7 +80,7 @@
// Write the response to the temporary file
await writeFile(tempPath, response)

log(success('LLM processing completed'))
log(wait(' \nLLM processing completed'))
} catch (error) {
console.error(`Error in callLlama: ${error instanceof Error ? (error as Error).message : String(error)}`)
throw error
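
With the default model now `QWEN_2_5_1B` and download-on-first-use in place, one way to confirm a `--llama` run fetched a model is to list the models directory, assuming `modelDir` resolves to the same `./src/llms/models` path that `setup.sh` downloads into:

```bash
# A .gguf file should appear here after the first --llama invocation
ls ./src/llms/models/
```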