diff --git a/.github/actions/auto-pr-description/generate_pr_description.js b/.github/actions/auto-pr-description/generate_pr_description.js
index 7eac3cd..e486af3 100644
--- a/.github/actions/auto-pr-description/generate_pr_description.js
+++ b/.github/actions/auto-pr-description/generate_pr_description.js
@@ -7,7 +7,7 @@ const path = require('path');
const MAX_TOKENS_PER_REQUEST = 80000; // Conservative limit for Gemini 2.5 Flash
const CHARS_PER_TOKEN = 4; // Rough estimation
//const MAX_CHARS_PER_CHUNK = MAX_TOKENS_PER_REQUEST * CHARS_PER_TOKEN;
-const MAX_CHUNKS = 3; // Limit to prevent excessive API calls
+const MAX_CHUNKS = 5; // Limit to prevent excessive API calls
/**
* Estimate token count for text (rough approximation)
@@ -52,46 +52,69 @@ function chunkDiffByFiles(diffContent) {
const lines = diffContent.split('\n');
let currentChunk = '';
let currentFile = '';
- let tokenCount = 0;
+ let currentChunkTokenCount = 0;
+ const PROMPT_OVERHEAD = 2000; // Reserve tokens for prompt overhead
+ const MAX_CHUNK_TOKENS = MAX_TOKENS_PER_REQUEST - PROMPT_OVERHEAD;
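+  // With the defaults above, each chunk gets 80000 - 2000 = 78000 tokens (~312000 chars at 4 chars/token).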
- for (const line of lines) {
- // Check if this is a new file header
- //console.error(`Line is estimated at ${estimateTokens(line)} tokens`);
- tokenCount += estimateTokens(line);
- //console.error(`Total tokens for this chunk is ${tokenCount}`);
- if (line.startsWith('diff --git') || line.startsWith('+++') || line.startsWith('---')) {
- // If we have content and it's getting large, save current chunk
- if (currentChunk && tokenCount > MAX_TOKENS_PER_REQUEST) {
- fileChunks.push({
- content: currentChunk.trim(),
- file: currentFile,
- type: 'file-chunk'
- });
- currentChunk = '';
- tokenCount = 0;
+ for (let i = 0; i < lines.length; i++) {
+ const line = lines[i];
+ const lineTokens = estimateTokens(line);
+ const isNewFile = line.startsWith('diff --git');
+ const isFileHeader = line.startsWith('+++') || line.startsWith('---');
+
+ // Check if we need to split current chunk before adding this line
+ if (currentChunk && (currentChunkTokenCount + lineTokens) > MAX_CHUNK_TOKENS) {
+ // Current chunk is getting too large, save it
+ fileChunks.push({
+ content: currentChunk.trim(),
+ file: currentFile,
+ type: 'file-chunk'
+ });
+ console.error(`Chunk ${fileChunks.length} saved: ${currentChunkTokenCount} tokens for ${currentFile || 'unknown'}`);
+ currentChunk = '';
+ currentChunkTokenCount = 0;
+ }
+
+ // Handle new file boundaries
+ if (isNewFile) {
+ // Extract filename from next lines
+ // Look ahead for +++ line
+ for (let j = i + 1; j < Math.min(i + 10, lines.length); j++) {
+ if (lines[j].startsWith('+++')) {
+ currentFile = lines[j].replace('+++ b/', '').replace('+++ a/', '');
+ break;
+ }
}
+ }
+
+ // Add line to current chunk
+ currentChunk += line + '\n';
+ currentChunkTokenCount += lineTokens;
+
+ // If a single line is too large, split it
+ if (lineTokens > MAX_CHUNK_TOKENS && currentChunk.length > 100) {
+ // Remove the line from current chunk
+ currentChunk = currentChunk.substring(0, currentChunk.length - line.length - 1);
+ currentChunkTokenCount -= lineTokens;
- // Start new chunk
- currentChunk = line + '\n';
-
-
- // Extract filename for reference
- if (line.startsWith('+++')) {
- currentFile = line.replace('+++ b/', '').replace('+++ a/', '');
- }
- if(tokenCount > MAX_TOKENS_PER_REQUEST){
- const split_chunk = splitStringByTokens(currentChunk, MAX_TOKENS_PER_REQUEST);
- currentChunk = split_chunk[split_chunk.length-1];
- for(let i = 0; i < split_chunk.length -1;i++){
- fileChunks.push({
- content: split_chunk[i].trim(),
- file: currentFile,
- type: 'file-chunk'
- });
+ // Split the large line
+ const splitChunks = splitStringByTokens(line, MAX_CHUNK_TOKENS);
+ for (let j = 0; j < splitChunks.length; j++) {
+ if (j > 0) {
+ // Save previous chunk if it has content
+ if (currentChunk.trim()) {
+ fileChunks.push({
+ content: currentChunk.trim(),
+ file: currentFile,
+ type: 'file-chunk'
+ });
+ currentChunk = '';
+ currentChunkTokenCount = 0;
+ }
}
+ currentChunk = splitChunks[j] + '\n';
+ currentChunkTokenCount = estimateTokens(currentChunk);
}
- } else {
- currentChunk += line + '\n';
}
}
@@ -102,6 +125,7 @@ function chunkDiffByFiles(diffContent) {
file: currentFile,
type: 'file-chunk'
});
+ console.error(`Final chunk ${fileChunks.length} saved: ${currentChunkTokenCount} tokens for ${currentFile || 'unknown'}`);
}
return fileChunks;
@@ -149,44 +173,67 @@ ${diffContent}`;
}
/**
- * Call Gemini API with the given prompt
+ * Call Gemini API with the given prompt (with retry logic for rate limits)
*/
-async function callGeminiAPI(prompt, apiKey) {
+async function callGeminiAPI(prompt, apiKey, retryCount = 0) {
+ const maxRetries = 3;
+ const baseDelay = 1000; // 1 second base delay
+
console.error(`Sending prompt with an estimated ${estimateTokens(prompt)} tokens`);
- const response = await fetch(`https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent?key=${apiKey}`, {
- method: 'POST',
- headers: { 'Content-Type': 'application/json' },
- body: JSON.stringify({
- contents: [{
- parts: [{
- text: prompt
- }]
- }],
- generationConfig: {
- temperature: 0.7,
- topK: 40,
- topP: 0.95,
- maxOutputTokens: 8192,
- }
- })
- });
+
+ try {
+ const response = await fetch(`https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent?key=${apiKey}`, {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({
+ contents: [{
+ parts: [{
+ text: prompt
+ }]
+ }],
+ generationConfig: {
+ temperature: 0.7,
+ topK: 40,
+ topP: 0.95,
+ maxOutputTokens: 8192,
+ }
+ })
+ });
- if (!response.ok) {
- const errorText = await response.text();
- throw new Error(`Gemini API request failed with status ${response.status}: ${errorText}`);
- }
+ // Handle rate limiting (429) with exponential backoff
+ if (response.status === 429 && retryCount < maxRetries) {
+ const delay = baseDelay * Math.pow(2, retryCount); // Exponential backoff: 1s, 2s, 4s
+ console.error(`Rate limit hit, retrying in ${delay}ms (attempt ${retryCount + 1}/${maxRetries})`);
+ await sleep(delay);
+ return await callGeminiAPI(prompt, apiKey, retryCount + 1);
+ }
- const json = await response.json();
-
- if (!json.candidates || !json.candidates[0]) {
- throw new Error('Invalid response from Gemini API');
- }
+ if (!response.ok) {
+ const errorText = await response.text();
+ throw new Error(`Gemini API request failed with status ${response.status}: ${errorText}`);
+ }
- if (!json.candidates[0].content || !json.candidates[0].content.parts || !json.candidates[0].content.parts[0] || !json.candidates[0].content.parts[0].text) {
- throw new Error('Invalid response structure from Gemini API - missing content');
- }
+ const json = await response.json();
+
+ if (!json.candidates || !json.candidates[0]) {
+ throw new Error('Invalid response from Gemini API');
+ }
+
+ if (!json.candidates[0].content || !json.candidates[0].content.parts || !json.candidates[0].content.parts[0] || !json.candidates[0].content.parts[0].text) {
+ throw new Error('Invalid response structure from Gemini API - missing content');
+ }
- return json.candidates[0].content.parts[0].text;
+ return json.candidates[0].content.parts[0].text;
+ } catch (error) {
+ // If it's a network error and we have retries left, retry with exponential backoff
+ if (retryCount < maxRetries && (error.message.includes('fetch') || error.message.includes('network'))) {
+ const delay = baseDelay * Math.pow(2, retryCount);
+ console.error(`Network error, retrying in ${delay}ms (attempt ${retryCount + 1}/${maxRetries}): ${error.message}`);
+ await sleep(delay);
+ return await callGeminiAPI(prompt, apiKey, retryCount + 1);
+ }
+ throw error;
+ }
}
/**
@@ -201,12 +248,13 @@ async function processChunks(chunks, apiKey) {
// Multiple chunks - process each and combine
const chunkResults = [];
+ const CHUNK_DELAY = 500; // 500ms delay between chunks (reduced from 5s)
for (let i = 0; i < Math.min(chunks.length, MAX_CHUNKS); i++) {
const chunk = chunks[i];
if (i > 0) {
- // sleep for 3 seconds
- sleep(5 * 1000);
+ // Small delay between chunks to avoid rate limits (reduced from 5s to 500ms)
+ await sleep(CHUNK_DELAY);
}
console.error(`Processing chunk ${i + 1}/${Math.min(chunks.length, MAX_CHUNKS)} (${chunk.file || 'unknown file'})`);
@@ -225,7 +273,10 @@ async function processChunks(chunks, apiKey) {
if (chunkResults.length === 0) {
throw new Error('Failed to process any chunks');
}
- sleep(5*1000);
+
+ // Small delay before combining (reduced from 5s to 500ms)
+ await sleep(CHUNK_DELAY);
+
// Combine results from multiple chunks
const combinedPrompt = `Combine these pull request descriptions into a single, coherent PR description. Use the same format:
diff --git a/.github/actions/auto-release-description/action.yml b/.github/actions/auto-release-description/action.yml
index 4dcdd55..dc94ec3 100644
--- a/.github/actions/auto-release-description/action.yml
+++ b/.github/actions/auto-release-description/action.yml
@@ -67,34 +67,47 @@ runs:
PR_NUMBER: ${{ inputs.pr-number }}
JIRA_TICKET_URL_PREFIX: ${{ inputs.jira-ticket-url-prefix }}
run: |
- # Generate description using AI
- DESCRIPTION=$(node ${{ github.action_path }}/generate_pr_description.js pr.diff)
-
- # Get existing PR body to check for images
- FIRST_LINE=$(gh pr view ${{ inputs.pr-number }} --json body --jq '.body' | head -n 1)
-
- # Preserve images if they exist at the beginning
- if echo "$FIRST_LINE" | grep -qE '^(
]*>[[:space:]]*|!\[[^]]*\]\([^)]*\))$'; then
- printf '%s\n\n%s\n' "$FIRST_LINE" "$DESCRIPTION" > pr_body.md
- else
- printf '%s\n' "$DESCRIPTION" > pr_body.md
+ # Generate description using AI (with chunking support for large diffs)
+ # The script writes description to stdout, errors to stderr
+ if ! node ${{ github.action_path }}/generate_pr_description.js pr.diff > release_description.md 2>generate_errors.log; then
+ echo "Error: Failed to generate release description" >&2
+ cat generate_errors.log >&2
+ exit 1
fi
- # Add JIRA ticket link if found
- PR_TITLE=$(gh pr view ${{ inputs.pr-number }} --json title --jq '.title')
- TICKET_ID=$(echo "$PR_TITLE" | grep -oE '[A-Z]+-[0-9]+' || true)
- if [ -z "$TICKET_ID" ]; then
- TICKET_ID=$(echo "$GITHUB_HEAD_REF" | grep -oE '[A-Z]+-[0-9]+' || true)
- fi
- if [ -n "$TICKET_ID" ]; then
- TICKET_URL="${{ inputs.jira-ticket-url-prefix }}${TICKET_ID}"
- printf '\n## Ticket\n%s\n' "$TICKET_URL" >> pr_body.md
- fi
+ # Get existing PR body to preserve all content
+ gh pr view ${{ inputs.pr-number }} --json body --jq '.body' > pr_body.md || {
+ echo "Error: Failed to get PR body" >&2
+ exit 1
+ }
+
+ # Insert the generated description into the existing PR body
+ # This preserves all existing content and uses comment tags for auto-generated section
+ node ${{ github.action_path }}/insert_release_description.js pr_body.md release_description.md > new_body.md || {
+ echo "Error: Failed to insert release description" >&2
+ exit 1
+ }
# Output the description for other steps to use
- echo "description<> $GITHUB_OUTPUT
- cat pr_body.md >> $GITHUB_OUTPUT
- echo "EOF" >> $GITHUB_OUTPUT
+ # Use a unique delimiter to avoid conflicts with content that might contain "EOF"
+ # Generate delimiter once and ensure it's used consistently
+ DELIMITER="GITHUB_OUTPUT_DESCRIPTION_$(date +%s)_$$"
+ {
+ echo "description<<${DELIMITER}"
+            # Ensure file exists and is non-empty
+ if [ -f release_description.md ] && [ -s release_description.md ]; then
+ # Output the file content
+ cat release_description.md
+ # Always ensure newline before delimiter (GitHub Actions requirement)
+ # This handles cases where file doesn't end with newline
+ printf '\n'
+ else
+ echo "Error: release_description.md is missing or empty" >&2
+ echo "Error: release_description.md is missing or empty"
+ fi
+ # Closing delimiter must match exactly - use the same variable
+ echo "${DELIMITER}"
+ } >> $GITHUB_OUTPUT
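+          # The entry written above uses GitHub's multiline-output syntax, e.g.:
+          #   description<<GITHUB_OUTPUT_DESCRIPTION_1712000000_4242
+          #   ...generated release description...
+          #   GITHUB_OUTPUT_DESCRIPTION_1712000000_4242
+          # (timestamp and PID values are illustrative)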
- name: Update PR description
id: update_pr
@@ -103,6 +116,7 @@ runs:
GH_TOKEN: ${{ inputs.github-token }}
PR_NUMBER: ${{ inputs.pr-number }}
run: |
- gh pr edit ${{ inputs.pr-number }} --body-file pr_body.md
+ # Update PR with merged body (preserves all existing content)
+ gh pr edit ${{ inputs.pr-number }} --body-file new_body.md
echo "updated=true" >> $GITHUB_OUTPUT
echo "Successfully updated PR #${{ inputs.pr-number }} description"
diff --git a/.github/actions/auto-release-description/generate_pr_description.js b/.github/actions/auto-release-description/generate_pr_description.js
index 1f03180..83b064a 100644
--- a/.github/actions/auto-release-description/generate_pr_description.js
+++ b/.github/actions/auto-release-description/generate_pr_description.js
@@ -3,45 +3,195 @@
const fs = require('fs');
const path = require('path');
-(async () => {
- const [, , diffFile] = process.argv;
- if (!diffFile) {
-    console.error('Usage: generate_pr_description.js <diff-file>');
- process.exit(1);
- }
+// Configuration constants
+const MAX_TOKENS_PER_REQUEST = 80000; // Conservative limit for Gemini 2.5 Flash
+const CHARS_PER_TOKEN = 4; // Rough estimation
+//const MAX_CHARS_PER_CHUNK = MAX_TOKENS_PER_REQUEST * CHARS_PER_TOKEN;
+const MAX_CHUNKS = 5; // Limit to prevent excessive API calls
- if (!fs.existsSync(diffFile)) {
- console.error(`Error: Diff file not found at ${diffFile}`);
- process.exit(1);
+/**
+ * Estimate token count for text (rough approximation)
+ */
+function estimateTokens(text) {
+ return Math.ceil(text.length / CHARS_PER_TOKEN);
+}
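+// e.g. a 400-character diff line is estimated at 400 / 4 = 100 tokens.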
+
+
+function sleep(ms) {
+ return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+
+function splitStringByTokens(str, maxTokens) {
+ console.error('splitStringByTokens');
+ const words = str.split(' ');
+ const result = [];
+ let currentLine = '';
+
+ for (const word of words) {
+ if (estimateTokens(currentLine + word) <= maxTokens) {
+ currentLine += (currentLine ? ' ' : '') + word;
+ } else {
+ if (currentLine) result.push(currentLine);
+ currentLine = word;
+ }
}
- const apiKey = process.env.GEMINI_API_KEY;
- if (!apiKey) {
- console.error('Error: GEMINI_API_KEY environment variable is required');
- process.exit(1);
+ if (currentLine) result.push(currentLine);
+
+ return result;
+}
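+// e.g. with CHARS_PER_TOKEN = 4, splitStringByTokens(line, 78000) breaks an oversized
+// line into word-aligned pieces of roughly 312000 characters each.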
+
+
+/**
+ * Split diff into chunks by file boundaries
+ */
+function chunkDiffByFiles(diffContent) {
+ console.error('chunkDiffByFiles');
+ const fileChunks = [];
+ const lines = diffContent.split('\n');
+ let currentChunk = '';
+ let currentFile = '';
+ let currentChunkTokenCount = 0;
+ const PROMPT_OVERHEAD = 2000; // Reserve tokens for prompt overhead
+ const MAX_CHUNK_TOKENS = MAX_TOKENS_PER_REQUEST - PROMPT_OVERHEAD;
+
+ for (let i = 0; i < lines.length; i++) {
+ const line = lines[i];
+ const lineTokens = estimateTokens(line);
+ const isNewFile = line.startsWith('diff --git');
+ const isFileHeader = line.startsWith('+++') || line.startsWith('---');
+
+ // Check if we need to split current chunk before adding this line
+ if (currentChunk && (currentChunkTokenCount + lineTokens) > MAX_CHUNK_TOKENS) {
+ // Current chunk is getting too large, save it
+ fileChunks.push({
+ content: currentChunk.trim(),
+ file: currentFile,
+ type: 'file-chunk'
+ });
+ console.error(`Chunk ${fileChunks.length} saved: ${currentChunkTokenCount} tokens for ${currentFile || 'unknown'}`);
+ currentChunk = '';
+ currentChunkTokenCount = 0;
+ }
+
+ // Handle new file boundaries
+ if (isNewFile) {
+ // Extract filename from next lines
+ // Look ahead for +++ line
+ for (let j = i + 1; j < Math.min(i + 10, lines.length); j++) {
+ if (lines[j].startsWith('+++')) {
+ currentFile = lines[j].replace('+++ b/', '').replace('+++ a/', '');
+ break;
+ }
+ }
+ }
+
+ // Add line to current chunk
+ currentChunk += line + '\n';
+ currentChunkTokenCount += lineTokens;
+
+ // If a single line is too large, split it
+ if (lineTokens > MAX_CHUNK_TOKENS && currentChunk.length > 100) {
+ // Remove the line from current chunk
+ currentChunk = currentChunk.substring(0, currentChunk.length - line.length - 1);
+ currentChunkTokenCount -= lineTokens;
+
+ // Split the large line
+ const splitChunks = splitStringByTokens(line, MAX_CHUNK_TOKENS);
+ for (let j = 0; j < splitChunks.length; j++) {
+ if (j > 0) {
+ // Save previous chunk if it has content
+ if (currentChunk.trim()) {
+ fileChunks.push({
+ content: currentChunk.trim(),
+ file: currentFile,
+ type: 'file-chunk'
+ });
+ currentChunk = '';
+ currentChunkTokenCount = 0;
+ }
+ }
+ currentChunk = splitChunks[j] + '\n';
+ currentChunkTokenCount = estimateTokens(currentChunk);
+ }
+ }
+ }
+
+ // Add the last chunk
+ if (currentChunk.trim()) {
+ fileChunks.push({
+ content: currentChunk.trim(),
+ file: currentFile,
+ type: 'file-chunk'
+ });
+ console.error(`Final chunk ${fileChunks.length} saved: ${currentChunkTokenCount} tokens for ${currentFile || 'unknown'}`);
}
+
+ return fileChunks;
+}
- // Create prompt for PR description generation
- const promptTemplate = `Write a concise pull request description based on the git diff. Use this exact format:
+/**
+ * Create a summary prompt for extremely large diffs
+ */
+function createSummaryPrompt(diffContent) {
+ return `Analyze this git diff and provide a high-level summary. Focus on:
+1. What types of files were changed (e.g., source code, tests, config, docs)
+2. The overall scope of changes (e.g., new feature, bug fix, refactor)
+3. Any major architectural changes or new dependencies
-## Description
-Brief summary of changes (1-2 sentences max).
+Keep the summary to 2-3 sentences maximum.
-## Changes
-- [ ] Key change 1
-- [ ] Key change 2
-- [ ] Key change 3 (max 5 items)
+Git diff:
+${diffContent}`;
+}
-## Verification
-- [ ] Test step 1
-- [ ] Test step 2
-- [ ] Test step 3 (max 3 items)
+/**
+ * Create the main PR description prompt
+ */
+function createPRPrompt(diffContent) {
+ return `Write a release note summary with the following sections:
-Keep it concise and focused on the most important changes.`;
+ ## Infrastructure Changes
+ Highlight changes to AWS resources, IaC (CloudFormation/SAM templates), Lambda functions, databases, and deployment configurations.
- const diffContent = fs.readFileSync(diffFile, 'utf8');
- const combinedPrompt = `${promptTemplate}\n\nHere is the git diff:\n\n${diffContent}`;
+ ## Security Concerns
+ Identify any security-related changes, authentication/authorization updates, data access modifications, or potential vulnerabilities.
+
+ ## Performance Implications
+ Assess any changes that could impact system performance, database queries, API response times, or resource consumption.
+
+ ## New Features
+ Describe new functionality, enhancements, or capabilities being introduced.
+
+ ## Student Access Risk Analysis
+ Evaluate the risk that these changes could cause students to lose access to their course materials. Assess changes to:
+
+ - Term ID handling and relationships
+ - Adoption, section, product, or term product relationships
+ - Data export or deduplication logic
+ - Database schema affecting terms, sections, or student access
+ - Section Channel Offer (SCO) activation/deactivation logic
+ - Query logic for student materials or permissions
+
+ For each risk identified, provide: Risk Level (NONE/LOW/MEDIUM/HIGH/CRITICAL), specific changes, risk description, and recommended verification steps.
+
+ If no student access risks are detected, state: "Risk Level: NONE - No changes affect student material access."
+ Print only the report and ask no questions.
+
+${diffContent}`;
+}
+
+/**
+ * Call Gemini API with the given prompt (with retry logic for rate limits)
+ */
+async function callGeminiAPI(prompt, apiKey, retryCount = 0) {
+ const maxRetries = 3;
+ const baseDelay = 1000; // 1 second base delay
+
+ console.error(`Sending prompt with an estimated ${estimateTokens(prompt)} tokens`);
+
try {
const response = await fetch(`https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent?key=${apiKey}`, {
method: 'POST',
@@ -49,7 +199,7 @@ Keep it concise and focused on the most important changes.`;
body: JSON.stringify({
contents: [{
parts: [{
- text: combinedPrompt
+ text: prompt
}]
}],
generationConfig: {
@@ -61,40 +211,134 @@ Keep it concise and focused on the most important changes.`;
})
});
+ // Handle rate limiting (429) with exponential backoff
+ if (response.status === 429 && retryCount < maxRetries) {
+ const delay = baseDelay * Math.pow(2, retryCount); // Exponential backoff: 1s, 2s, 4s
+ console.error(`Rate limit hit, retrying in ${delay}ms (attempt ${retryCount + 1}/${maxRetries})`);
+ await sleep(delay);
+ return await callGeminiAPI(prompt, apiKey, retryCount + 1);
+ }
+
if (!response.ok) {
const errorText = await response.text();
- console.error(`Error: Gemini API request failed with status ${response.status}`);
- console.error(`Response: ${errorText}`);
- process.exit(1);
+ throw new Error(`Gemini API request failed with status ${response.status}: ${errorText}`);
}
const json = await response.json();
if (!json.candidates || !json.candidates[0]) {
- console.error('Error: Invalid response from Gemini API');
- console.error(JSON.stringify(json, null, 2));
- process.exit(1);
+ throw new Error('Invalid response from Gemini API');
+ }
+
+ if (!json.candidates[0].content || !json.candidates[0].content.parts || !json.candidates[0].content.parts[0] || !json.candidates[0].content.parts[0].text) {
+ throw new Error('Invalid response structure from Gemini API - missing content');
}
- // Check if response was truncated due to max tokens
- if (json.candidates[0].finishReason === 'MAX_TOKENS') {
- console.error('Warning: Response was truncated due to token limit. Consider reducing diff size or using more specific ignore-files.');
- // Continue processing the partial response
+ return json.candidates[0].content.parts[0].text;
+ } catch (error) {
+ // If it's a network error and we have retries left, retry with exponential backoff
+ if (retryCount < maxRetries && (error.message.includes('fetch') || error.message.includes('network'))) {
+ const delay = baseDelay * Math.pow(2, retryCount);
+ console.error(`Network error, retrying in ${delay}ms (attempt ${retryCount + 1}/${maxRetries}): ${error.message}`);
+ await sleep(delay);
+ return await callGeminiAPI(prompt, apiKey, retryCount + 1);
}
+ throw error;
+ }
+}
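+// Worst-case cumulative retry wait: 1s + 2s + 4s = 7s across the 3 retries.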
+
+/**
+ * Process diff chunks and combine results
+ */
+async function processChunks(chunks, apiKey) {
+  console.error('processChunks');
+ if (chunks.length === 1) {
+ // Single chunk, process normally
+ return await callGeminiAPI(createPRPrompt(chunks[0].content), apiKey);
+ }
- if (!json.candidates[0].content) {
- console.error('Error: No content in API response');
- console.error(JSON.stringify(json, null, 2));
- process.exit(1);
+ // Multiple chunks - process each and combine
+ const chunkResults = [];
+ const CHUNK_DELAY = 500; // 500ms delay between chunks (reduced from 5s)
+
+ for (let i = 0; i < Math.min(chunks.length, MAX_CHUNKS); i++) {
+ const chunk = chunks[i];
+ if (i > 0) {
+ // Small delay between chunks to avoid rate limits (reduced from 5s to 500ms)
+ await sleep(CHUNK_DELAY);
}
+ console.error(`Processing chunk ${i + 1}/${Math.min(chunks.length, MAX_CHUNKS)} (${chunk.file || 'unknown file'})`);
+
+ try {
+ const result = await callGeminiAPI(createPRPrompt(chunk.content), apiKey);
+ chunkResults.push({
+ file: chunk.file,
+ result: result
+ });
+ } catch (error) {
+ console.error(`Warning: Failed to process chunk ${i + 1}: ${error.message}`);
+ // Continue with other chunks
+ }
+ }
+
+ if (chunkResults.length === 0) {
+ throw new Error('Failed to process any chunks');
+ }
+
+ // Small delay before combining (reduced from 5s to 500ms)
+ await sleep(CHUNK_DELAY);
+
+ // Combine results from multiple chunks
+  const combinedPrompt = `Combine these release note summaries into a single, coherent release description. Use the same section format:
+
+${chunkResults.map((chunk, index) => `## Chunk ${index + 1} (${chunk.file}):
+${chunk.result}`).join('\n\n')}
+
+Create a unified description that captures the overall changes across all files.`;
+
+ return await callGeminiAPI(combinedPrompt, apiKey);
+}
+
+(async () => {
+ const [, , diffFile] = process.argv;
+ if (!diffFile) {
+    console.error('Usage: generate_pr_description.js <diff-file>');
+ process.exit(1);
+ }
+
+ if (!fs.existsSync(diffFile)) {
+ console.error(`Error: Diff file not found at ${diffFile}`);
+ process.exit(1);
+ }
+
+ const apiKey = process.env.GEMINI_API_KEY;
+ if (!apiKey) {
+ console.error('Error: GEMINI_API_KEY environment variable is required');
+ process.exit(1);
+ }
+
+ const diffContent = fs.readFileSync(diffFile, 'utf8');
+ const estimatedTokens = estimateTokens(diffContent);
+
+ console.error(`Diff size: ${diffContent.length} characters (~${estimatedTokens} tokens)`);
+
+ try {
+ let result;
- if (!json.candidates[0].content.parts || !json.candidates[0].content.parts[0] || !json.candidates[0].content.parts[0].text) {
- console.error('Error: Invalid response structure from Gemini API - missing parts or text');
- console.error(JSON.stringify(json, null, 2));
- process.exit(1);
+ if (estimatedTokens > MAX_TOKENS_PER_REQUEST) {
+ console.error('Large diff detected, using chunking strategy...');
+
+ const chunks = chunkDiffByFiles(diffContent);
+ console.error(`Split diff into ${chunks.length} chunks`);
+ if (chunks.length > MAX_CHUNKS) {
+ console.error(`Warning: Too many chunks (${chunks.length}), processing first ${MAX_CHUNKS} chunks only`);
+ }
+ result = await processChunks(chunks, apiKey);
+ } else {
+ // Small diff, process normally
+ result = await callGeminiAPI(createPRPrompt(diffContent), apiKey);
}
- const result = json.candidates[0].content.parts[0].text;
process.stdout.write(result);
} catch (error) {
console.error(`Error: Failed to generate pull request description: ${error.message}`);
diff --git a/.github/actions/auto-release-description/insert_release_description.js b/.github/actions/auto-release-description/insert_release_description.js
new file mode 100644
index 0000000..8a69b06
--- /dev/null
+++ b/.github/actions/auto-release-description/insert_release_description.js
@@ -0,0 +1,63 @@
+#!/usr/bin/env node
+
+const fs = require('fs');
+
+/**
+ * Inserts a release description into an existing PR body using HTML comment tags.
+ * This preserves all existing PR body content while allowing replacement of the
+ * auto-generated section.
+ *
+ * Usage: node insert_release_description.js <body-file> <description-file>
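+ * Example: node insert_release_description.js pr_body.md release_description.md > new_body.md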
+ */
+
+const [, , bodyPath, descriptionPath] = process.argv;
+
+if (!bodyPath || !descriptionPath) {
+  console.error('Usage: insert_release_description.js <body-file> <description-file>');
+ process.exit(1);
+}
+
+const START_TAG = '<!-- auto-release-description:start -->';
+const END_TAG = '<!-- auto-release-description:end -->';
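+// Representative tag strings: any stable, unique HTML-comment pair works, since matching below uses indexOf.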
+
+if (!fs.existsSync(bodyPath)) {
+ console.error(`Error: Body file not found at ${bodyPath}`);
+ process.exit(1);
+}
+
+if (!fs.existsSync(descriptionPath)) {
+ console.error(`Error: Description file not found at ${descriptionPath}`);
+ process.exit(1);
+}
+
+const body = fs.readFileSync(bodyPath, 'utf8');
+const description = fs.readFileSync(descriptionPath, 'utf8').trim();
+
+const block = `${START_TAG}\n${description}\n${END_TAG}`;
+
+let result;
+const startIndex = body.indexOf(START_TAG);
+
+if (startIndex === -1) {
+ // No existing tags - append the description block
+ const trimmed = body.trimEnd();
+ const prefix = trimmed.length > 0 ? trimmed + '\n\n' : '';
+ result = `${prefix}${block}\n`;
+} else {
+ // Tags exist - replace content between tags
+ const endIndex = body.indexOf(END_TAG, startIndex + START_TAG.length);
+
+ if (endIndex === -1) {
+ // Start tag exists but no end tag - replace from start tag to end
+ result = body.slice(0, startIndex) + block + '\n';
+ } else {
+ // Both tags exist - replace content between them
+ result = body.slice(0, startIndex) + block + body.slice(endIndex + END_TAG.length);
+ if (!result.endsWith('\n')) {
+ result += '\n';
+ }
+ }
+}
+
+process.stdout.write(result);
+