30 changes: 8 additions & 22 deletions apps/web/migrations/meta/0003_snapshot.json
@@ -93,12 +93,8 @@
 "name": "accounts_user_id_users_id_fk",
 "tableFrom": "accounts",
 "tableTo": "users",
-"columnsFrom": [
-"user_id"
-],
-"columnsTo": [
-"id"
-],
+"columnsFrom": ["user_id"],
+"columnsTo": ["id"],
 "onDelete": "cascade",
 "onUpdate": "no action"
 }
@@ -145,9 +141,7 @@
 "export_waitlist_email_unique": {
 "name": "export_waitlist_email_unique",
 "nullsNotDistinct": false,
-"columns": [
-"email"
-]
+"columns": ["email"]
 }
 },
 "policies": {},
@@ -213,12 +207,8 @@
 "name": "sessions_user_id_users_id_fk",
 "tableFrom": "sessions",
 "tableTo": "users",
-"columnsFrom": [
-"user_id"
-],
-"columnsTo": [
-"id"
-],
+"columnsFrom": ["user_id"],
+"columnsTo": ["id"],
 "onDelete": "cascade",
 "onUpdate": "no action"
 }
@@ -228,9 +218,7 @@
 "sessions_token_unique": {
 "name": "sessions_token_unique",
 "nullsNotDistinct": false,
-"columns": [
-"token"
-]
+"columns": ["token"]
 }
 },
 "policies": {},
@@ -292,9 +280,7 @@
 "users_email_unique": {
 "name": "users_email_unique",
 "nullsNotDistinct": false,
-"columns": [
-"email"
-]
+"columns": ["email"]
 }
 },
 "policies": {},
@@ -362,4 +348,4 @@
"schemas": {},
"tables": {}
}
}
}
2 changes: 1 addition & 1 deletion apps/web/migrations/meta/_journal.json
@@ -31,4 +31,4 @@
"breakpoints": true
}
]
}
}
256 changes: 128 additions & 128 deletions apps/web/src/app/api/get-upload-url/route.ts
@@ -1,128 +1,128 @@
import { NextRequest, NextResponse } from "next/server";
import { z } from "zod";
import { AwsClient } from "aws4fetch";
import { nanoid } from "nanoid";
import { env } from "@/env";
import { baseRateLimit } from "@/lib/rate-limit";
import { isTranscriptionConfigured } from "@/lib/transcription-utils";

const uploadRequestSchema = z.object({
  fileExtension: z.enum(["wav", "mp3", "m4a", "flac"], {
    errorMap: () => ({
      message: "File extension must be wav, mp3, m4a, or flac",
    }),
  }),
});

const apiResponseSchema = z.object({
  uploadUrl: z.string().url(),
  fileName: z.string().min(1),
});

export async function POST(request: NextRequest) {
  try {
    // Rate limiting
    const ip = request.headers.get("x-forwarded-for") ?? "anonymous";
    const { success } = await baseRateLimit.limit(ip);

    if (!success) {
      return NextResponse.json({ error: "Too many requests" }, { status: 429 });
    }

    // Check transcription configuration
    const transcriptionCheck = isTranscriptionConfigured();
    if (!transcriptionCheck.configured) {
      console.error(
        "Missing environment variables:",
        JSON.stringify(transcriptionCheck.missingVars)
      );

      return NextResponse.json(
        {
          error: "Transcription not configured",
          message: `Auto-captions require environment variables: ${transcriptionCheck.missingVars.join(", ")}. Check README for setup instructions.`,
        },
        { status: 503 }
      );
    }
Comment on lines +32 to +47

⚠️ Potential issue | 🟠 Major

Wrong configuration guard: this endpoint checks transcription config instead of storage config

This route only needs R2/Cloudflare vars. Using isTranscriptionConfigured() can 503 the endpoint when MODAL_TRANSCRIPTION_URL is missing, even though uploads don’t require it.

Apply:

-    // Check transcription configuration
-    const transcriptionCheck = isTranscriptionConfigured();
-    if (!transcriptionCheck.configured) {
-      console.error(
-        "Missing environment variables:",
-        JSON.stringify(transcriptionCheck.missingVars)
-      );
-
-      return NextResponse.json(
-        {
-          error: "Transcription not configured",
-          message: `Auto-captions require environment variables: ${transcriptionCheck.missingVars.join(", ")}. Check README for setup instructions.`,
-        },
-        { status: 503 }
-      );
-    }
+    // Check R2 storage configuration
+    const missingVars: string[] = [];
+    if (!env.CLOUDFLARE_ACCOUNT_ID) missingVars.push("CLOUDFLARE_ACCOUNT_ID");
+    if (!env.R2_ACCESS_KEY_ID) missingVars.push("R2_ACCESS_KEY_ID");
+    if (!env.R2_SECRET_ACCESS_KEY) missingVars.push("R2_SECRET_ACCESS_KEY");
+    if (!env.R2_BUCKET_NAME) missingVars.push("R2_BUCKET_NAME");
+    if (missingVars.length > 0) {
+      console.error("Missing environment variables:", JSON.stringify(missingVars));
+      return NextResponse.json(
+        {
+          error: "Upload storage not configured",
+          message: `Uploads require environment variables: ${missingVars.join(", ")}.`,
+        },
+        { status: 503 }
+      );
+    }
🤖 Prompt for AI Agents
In apps/web/src/app/api/get-upload-url/route.ts around lines 32 to 47, the route
currently calls isTranscriptionConfigured() which can incorrectly 503 the
endpoint when transcription variables are missing; change the guard to use the
storage configuration checker (e.g., isStorageConfigured()) so only
R2/Cloudflare storage vars are validated, log the missing storage vars via
console.error(JSON.stringify(...)) and return a 503 NextResponse.json with an
error and a message that lists the missing storage vars (not transcription vars)
and points to README setup instructions.
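
If the repo prefers a dedicated checker as the prompt suggests, a minimal sketch is below. The isStorageConfigured name and its file location are assumptions (no such helper exists in this PR); the env keys mirror the ones the route already reads.

// Hypothetical helper, e.g. apps/web/src/lib/storage-utils.ts (name and path are assumptions).
import { env } from "@/env";

type StorageCheck =
  | { configured: true }
  | { configured: false; missingVars: string[] };

export function isStorageConfigured(): StorageCheck {
  // Only the R2/Cloudflare variables this upload route actually needs.
  const required: Array<[string, string | undefined]> = [
    ["CLOUDFLARE_ACCOUNT_ID", env.CLOUDFLARE_ACCOUNT_ID],
    ["R2_ACCESS_KEY_ID", env.R2_ACCESS_KEY_ID],
    ["R2_SECRET_ACCESS_KEY", env.R2_SECRET_ACCESS_KEY],
    ["R2_BUCKET_NAME", env.R2_BUCKET_NAME],
  ];

  const missingVars = required
    .filter(([, value]) => !value)
    .map(([name]) => name);

  return missingVars.length === 0
    ? { configured: true }
    : { configured: false, missingVars };
}

The route would then call isStorageConfigured() in place of isTranscriptionConfigured() and build the 503 message from its missingVars.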


    // Parse and validate request body
    const rawBody = await request.json().catch(() => null);
    if (!rawBody) {
      return NextResponse.json(
        { error: "Invalid JSON in request body" },
        { status: 400 }
      );
    }

    const validationResult = uploadRequestSchema.safeParse(rawBody);
    if (!validationResult.success) {
      return NextResponse.json(
        {
          error: "Invalid request parameters",
          details: validationResult.error.flatten().fieldErrors,
        },
        { status: 400 }
      );
    }

    const { fileExtension } = validationResult.data;

    // Initialize R2 client
    const client = new AwsClient({
      accessKeyId: env.R2_ACCESS_KEY_ID,
      secretAccessKey: env.R2_SECRET_ACCESS_KEY,
    });

    // Generate unique filename with timestamp
    const timestamp = Date.now();
    const fileName = `audio/${timestamp}-${nanoid()}.${fileExtension}`;

    // Create presigned URL
    const url = new URL(
      `https://${env.R2_BUCKET_NAME}.${env.CLOUDFLARE_ACCOUNT_ID}.r2.cloudflarestorage.com/${fileName}`
    );

    url.searchParams.set("X-Amz-Expires", "3600"); // 1 hour expiry

    const signed = await client.sign(new Request(url, { method: "PUT" }), {
      aws: { signQuery: true },
    });
Comment on lines +82 to +90

⚠️ Potential issue | 🔴 Critical

🧩 Analysis chain

Verify R2 endpoint style to prevent signature mismatch

Ensure the host/path style matches Cloudflare R2 S3 signing rules. Many setups use https://<ACCOUNT_ID>.r2.cloudflarestorage.com/<BUCKET>/<KEY> rather than <BUCKET>.<ACCOUNT_ID>.../<KEY>. If incorrect, presigned PUTs will 403.

Run to compare usage patterns in-repo:

If we’re mixing styles across endpoints, standardize to one that matches our bucket configuration (virtual-hosted vs path-style) and test a PUT with the presigned URL.


🏁 Script executed:

#!/bin/bash
rg -n -C2 'r2\.cloudflarestorage\.com' | sed -n '1,200p'

Length of output: 891


Switch to path-style R2 endpoint for presigned URLs
Replace the virtual-hosted URL in apps/web/src/app/api/get-upload-url/route.ts (lines 82–84):

// from:
new URL(
  `https://${env.R2_BUCKET_NAME}.${env.CLOUDFLARE_ACCOUNT_ID}.r2.cloudflarestorage.com/${fileName}`
);
// to:
new URL(
  `https://${env.CLOUDFLARE_ACCOUNT_ID}.r2.cloudflarestorage.com/${env.R2_BUCKET_NAME}/${fileName}`
);

This matches your Python usage and Cloudflare R2’s signing rules to avoid 403s.
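
Whichever endpoint style is chosen, it is easy to smoke-test end to end. A minimal sketch follows; it assumes the Next.js dev server is running on localhost:3000, and the byte payload is arbitrary since the body is not part of the signature.

// Hypothetical smoke test: request a presigned URL from the route, then PUT a few bytes to it.
async function verifyPresignedUpload(): Promise<void> {
  const res = await fetch("http://localhost:3000/api/get-upload-url", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ fileExtension: "wav" }),
  });
  if (!res.ok) throw new Error(`get-upload-url failed: ${res.status}`);

  const { uploadUrl, fileName } = (await res.json()) as {
    uploadUrl: string;
    fileName: string;
  };

  // A correctly signed URL returns 200; a host/path-style mismatch typically surfaces as 403.
  const put = await fetch(uploadUrl, {
    method: "PUT",
    body: new Uint8Array([0x00, 0x01, 0x02, 0x03]),
  });
  console.log(fileName, "->", put.status);
}

verifyPresignedUpload().catch(console.error);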

🤖 Prompt for AI Agents
In apps/web/src/app/api/get-upload-url/route.ts around lines 82 to 90, the code
builds a virtual-hosted–style R2 URL using the bucket as a subdomain which can
cause 403s when signing; change it to path-style by constructing the URL with
the account host then appending the bucket and filename (i.e., host:
${env.CLOUDFLARE_ACCOUNT_ID}.r2.cloudflarestorage.com and path:
/${env.R2_BUCKET_NAME}/${fileName}), keep the same query param and signing call
so the presigned PUT uses the path-style endpoint.


    if (!signed.url) {
      throw new Error("Failed to generate presigned URL");
    }

    // Prepare and validate response
    const responseData = {
      uploadUrl: signed.url,
      fileName,
    };

    const responseValidation = apiResponseSchema.safeParse(responseData);
    if (!responseValidation.success) {
      console.error(
        "Invalid API response structure:",
        responseValidation.error
      );
      return NextResponse.json(
        { error: "Internal response formatting error" },
        { status: 500 }
      );
    }

    return NextResponse.json(responseValidation.data);
  } catch (error) {
    console.error("Error generating upload URL:", error);
    return NextResponse.json(
      {
        error: "Failed to generate upload URL",
        message:
          error instanceof Error
            ? error.message
            : "An unexpected error occurred",
      },
      { status: 500 }
    );
  }
}