/** @file Standalone cloud file upload functionality. */

import type {
  default as Backend,
  S3MultipartPart,
  UploadedAsset,
  UploadFileEndRequestBody,
  UploadFileRequestParams,
} from 'enso-common/src/services/Backend'
import { S3_CHUNK_SIZE_BYTES } from 'enso-common/src/services/Backend'

/** Upload progress event types. */
export type UploadProgressEvent = 'begin' | 'chunk' | 'end'

/** Upload progress information. */
export interface UploadProgress {
  readonly event: UploadProgressEvent
  readonly sentBytes: number
  readonly totalBytes: number
}

/** Options for file upload. */
export interface UploadFileOptions {
  /** Number of retries for chunk uploads. Defaults to 3. */
  readonly chunkRetries?: number
  /** Number of retries for finalization. Defaults to 3. */
  readonly endRetries?: number
  /** Called on every progress event ('begin', each 'chunk', and 'end'). */
  readonly onProgress?: (progress: UploadProgress) => void
  /** Called before upload starts. */
  readonly onBegin?: (progress: UploadProgress) => void
  /** Called after each successful chunk upload. */
  readonly onChunkSuccess?: (progress: UploadProgress) => void
  /** Called after successful upload completion. */
  readonly onSuccess?: (progress: UploadProgress) => void
  /** Called on error. */
  readonly onError?: (error: unknown) => void
  /** Called after completion (success or error). */
  readonly onSettled?: (progress: UploadProgress | null, error: unknown) => void
  /** Maximum number of parallel chunk uploads. Defaults to 8. */
  readonly maxParallelChunks?: number
}

/**
 * Retry a function with exponential backoff.
 * With the default `delayMs` of 1000, failed attempts wait 1 s, 2 s, 4 s, ... before retrying.
 */
async function retryWithBackoff<T>(
  fn: () => Promise<T>,
  retries: number,
  delayMs = 1000,
): Promise<T> {
  let lastError: unknown
  for (let attempt = 0; attempt <= retries; attempt++) {
    try {
      return await fn()
    } catch (error) {
      lastError = error
      if (attempt < retries) {
        await new Promise((resolve) => setTimeout(resolve, delayMs * Math.pow(2, attempt)))
      }
    }
  }
  throw lastError
}

/**
 * Upload a single chunk with retry logic.
 */
async function uploadChunk(
  url: string,
  file: File,
  chunkIndex: number,
  retries: number,
): Promise<S3MultipartPart> {
  return retryWithBackoff(async () => {
    const start = chunkIndex * S3_CHUNK_SIZE_BYTES
    const end = Math.min(start + S3_CHUNK_SIZE_BYTES, file.size)
    const chunk = file.slice(start, end)

    const response = await fetch(url, {
      method: 'PUT',
      body: chunk,
      headers: {
        'Content-Type': 'application/octet-stream',
      },
    })

    if (!response.ok) {
      throw new Error(`Chunk upload failed: ${response.status} ${response.statusText}`)
    }

    const eTag = response.headers.get('ETag')
    if (!eTag) {
      throw new Error('Missing ETag in response')
    }

    return {
      eTag: eTag.replace(/"/g, ''),
      partNumber: chunkIndex + 1,
    }
  }, retries)
}

/**
 * Upload chunks with controlled parallelism.
 */
async function uploadChunksWithParallelism(
  presignedUrls: readonly string[],
  file: File,
  chunkRetries: number,
  maxParallel: number,
  onChunkComplete: (completedCount: number) => void,
): Promise<S3MultipartPart[]> {
  const parts: S3MultipartPart[] = new Array(presignedUrls.length)
  let completedCount = 0
  let nextIndex = 0

  // Each worker repeatedly claims the next unclaimed chunk index and uploads it,
  // so at most `maxParallel` chunk uploads are in flight at any time.
  const uploadNext = async (): Promise<void> => {
    while (nextIndex < presignedUrls.length) {
      const currentIndex = nextIndex++
      const url = presignedUrls[currentIndex]!
      const part = await uploadChunk(url, file, currentIndex, chunkRetries)
      parts[currentIndex] = part
      completedCount++
      onChunkComplete(completedCount)
    }
  }

  const workers = Array.from({ length: Math.min(maxParallel, presignedUrls.length) }, () =>
    uploadNext(),
  )
  await Promise.all(workers)

  return parts
}

/**
 * Upload a file to the backend using multipart upload.
 * This is a standalone implementation that doesn't depend on React hooks or toast notifications.
 *
 * @param backend - The backend instance to use for upload operations
 * @param body - Upload request parameters
 * @param file - The file to upload
 * @param options - Upload options
 * @returns The uploaded asset information
 */
export async function uploadFile(
  backend: Backend,
  body: UploadFileRequestParams,
  file: File,
  options: UploadFileOptions = {},
): Promise<UploadedAsset> {
  const {
    chunkRetries = 3,
    endRetries = 3,
    maxParallelChunks = 8,
    onBegin,
    onChunkSuccess,
    onSuccess,
    onError,
    onSettled,
    onProgress,
  } = options

  const fileSizeBytes = file.size
  const beginProgress: UploadProgress = {
    event: 'begin',
    sentBytes: 0,
    totalBytes: fileSizeBytes,
  }

  try {
    // Notify upload start
    onBegin?.(beginProgress)
    onProgress?.(beginProgress)

    // Start multipart upload
    const { sourcePath, uploadId, presignedUrls } = await backend.uploadFileStart(body, file)

    // Upload chunks with controlled parallelism
    const parts = await uploadChunksWithParallelism(
      presignedUrls,
      file,
      chunkRetries,
      maxParallelChunks,
      (completedCount) => {
        const newSentBytes = Math.min(completedCount * S3_CHUNK_SIZE_BYTES, fileSizeBytes)
        const chunkProgress: UploadProgress = {
          event: 'chunk',
          sentBytes: newSentBytes,
          totalBytes: fileSizeBytes,
        }
        onChunkSuccess?.(chunkProgress)
        onProgress?.(chunkProgress)
      },
    )

    // Finalize upload with retry
    const result = await retryWithBackoff(async () => {
      const endParams: UploadFileEndRequestBody = {
        parentDirectoryId: body.parentDirectoryId,
        parts,
        sourcePath,
        uploadId,
        assetId: body.fileId,
        fileName: body.fileName,
      }
      return await backend.uploadFileEnd(endParams)
    }, endRetries)

    // Notify completion
    const endProgress: UploadProgress = {
      event: 'end',
      sentBytes: fileSizeBytes,
      totalBytes: fileSizeBytes,
    }
    onSuccess?.(endProgress)
    onProgress?.(endProgress)
    onSettled?.(endProgress, null)

    return result
  } catch (error) {
    onError?.(error)
    onSettled?.(null, error)
    throw error
  }
}
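
/*
 * Minimal usage sketch (not part of the module API). `backend`, `directoryId`,
 * and `file` are assumed to be supplied by the surrounding application, and the
 * exact fields of `UploadFileRequestParams` should be checked against
 * `enso-common/src/services/Backend`:
 *
 *   const asset = await uploadFile(
 *     backend,
 *     { fileId: null, fileName: file.name, parentDirectoryId: directoryId },
 *     file,
 *     {
 *       maxParallelChunks: 4,
 *       onProgress: ({ sentBytes, totalBytes }) => {
 *         console.log(`Uploaded ${Math.round((sentBytes / totalBytes) * 100)}%`)
 *       },
 *       onError: (error) => {
 *         console.error('Upload failed:', error)
 *       },
 *     },
 *   )
 */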