| 1 | +from __future__ import annotations |
| 2 | + |
| 3 | +from typing import Any, Dict, List |
| 4 | +import sys |
| 5 | + |
| 6 | +from together.abstract import api_requestor |
| 7 | +from together.together_response import TogetherResponse |
| 8 | +from together.types import ( |
| 9 | + TogetherClient, |
| 10 | + TogetherRequest, |
| 11 | +) |
| 12 | +from together.types.videos import ( |
| 13 | + CreateVideoResponse, |
| 14 | + CreateVideoBody, |
| 15 | + VideoJob, |
| 16 | +) |
| 17 | + |
| 18 | +if sys.version_info >= (3, 8): |
| 19 | + from typing import Literal |
| 20 | +else: |
| 21 | + from typing_extensions import Literal |
| 22 | + |
| 23 | + |
| 24 | +class Videos: |
| 25 | + def __init__(self, client: TogetherClient) -> None: |
| 26 | + self._client = client |
| 27 | + |
| 28 | + def create( |
| 29 | + self, |
| 30 | + *, |
| 31 | + model: str, |
| 32 | + prompt: str | None = None, |
| 33 | + height: int | None = None, |
| 34 | + width: int | None = None, |
| 35 | + seconds: str | None = None, |
| 36 | + fps: int | None = None, |
| 37 | + steps: int | None = None, |
| 38 | + seed: int | None = None, |
| 39 | + guidance_scale: float | None = None, |
| 40 | + output_format: Literal["MP4", "WEBM"] | None = None, |
| 41 | + output_quality: int | None = None, |
| 42 | + negative_prompt: str | None = None, |
| 43 | + frame_images: List[Dict[str, Any]] | None = None, |
| 44 | + reference_images: List[str] | None = None, |
| 45 | + **kwargs: Any, |
| 46 | + ) -> CreateVideoResponse: |
| 47 | + """ |
| 48 | + Method to generate videos based on a given prompt using a specified model. |
| 49 | + |
| 50 | + Args: |
| 51 | + model (str): The model to use for video generation. |
| 52 | + |
| 53 | + prompt (str, optional): A description of the desired video; the positive prompt for the generation. Defaults to None. |
| 54 | + |
| 55 | + height (int, optional): Height of the video to generate in pixels. |
| 56 | + |
| 57 | + width (int, optional): Width of the video to generate in pixels. |
| 58 | + |
| 59 | + seconds (str, optional): Length of the generated video in seconds. Min 1, max 10. |
| 60 | + |
| 61 | + fps (int, optional): Frames per second. Min 15, max 60. Defaults to 24. |
| 62 | + |
| 63 | + steps (int, optional): The number of denoising steps the model performs during video |
| 64 | + generation. More steps typically result in higher quality output but require longer |
| 65 | + processing time. Min 10, max 50. Defaults to 20. |
| 66 | + |
| 67 | + seed (int, optional): Seed to use in initializing the video generation. Using the same |
| 68 | + seed allows deterministic video generation. If not provided, a random seed is |
| 69 | + generated for each request. Note: When requesting multiple videos with the same |
| 70 | + seed, the seed will be incremented by 1 (+1) for each video generated. |
| 71 | + |
| 72 | + guidance_scale (float, optional): Controls how closely the video generation follows your |
| 73 | + prompt. Higher values make the model adhere more strictly to your text description, |
| 74 | + while lower values allow more creative freedom. Recommended range is 6.0-10.0 for |
| 75 | + most video models. Values above 12 may cause over-guidance artifacts or unnatural |
| 76 | + motion patterns. Defaults to 8. |
| 77 | + |
| 78 | + output_format (Literal["MP4", "WEBM"], optional): Specifies the format of the output video. Either "MP4" |
| 79 | + or "WEBM". Defaults to "MP4". |
| 80 | + |
| 81 | + output_quality (int, optional): Compression quality of the output video. Defaults to 20. |
| 82 | + |
| 83 | + negative_prompt (str, optional): Similar to prompt, but specifies what to avoid instead |
| 84 | + of what to include. Defaults to None. |
| 85 | + |
| 86 | + frame_images (List[Dict[str, Any]], optional): Array of images used to guide video generation, |
| 87 | + like keyframes. With one image it sets the starting frame; with two, the starting and |
| 88 | + ending frames; with more than two, a frame position must be specified per image. Defaults to None. |
| 89 | + |
| 90 | + reference_images (List[str], optional): An array containing reference images |
| 91 | + used to condition the generation process. These images provide visual guidance to |
| 92 | + help the model generate content that aligns with the style, composition, or |
| 93 | + characteristics of the reference materials. Defaults to None. |
| 94 | + |
| 95 | + Returns: |
| 96 | + CreateVideoResponse: Object containing the video generation job id. |
| 97 | + """ |
| 98 | + |
| 99 | + requestor = api_requestor.APIRequestor( |
| 100 | + client=self._client, |
| 101 | + ) |
| 102 | + |
| 103 | + parameter_payload = CreateVideoBody( |
| 104 | + prompt=prompt, |
| 105 | + model=model, |
| 106 | + height=height, |
| 107 | + width=width, |
| 108 | + seconds=seconds, |
| 109 | + fps=fps, |
| 110 | + steps=steps, |
| 111 | + seed=seed, |
| 112 | + guidance_scale=guidance_scale, |
| 113 | + output_format=output_format, |
| 114 | + output_quality=output_quality, |
| 115 | + negative_prompt=negative_prompt, |
| 116 | + frame_images=frame_images, |
| 117 | + reference_images=reference_images, |
| 118 | + **kwargs, |
| 119 | + ).model_dump(exclude_none=True) |
| 120 | + |
| 121 | + response, _, _ = requestor.request( |
| 122 | + options=TogetherRequest( |
| 123 | + method="POST", |
| 124 | + url="../v2/videos", |
| 125 | + params=parameter_payload, |
| 126 | + ), |
| 127 | + stream=False, |
| 128 | + ) |
| 129 | + |
| 130 | + assert isinstance(response, TogetherResponse) |
| 131 | + |
| 132 | + return CreateVideoResponse(**response.data) |
| 133 | + |
| 134 | + def retrieve( |
| 135 | + self, |
| 136 | + id: str, |
| 137 | + ) -> VideoJob: |
| 138 | + """ |
| 139 | + Method to retrieve a video creation job. |
| 140 | + |
| 141 | + Args: |
| 142 | + id (str): The ID of the video creation job to retrieve. |
| 143 | + |
| 144 | + Returns: |
| 145 | + VideoJob: Object containing the current status and details of the video creation job |
| 146 | + """ |
| 147 | + |
| 148 | + requestor = api_requestor.APIRequestor( |
| 149 | + client=self._client, |
| 150 | + ) |
| 151 | + |
| 152 | + response, _, _ = requestor.request( |
| 153 | + options=TogetherRequest( |
| 154 | + method="GET", |
| 155 | + url=f"../v2/videos/{id}", |
| 156 | + ), |
| 157 | + stream=False, |
| 158 | + ) |
| 159 | + |
| 160 | + assert isinstance(response, TogetherResponse) |
| 161 | + |
| 162 | + return VideoJob(**response.data) |
| 163 | + |
| 164 | + |
| 165 | +class AsyncVideos: |
| 166 | + def __init__(self, client: TogetherClient) -> None: |
| 167 | + self._client = client |
| 168 | + |
| 169 | + async def create( |
| 170 | + self, |
| 171 | + *, |
| 172 | + prompt: str, |
| 173 | + model: str, |
| 174 | + height: int | None = None, |
| 175 | + width: int | None = None, |
| 176 | + seconds: float | None = None, |
| 177 | + fps: int | None = None, |
| 178 | + steps: int | None = None, |
| 179 | + seed: int | None = None, |
| 180 | + guidance_scale: float | None = None, |
| 181 | + output_format: Literal["MP4", "WEBM"] | None = None, |
| 182 | + output_quality: int | None = None, |
| 183 | + negative_prompt: str | None = None, |
| 184 | + frame_images: List[Dict[str, Any]] | None = None, |
| 185 | + reference_images: List[str] | None = None, |
| 186 | + **kwargs: Any, |
| 187 | + ) -> CreateVideoResponse: |
| 188 | + """ |
| 189 | + Async method to create videos based on a given prompt using a specified model. |
| 190 | + |
| 191 | + Args: |
| 192 | + prompt (str): A description of the desired video; the positive prompt for the generation. |
| 193 | + |
| 194 | + model (str): The model to use for video generation. |
| 195 | + |
| 196 | + height (int, optional): Height of the video to generate in pixels. |
| 197 | + |
| 198 | + width (int, optional): Width of the video to generate in pixels. |
| 199 | + |
| 200 | + seconds (float, optional): Length of the generated video in seconds. Min 1, max 10. |
| 201 | + |
| 202 | + fps (int, optional): Frames per second. Min 15, max 60. Defaults to 24. |
| 203 | + |
| 204 | + steps (int, optional): The number of denoising steps the model performs during video |
| 205 | + generation. More steps typically result in higher quality output but require longer |
| 206 | + processing time. Min 10, max 50. Defaults to 20. |
| 207 | + |
| 208 | + seed (int, optional): Seed to use in initializing the video generation. Using the same |
| 209 | + seed allows deterministic video generation. If not provided, a random seed is |
| 210 | + generated for each request. Note: When requesting multiple videos with the same |
| 211 | + seed, the seed will be incremented by 1 (+1) for each video generated. |
| 212 | + |
| 213 | + guidance_scale (float, optional): Controls how closely the video generation follows your |
| 214 | + prompt. Higher values make the model adhere more strictly to your text description, |
| 215 | + while lower values allow more creative freedom. Recommended range is 6.0-10.0 for |
| 216 | + most video models. Values above 12 may cause over-guidance artifacts or unnatural |
| 217 | + motion patterns. Defaults to 8. |
| 218 | + |
| 219 | + output_format (Literal["MP4", "WEBM"], optional): Specifies the format of the output video. Either "MP4" |
| 220 | + or "WEBM". Defaults to "MP4". |
| 221 | + |
| 222 | + output_quality (int, optional): Compression quality of the output video. Defaults to 20. |
| 223 | + |
| 224 | + negative_prompt (str, optional): Similar to prompt, but specifies what to avoid instead |
| 225 | + of what to include. Defaults to None. |
| 226 | + |
| 227 | + frame_images (List[Dict[str, Any]], optional): Array of images used to guide video generation, |
| 228 | + like keyframes. With one image it sets the starting frame; with two, the starting and |
| 229 | + ending frames; with more than two, a frame position must be specified per image. Defaults to None. |
| 230 | + |
| 231 | + reference_images (List[str], optional): An array containing reference images |
| 232 | + used to condition the generation process. These images provide visual guidance to |
| 233 | + help the model generate content that aligns with the style, composition, or |
| 234 | + characteristics of the reference materials. Defaults to None. |
| 235 | + |
| 236 | + Returns: |
| 237 | + CreateVideoResponse: Object containing the video creation job id. |
| 238 | + """ |
| 239 | + |
| 240 | + requestor = api_requestor.APIRequestor( |
| 241 | + client=self._client, |
| 242 | + ) |
| 243 | + |
| 244 | + parameter_payload = CreateVideoBody( |
| 245 | + prompt=prompt, |
| 246 | + model=model, |
| 247 | + height=height, |
| 248 | + width=width, |
| 249 | + seconds=seconds, |
| 250 | + fps=fps, |
| 251 | + steps=steps, |
| 252 | + seed=seed, |
| 253 | + guidance_scale=guidance_scale, |
| 254 | + output_format=output_format, |
| 255 | + output_quality=output_quality, |
| 256 | + negative_prompt=negative_prompt, |
| 257 | + frame_images=frame_images, |
| 258 | + reference_images=reference_images, |
| 259 | + **kwargs, |
| 260 | + ).model_dump(exclude_none=True) |
| 261 | + |
| 262 | + response, _, _ = await requestor.arequest( |
| 263 | + options=TogetherRequest( |
| 264 | + method="POST", |
| 265 | + url="../v2/videos", |
| 266 | + params=parameter_payload, |
| 267 | + ), |
| 268 | + stream=False, |
| 269 | + ) |
| 270 | + |
| 271 | + assert isinstance(response, TogetherResponse) |
| 272 | + |
| 273 | + return CreateVideoResponse(**response.data) |
| 274 | + |
| 275 | + async def retrieve( |
| 276 | + self, |
| 277 | + id: str, |
| 278 | + ) -> VideoJob: |
| 279 | + """ |
| 280 | + Async method to retrieve a video creation job. |
| 281 | + |
| 282 | + Args: |
| 283 | + id (str): The ID of the video creation job to retrieve. |
| 284 | + |
| 285 | + Returns: |
| 286 | + VideoJob: Object containing the current status and details of the video creation job |
| 287 | + """ |
| 288 | + |
| 289 | + requestor = api_requestor.APIRequestor( |
| 290 | + client=self._client, |
| 291 | + ) |
| 292 | + |
| 293 | + response, _, _ = await requestor.arequest( |
| 294 | + options=TogetherRequest( |
| 295 | + method="GET", |
| 296 | + url=f"../v2/videos/{id}", |
| 297 | + ), |
| 298 | + stream=False, |
| 299 | + ) |
| 300 | + |
| 301 | + assert isinstance(response, TogetherResponse) |
| 302 | + |
| 303 | + return VideoJob(**response.data) |
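
For context, here is a minimal usage sketch of the synchronous resource added above. It assumes the rest of the change wires this class up as a `videos` attribute on the `Together` client, that `CreateVideoResponse` exposes the job id as `id`, and that `VideoJob` reports progress through a `status` field with values like `"queued"` and `"in_progress"`; the model name is a placeholder. None of those details are confirmed by this diff.

```python
import time

from together import Together

client = Together()  # picks up TOGETHER_API_KEY from the environment

# Submit a generation job. Only `model` is required by the signature above;
# the model name here is a placeholder, not a real identifier.
job = client.videos.create(
    model="example/video-model-1.0",
    prompt="A paper boat drifting down a rain-soaked street, cinematic lighting",
    seconds="5",          # the sync signature types this as str
    fps=24,
    output_format="MP4",
)

# Poll the job until it leaves an in-progress state. The `status` values and
# the `id` attribute are assumptions about the response/job models.
video = client.videos.retrieve(job.id)
while getattr(video, "status", None) in ("queued", "in_progress"):
    time.sleep(5)
    video = client.videos.retrieve(job.id)

print(video)  # finished VideoJob with status and output details
```

Polling `retrieve` is the natural counterpart to `create` here, since `create` only returns a job handle rather than the finished video.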
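The async resource would be used the same way, under the same assumptions (an `AsyncTogether` client exposing `videos`, an `id` on the response, and a `status` field on the job):

```python
import asyncio

from together import AsyncTogether


async def main() -> None:
    client = AsyncTogether()  # picks up TOGETHER_API_KEY from the environment

    job = await client.videos.create(
        prompt="A timelapse of clouds rolling over a mountain ridge",
        model="example/video-model-1.0",  # placeholder model name
        seconds=5.0,                      # the async signature types this as float
    )

    # Same polling loop as the sync sketch; `id` and `status` are assumed fields.
    video = await client.videos.retrieve(job.id)
    while getattr(video, "status", None) in ("queued", "in_progress"):
        await asyncio.sleep(5)
        video = await client.videos.retrieve(job.id)

    print(video)


asyncio.run(main())
```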