diff --git a/assessment_module_manager/poetry.lock b/assessment_module_manager/poetry.lock index 33fceb01f..84a305029 100644 --- a/assessment_module_manager/poetry.lock +++ b/assessment_module_manager/poetry.lock @@ -38,7 +38,7 @@ description = "This is a helper module for easier development of Athena modules. optional = false python-versions = "3.11.*" files = [] -develop = true +develop = false [package.dependencies] fastapi = "^0.109.1" @@ -49,8 +49,11 @@ sqlalchemy = {version = "^2.0.21", extras = ["mypy"]} uvicorn = "^0.23.0" [package.source] -type = "directory" -url = "../athena" +type = "git" +url = "https://github.com/ls1intum/Athena.git" +reference = "9773c41" +resolved_reference = "9773c41f64f0ae307b879363308b2b63082b6bf7" +subdirectory = "athena" [[package]] name = "certifi" @@ -1044,4 +1047,4 @@ standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", [metadata] lock-version = "2.0" python-versions = "3.11.*" -content-hash = "02781dc7d21c2e264149e6e6f6ecbc7a0d7642784bc67420cefceb3fd45979de" +content-hash = "f740d850d558b42bdfa0e31eb3c74690065c5ef82d3636ddd577e3b785c30b1d" diff --git a/assessment_module_manager/pyproject.toml b/assessment_module_manager/pyproject.toml index 4f4aa6221..dfabbb6e8 100644 --- a/assessment_module_manager/pyproject.toml +++ b/assessment_module_manager/pyproject.toml @@ -7,7 +7,7 @@ license = "MIT" [tool.poetry.dependencies] python = "3.11.*" -athena = {path = "../athena", develop = true} +athena = { git = "https://github.com/ls1intum/Athena.git", rev = "9773c41", subdirectory = "athena"} fastapi = "^0.109.1" uvicorn = "^0.23.0" httpx = "^0.24.1" diff --git a/athena/athena/endpoints.py b/athena/athena/endpoints.py index 6d259a2a2..a30140500 100644 --- a/athena/athena/endpoints.py +++ b/athena/athena/endpoints.py @@ -156,7 +156,7 @@ class SubmissionSelectorRequest(BaseModel): class Config: # Allow camelCase field names in the API (converted to snake_case) alias_generator = to_camel - 
allow_population_by_field_name = True + populate_by_name = True @app.post("/select_submission", responses=module_responses) @authenticated diff --git a/athena/athena/helpers/programming/code_repository.py b/athena/athena/helpers/programming/code_repository.py index 31a1675ef..c64aaa900 100644 --- a/athena/athena/helpers/programming/code_repository.py +++ b/athena/athena/helpers/programming/code_repository.py @@ -18,7 +18,7 @@ def get_repository_zip(url: str, authorization_secret: Optional[str] = None) -> the cache or by downloading it, and return a ZipFile object. Optional: Authorization secret for the API. If omitted, it will be auto-determined given the request session. """ - url_hash = hashlib.md5(url.encode("utf-8")).hexdigest() + url_hash = hashlib.md5(str(url).encode("utf-8")).hexdigest() file_name = url_hash + ".zip" cache_file_path = cache_dir / file_name @@ -27,7 +27,7 @@ def get_repository_zip(url: str, authorization_secret: Optional[str] = None) -> if contextvars.repository_authorization_secret_context_var_empty(): raise ValueError("Authorization secret for the repository API is not set. Pass authorization_secret to this function or add the X-Repository-Authorization-Secret header to the request from the assessment module manager.") authorization_secret = contextvars.get_repository_authorization_secret_context_var() - with httpx.stream("GET", url, headers={ "Authorization": cast(str, authorization_secret) }) as response: + with httpx.stream("GET", str(url), headers={ "Authorization": cast(str, authorization_secret) }) as response: response.raise_for_status() with open(cache_file_path, "wb") as f: for chunk in response.iter_bytes(): @@ -42,7 +42,7 @@ def get_repository(url: str, authorization_secret: Optional[str] = None) -> Repo downloading it, and return a Repo object. 
""" - url_hash = hashlib.md5(url.encode("utf-8")).hexdigest() + url_hash = hashlib.md5(str(url).encode("utf-8")).hexdigest() dir_name = url_hash + ".git" cache_dir_path = cache_dir / dir_name diff --git a/athena/athena/schemas/exercise.py b/athena/athena/schemas/exercise.py index 1f173e2e3..ee85e05a5 100644 --- a/athena/athena/schemas/exercise.py +++ b/athena/athena/schemas/exercise.py @@ -29,4 +29,4 @@ class Exercise(Schema, ABC): meta: dict = Field({}, example={"internal_id": "5"}) class Config: - orm_mode = True + from_attributes = True diff --git a/athena/athena/schemas/feedback.py b/athena/athena/schemas/feedback.py index 4cf9c6a2e..24af49410 100644 --- a/athena/athena/schemas/feedback.py +++ b/athena/athena/schemas/feedback.py @@ -33,4 +33,4 @@ def to_model(self, is_suggestion: bool = False, lms_id: Optional[int] = None, lm return type(self).get_model_class()(**self.dict(), is_suggestion=is_suggestion, lms_id=lms_id, lms_url=lms_url) class Config: - orm_mode = True + from_attributes = True diff --git a/athena/athena/schemas/modeling_exercise.py b/athena/athena/schemas/modeling_exercise.py index 70452f839..c3e34469a 100644 --- a/athena/athena/schemas/modeling_exercise.py +++ b/athena/athena/schemas/modeling_exercise.py @@ -8,6 +8,6 @@ class ModelingExercise(Exercise): """A modeling exercise that can be solved by students, enhanced with metadata.""" - type: ExerciseType = Field(ExerciseType.modeling, const=True) + type: ExerciseType = Field(ExerciseType.modeling, Literal=True) example_solution: Optional[str] = Field(None, description="An example solution to the exercise.") diff --git a/athena/athena/schemas/programming_exercise.py b/athena/athena/schemas/programming_exercise.py index ac875b05d..a78cf8f9d 100644 --- a/athena/athena/schemas/programming_exercise.py +++ b/athena/athena/schemas/programming_exercise.py @@ -10,7 +10,7 @@ class ProgrammingExercise(Exercise): """A programming exercise that can be solved by students, enhanced with metadata.""" - type: 
ExerciseType = Field(ExerciseType.programming, const=True) + type: ExerciseType = Field(ExerciseType.programming, Literal=True) programming_language: str = Field(description="The programming language that is used for this exercise.", example="java") solution_repository_uri: AnyUrl = Field(description="URL to the solution git repository, which contains the " @@ -51,4 +51,11 @@ def get_tests_zip(self) -> ZipFile: def get_tests_repository(self) -> Repo: """Return the tests repository as a Repo object.""" - return get_repository(self.tests_repository_uri) \ No newline at end of file + return get_repository(self.tests_repository_uri) + + def to_model(self): + model = super().to_model() + model.solution_repository_uri = str(self.solution_repository_uri) + model.template_repository_uri = str(self.template_repository_uri) + model.tests_repository_uri = str(self.tests_repository_uri) + return model \ No newline at end of file diff --git a/athena/athena/schemas/schema.py b/athena/athena/schemas/schema.py index 67e146ada..b0e7e6b7f 100644 --- a/athena/athena/schemas/schema.py +++ b/athena/athena/schemas/schema.py @@ -32,4 +32,4 @@ def to_model(self): class Config: # Allow camelCase field names in the API (converted to snake_case) alias_generator = to_camel - allow_population_by_field_name = True \ No newline at end of file + populate_by_name = True \ No newline at end of file diff --git a/athena/athena/schemas/submission.py b/athena/athena/schemas/submission.py index f9e39c372..0bf684bdc 100644 --- a/athena/athena/schemas/submission.py +++ b/athena/athena/schemas/submission.py @@ -12,4 +12,4 @@ class Submission(Schema, ABC): meta: dict = Field({}, example={}) class Config: - orm_mode = True + from_attributes = True diff --git a/athena/athena/schemas/text_exercise.py b/athena/athena/schemas/text_exercise.py index 6c8367667..ac3861a70 100644 --- a/athena/athena/schemas/text_exercise.py +++ b/athena/athena/schemas/text_exercise.py @@ -8,6 +8,6 @@ class TextExercise(Exercise): 
"""A text exercise that can be solved by students, enhanced with metadata.""" - type: ExerciseType = Field(ExerciseType.text, const=True) + type: ExerciseType = Field(ExerciseType.text, Literal=True) example_solution: Optional[str] = Field(None, description="An example solution to the exercise.") diff --git a/athena/poetry.lock b/athena/poetry.lock index a9d171803..6a72b908f 100644 --- a/athena/poetry.lock +++ b/athena/poetry.lock @@ -1,14 +1,25 @@ # This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + [[package]] name = "anyio" -version = "4.4.0" +version = "4.6.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, - {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, + {file = "anyio-4.6.0-py3-none-any.whl", hash = "sha256:c7d2e9d63e31599eeb636c8c5c03a7e108d73b345f064f1c19fdc87b79036a9a"}, + {file = "anyio-4.6.0.tar.gz", hash = "sha256:137b4559cbb034c477165047febb6ff83f390fc3b20bf181c1fc0a728cb8beeb"}, ] [package.dependencies] @@ -16,9 +27,9 @@ idna = ">=2.8" sniffio = ">=1.1" [package.extras] -doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis 
(>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] -trio = ["trio (>=0.23)"] +doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.21.0b1)"] +trio = ["trio (>=0.26.1)"] [[package]] name = "astroid" @@ -73,13 +84,13 @@ files = [ [[package]] name = "dill" -version = "0.3.8" +version = "0.3.9" description = "serialize all of Python" optional = false python-versions = ">=3.8" files = [ - {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, - {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, + {file = "dill-0.3.9-py3-none-any.whl", hash = "sha256:468dff3b89520b474c0397703366b7b95eebe6303f108adf9b19da1f702be87a"}, + {file = "dill-0.3.9.tar.gz", hash = "sha256:81aa267dddf68cbfe8029c42ca9ec6a4ab3b22371d1c450abc54422577b4512c"}, ] [package.extras] @@ -180,69 +191,84 @@ test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", [[package]] name = "greenlet" -version = "3.0.3" +version = "3.1.1" description = "Lightweight in-process concurrent programming" optional = false python-versions = ">=3.7" files = [ - {file = "greenlet-3.0.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:9da2bd29ed9e4f15955dd1595ad7bc9320308a3b766ef7f837e23ad4b4aac31a"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d353cadd6083fdb056bb46ed07e4340b0869c305c8ca54ef9da3421acbdf6881"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dca1e2f3ca00b84a396bc1bce13dd21f680f035314d2379c4160c98153b2059b"}, - {file = 
"greenlet-3.0.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ed7fb269f15dc662787f4119ec300ad0702fa1b19d2135a37c2c4de6fadfd4a"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd4f49ae60e10adbc94b45c0b5e6a179acc1736cf7a90160b404076ee283cf83"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:73a411ef564e0e097dbe7e866bb2dda0f027e072b04da387282b02c308807405"}, - {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7f362975f2d179f9e26928c5b517524e89dd48530a0202570d55ad6ca5d8a56f"}, - {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:649dde7de1a5eceb258f9cb00bdf50e978c9db1b996964cd80703614c86495eb"}, - {file = "greenlet-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:68834da854554926fbedd38c76e60c4a2e3198c6fbed520b106a8986445caaf9"}, - {file = "greenlet-3.0.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:b1b5667cced97081bf57b8fa1d6bfca67814b0afd38208d52538316e9422fc61"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52f59dd9c96ad2fc0d5724107444f76eb20aaccb675bf825df6435acb7703559"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:afaff6cf5200befd5cec055b07d1c0a5a06c040fe5ad148abcd11ba6ab9b114e"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe754d231288e1e64323cfad462fcee8f0288654c10bdf4f603a39ed923bef33"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2797aa5aedac23af156bbb5a6aa2cd3427ada2972c828244eb7d1b9255846379"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b7f009caad047246ed379e1c4dbcb8b020f0a390667ea74d2387be2998f58a22"}, - {file = 
"greenlet-3.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c5e1536de2aad7bf62e27baf79225d0d64360d4168cf2e6becb91baf1ed074f3"}, - {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:894393ce10ceac937e56ec00bb71c4c2f8209ad516e96033e4b3b1de270e200d"}, - {file = "greenlet-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:1ea188d4f49089fc6fb283845ab18a2518d279c7cd9da1065d7a84e991748728"}, - {file = "greenlet-3.0.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:70fb482fdf2c707765ab5f0b6655e9cfcf3780d8d87355a063547b41177599be"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4d1ac74f5c0c0524e4a24335350edad7e5f03b9532da7ea4d3c54d527784f2e"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:149e94a2dd82d19838fe4b2259f1b6b9957d5ba1b25640d2380bea9c5df37676"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15d79dd26056573940fcb8c7413d84118086f2ec1a8acdfa854631084393efcc"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b7db1ebff4ba09aaaeae6aa491daeb226c8150fc20e836ad00041bcb11230"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fcd2469d6a2cf298f198f0487e0a5b1a47a42ca0fa4dfd1b6862c999f018ebbf"}, - {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1f672519db1796ca0d8753f9e78ec02355e862d0998193038c7073045899f305"}, - {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2516a9957eed41dd8f1ec0c604f1cdc86758b587d964668b5b196a9db5bfcde6"}, - {file = "greenlet-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:bba5387a6975598857d86de9eac14210a49d554a77eb8261cc68b7d082f78ce2"}, - {file = "greenlet-3.0.3-cp37-cp37m-macosx_11_0_universal2.whl", hash = 
"sha256:5b51e85cb5ceda94e79d019ed36b35386e8c37d22f07d6a751cb659b180d5274"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:daf3cb43b7cf2ba96d614252ce1684c1bccee6b2183a01328c98d36fcd7d5cb0"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99bf650dc5d69546e076f413a87481ee1d2d09aaaaaca058c9251b6d8c14783f"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dd6e660effd852586b6a8478a1d244b8dc90ab5b1321751d2ea15deb49ed414"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3391d1e16e2a5a1507d83e4a8b100f4ee626e8eca43cf2cadb543de69827c4c"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1f145462f1fa6e4a4ae3c0f782e580ce44d57c8f2c7aae1b6fa88c0b2efdb41"}, - {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1a7191e42732df52cb5f39d3527217e7ab73cae2cb3694d241e18f53d84ea9a7"}, - {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0448abc479fab28b00cb472d278828b3ccca164531daab4e970a0458786055d6"}, - {file = "greenlet-3.0.3-cp37-cp37m-win32.whl", hash = "sha256:b542be2440edc2d48547b5923c408cbe0fc94afb9f18741faa6ae970dbcb9b6d"}, - {file = "greenlet-3.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:01bc7ea167cf943b4c802068e178bbf70ae2e8c080467070d01bfa02f337ee67"}, - {file = "greenlet-3.0.3-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:1996cb9306c8595335bb157d133daf5cf9f693ef413e7673cb07e3e5871379ca"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc0f794e6ad661e321caa8d2f0a55ce01213c74722587256fb6566049a8b04"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9db1c18f0eaad2f804728c67d6c610778456e3e1cc4ab4bbd5eeb8e6053c6fc"}, - {file = 
"greenlet-3.0.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7170375bcc99f1a2fbd9c306f5be8764eaf3ac6b5cb968862cad4c7057756506"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b66c9c1e7ccabad3a7d037b2bcb740122a7b17a53734b7d72a344ce39882a1b"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:098d86f528c855ead3479afe84b49242e174ed262456c342d70fc7f972bc13c4"}, - {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:81bb9c6d52e8321f09c3d165b2a78c680506d9af285bfccbad9fb7ad5a5da3e5"}, - {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fd096eb7ffef17c456cfa587523c5f92321ae02427ff955bebe9e3c63bc9f0da"}, - {file = "greenlet-3.0.3-cp38-cp38-win32.whl", hash = "sha256:d46677c85c5ba00a9cb6f7a00b2bfa6f812192d2c9f7d9c4f6a55b60216712f3"}, - {file = "greenlet-3.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:419b386f84949bf0e7c73e6032e3457b82a787c1ab4a0e43732898a761cc9dbf"}, - {file = "greenlet-3.0.3-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:da70d4d51c8b306bb7a031d5cff6cc25ad253affe89b70352af5f1cb68e74b53"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:086152f8fbc5955df88382e8a75984e2bb1c892ad2e3c80a2508954e52295257"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d73a9fe764d77f87f8ec26a0c85144d6a951a6c438dfe50487df5595c6373eac"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7dcbe92cc99f08c8dd11f930de4d99ef756c3591a5377d1d9cd7dd5e896da71"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1551a8195c0d4a68fac7a4325efac0d541b48def35feb49d803674ac32582f61"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:64d7675ad83578e3fc149b617a444fab8efdafc9385471f868eb5ff83e446b8b"}, - {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b37eef18ea55f2ffd8f00ff8fe7c8d3818abd3e25fb73fae2ca3b672e333a7a6"}, - {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:77457465d89b8263bca14759d7c1684df840b6811b2499838cc5b040a8b5b113"}, - {file = "greenlet-3.0.3-cp39-cp39-win32.whl", hash = "sha256:57e8974f23e47dac22b83436bdcf23080ade568ce77df33159e019d161ce1d1e"}, - {file = "greenlet-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:c5ee858cfe08f34712f548c3c363e807e7186f03ad7a5039ebadb29e8c6be067"}, - {file = "greenlet-3.0.3.tar.gz", hash = "sha256:43374442353259554ce33599da8b692d5aa96f8976d567d4badf263371fbe491"}, + {file = "greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36b89d13c49216cadb828db8dfa6ce86bbbc476a82d3a6c397f0efae0525bdd0"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94b6150a85e1b33b40b1464a3f9988dcc5251d6ed06842abff82e42632fac120"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93147c513fac16385d1036b7e5b102c7fbbdb163d556b791f0f11eada7ba65dc"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da7a9bff22ce038e19bf62c4dd1ec8391062878710ded0a845bcf47cc0200617"}, + {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b2795058c23988728eec1f36a4e5e4ebad22f8320c85f3587b539b9ac84128d7"}, + {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:ed10eac5830befbdd0c32f83e8aa6288361597550ba669b04c48f0f9a2c843c6"}, + {file = "greenlet-3.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:77c386de38a60d1dfb8e55b8c1101d68c79dfdd25c7095d51fec2dd800892b80"}, + {file = "greenlet-3.1.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a"}, + {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511"}, + {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395"}, + {file = "greenlet-3.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39"}, + {file = "greenlet-3.1.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79"}, + {file = 
"greenlet-3.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9"}, + {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0"}, + {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942"}, + {file = "greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01"}, + {file = "greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4"}, + {file = 
"greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e"}, + {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1"}, + {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c"}, + {file = "greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822"}, + {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01"}, + {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47da355d8687fd65240c364c90a31569a133b7b60de111c255ef5b606f2ae291"}, + {file = 
"greenlet-3.1.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98884ecf2ffb7d7fe6bd517e8eb99d31ff7855a840fa6d0d63cd07c037f6a981"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1d4aeb8891338e60d1ab6127af1fe45def5259def8094b9c7e34690c8858803"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db32b5348615a04b82240cc67983cb315309e88d444a288934ee6ceaebcad6cc"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dcc62f31eae24de7f8dce72134c8651c58000d3b1868e01392baea7c32c247de"}, + {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1d3755bcb2e02de341c55b4fca7a745a24a9e7212ac953f6b3a48d117d7257aa"}, + {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b8da394b34370874b4572676f36acabac172602abf054cbc4ac910219f3340af"}, + {file = "greenlet-3.1.1-cp37-cp37m-win32.whl", hash = "sha256:a0dfc6c143b519113354e780a50381508139b07d2177cb6ad6a08278ec655798"}, + {file = "greenlet-3.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:54558ea205654b50c438029505def3834e80f0869a70fb15b871c29b4575ddef"}, + {file = "greenlet-3.1.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:346bed03fe47414091be4ad44786d1bd8bef0c3fcad6ed3dee074a032ab408a9"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfc59d69fc48664bc693842bd57acfdd490acafda1ab52c7836e3fc75c90a111"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21e10da6ec19b457b82636209cbe2331ff4306b54d06fa04b7c138ba18c8a81"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37b9de5a96111fc15418819ab4c4432e4f3c2ede61e660b1e33971eba26ef9ba"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6ef9ea3f137e5711f0dbe5f9263e8c009b7069d8a1acea822bd5e9dae0ae49c8"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85f3ff71e2e60bd4b4932a043fbbe0f499e263c628390b285cb599154a3b03b1"}, + {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:95ffcf719966dd7c453f908e208e14cde192e09fde6c7186c8f1896ef778d8cd"}, + {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:03a088b9de532cbfe2ba2034b2b85e82df37874681e8c470d6fb2f8c04d7e4b7"}, + {file = "greenlet-3.1.1-cp38-cp38-win32.whl", hash = "sha256:8b8b36671f10ba80e159378df9c4f15c14098c4fd73a36b9ad715f057272fbef"}, + {file = "greenlet-3.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:7017b2be767b9d43cc31416aba48aab0d2309ee31b4dbf10a1d38fb7972bdf9d"}, + {file = "greenlet-3.1.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:396979749bd95f018296af156201d6211240e7a23090f50a8d5d18c370084dc3"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca9d0ff5ad43e785350894d97e13633a66e2b50000e8a183a50a88d834752d42"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f6ff3b14f2df4c41660a7dec01045a045653998784bf8cfcb5a525bdffffbc8f"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94ebba31df2aa506d7b14866fed00ac141a867e63143fe5bca82a8e503b36437"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73aaad12ac0ff500f62cebed98d8789198ea0e6f233421059fa68a5aa7220145"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63e4844797b975b9af3a3fb8f7866ff08775f5426925e1e0bbcfe7932059a12c"}, + {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7939aa3ca7d2a1593596e7ac6d59391ff30281ef280d8632fa03d81f7c5f955e"}, + {file = 
"greenlet-3.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d0028e725ee18175c6e422797c407874da24381ce0690d6b9396c204c7f7276e"}, + {file = "greenlet-3.1.1-cp39-cp39-win32.whl", hash = "sha256:5e06afd14cbaf9e00899fae69b24a32f2196c19de08fcb9f4779dd4f004e5e7c"}, + {file = "greenlet-3.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:3319aa75e0e0639bc15ff54ca327e8dc7a6fe404003496e3c6925cd3142e0e22"}, + {file = "greenlet-3.1.1.tar.gz", hash = "sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467"}, ] [package.extras] @@ -306,15 +332,18 @@ socks = ["socksio (==1.*)"] [[package]] name = "idna" -version = "3.8" +version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" files = [ - {file = "idna-3.8-py3-none-any.whl", hash = "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac"}, - {file = "idna-3.8.tar.gz", hash = "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603"}, + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, ] +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + [[package]] name = "isort" version = "5.13.2" @@ -470,19 +499,19 @@ flake8-polyfill = ">=1.0.2,<2" [[package]] name = "platformdirs" -version = "4.2.2" +version = "4.3.6" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, - {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, ] [package.extras] -docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] -type = ["mypy (>=1.8)"] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.11.2)"] [[package]] name = "prospector" @@ -558,62 +587,113 @@ files = [ [[package]] name = "pydantic" -version = "1.10.17" -description = "Data validation and settings management using python type hints" +version = "2.7.4" +description = "Data validation using Python type hints" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pydantic-1.10.17-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0fa51175313cc30097660b10eec8ca55ed08bfa07acbfe02f7a42f6c242e9a4b"}, - {file = "pydantic-1.10.17-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c7e8988bb16988890c985bd2093df9dd731bfb9d5e0860db054c23034fab8f7a"}, - {file = "pydantic-1.10.17-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:371dcf1831f87c9e217e2b6a0c66842879a14873114ebb9d0861ab22e3b5bb1e"}, - {file = 
"pydantic-1.10.17-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4866a1579c0c3ca2c40575398a24d805d4db6cb353ee74df75ddeee3c657f9a7"}, - {file = "pydantic-1.10.17-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:543da3c6914795b37785703ffc74ba4d660418620cc273490d42c53949eeeca6"}, - {file = "pydantic-1.10.17-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7623b59876f49e61c2e283551cc3647616d2fbdc0b4d36d3d638aae8547ea681"}, - {file = "pydantic-1.10.17-cp310-cp310-win_amd64.whl", hash = "sha256:409b2b36d7d7d19cd8310b97a4ce6b1755ef8bd45b9a2ec5ec2b124db0a0d8f3"}, - {file = "pydantic-1.10.17-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fa43f362b46741df8f201bf3e7dff3569fa92069bcc7b4a740dea3602e27ab7a"}, - {file = "pydantic-1.10.17-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2a72d2a5ff86a3075ed81ca031eac86923d44bc5d42e719d585a8eb547bf0c9b"}, - {file = "pydantic-1.10.17-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4ad32aed3bf5eea5ca5decc3d1bbc3d0ec5d4fbcd72a03cdad849458decbc63"}, - {file = "pydantic-1.10.17-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aeb4e741782e236ee7dc1fb11ad94dc56aabaf02d21df0e79e0c21fe07c95741"}, - {file = "pydantic-1.10.17-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d2f89a719411cb234105735a520b7c077158a81e0fe1cb05a79c01fc5eb59d3c"}, - {file = "pydantic-1.10.17-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db3b48d9283d80a314f7a682f7acae8422386de659fffaba454b77a083c3937d"}, - {file = "pydantic-1.10.17-cp311-cp311-win_amd64.whl", hash = "sha256:9c803a5113cfab7bbb912f75faa4fc1e4acff43e452c82560349fff64f852e1b"}, - {file = "pydantic-1.10.17-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:820ae12a390c9cbb26bb44913c87fa2ff431a029a785642c1ff11fed0a095fcb"}, - {file = "pydantic-1.10.17-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:c1e51d1af306641b7d1574d6d3307eaa10a4991542ca324f0feb134fee259815"}, - {file = "pydantic-1.10.17-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e53fb834aae96e7b0dadd6e92c66e7dd9cdf08965340ed04c16813102a47fab"}, - {file = "pydantic-1.10.17-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e2495309b1266e81d259a570dd199916ff34f7f51f1b549a0d37a6d9b17b4dc"}, - {file = "pydantic-1.10.17-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:098ad8de840c92ea586bf8efd9e2e90c6339d33ab5c1cfbb85be66e4ecf8213f"}, - {file = "pydantic-1.10.17-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:525bbef620dac93c430d5d6bdbc91bdb5521698d434adf4434a7ef6ffd5c4b7f"}, - {file = "pydantic-1.10.17-cp312-cp312-win_amd64.whl", hash = "sha256:6654028d1144df451e1da69a670083c27117d493f16cf83da81e1e50edce72ad"}, - {file = "pydantic-1.10.17-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c87cedb4680d1614f1d59d13fea353faf3afd41ba5c906a266f3f2e8c245d655"}, - {file = "pydantic-1.10.17-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11289fa895bcbc8f18704efa1d8020bb9a86314da435348f59745473eb042e6b"}, - {file = "pydantic-1.10.17-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:94833612d6fd18b57c359a127cbfd932d9150c1b72fea7c86ab58c2a77edd7c7"}, - {file = "pydantic-1.10.17-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:d4ecb515fa7cb0e46e163ecd9d52f9147ba57bc3633dca0e586cdb7a232db9e3"}, - {file = "pydantic-1.10.17-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7017971ffa7fd7808146880aa41b266e06c1e6e12261768a28b8b41ba55c8076"}, - {file = "pydantic-1.10.17-cp37-cp37m-win_amd64.whl", hash = "sha256:e840e6b2026920fc3f250ea8ebfdedf6ea7a25b77bf04c6576178e681942ae0f"}, - {file = "pydantic-1.10.17-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bfbb18b616abc4df70591b8c1ff1b3eabd234ddcddb86b7cac82657ab9017e33"}, - {file = 
"pydantic-1.10.17-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ebb249096d873593e014535ab07145498957091aa6ae92759a32d40cb9998e2e"}, - {file = "pydantic-1.10.17-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8c209af63ccd7b22fba94b9024e8b7fd07feffee0001efae50dd99316b27768"}, - {file = "pydantic-1.10.17-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4b40c9e13a0b61583e5599e7950490c700297b4a375b55b2b592774332798b7"}, - {file = "pydantic-1.10.17-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c31d281c7485223caf6474fc2b7cf21456289dbaa31401844069b77160cab9c7"}, - {file = "pydantic-1.10.17-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ae5184e99a060a5c80010a2d53c99aee76a3b0ad683d493e5f0620b5d86eeb75"}, - {file = "pydantic-1.10.17-cp38-cp38-win_amd64.whl", hash = "sha256:ad1e33dc6b9787a6f0f3fd132859aa75626528b49cc1f9e429cdacb2608ad5f0"}, - {file = "pydantic-1.10.17-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e17c0ee7192e54a10943f245dc79e36d9fe282418ea05b886e1c666063a7b54"}, - {file = "pydantic-1.10.17-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cafb9c938f61d1b182dfc7d44a7021326547b7b9cf695db5b68ec7b590214773"}, - {file = "pydantic-1.10.17-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95ef534e3c22e5abbdbdd6f66b6ea9dac3ca3e34c5c632894f8625d13d084cbe"}, - {file = "pydantic-1.10.17-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62d96b8799ae3d782df7ec9615cb59fc32c32e1ed6afa1b231b0595f6516e8ab"}, - {file = "pydantic-1.10.17-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ab2f976336808fd5d539fdc26eb51f9aafc1f4b638e212ef6b6f05e753c8011d"}, - {file = "pydantic-1.10.17-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b8ad363330557beac73159acfbeed220d5f1bfcd6b930302a987a375e02f74fd"}, - {file = "pydantic-1.10.17-cp39-cp39-win_amd64.whl", hash = 
"sha256:48db882e48575ce4b39659558b2f9f37c25b8d348e37a2b4e32971dd5a7d6227"}, - {file = "pydantic-1.10.17-py3-none-any.whl", hash = "sha256:e41b5b973e5c64f674b3b4720286ded184dcc26a691dd55f34391c62c6934688"}, - {file = "pydantic-1.10.17.tar.gz", hash = "sha256:f434160fb14b353caf634149baaf847206406471ba70e64657c1e8330277a991"}, + {file = "pydantic-2.7.4-py3-none-any.whl", hash = "sha256:ee8538d41ccb9c0a9ad3e0e5f07bf15ed8015b481ced539a1759d8cc89ae90d0"}, + {file = "pydantic-2.7.4.tar.gz", hash = "sha256:0c84efd9548d545f63ac0060c1e4d39bb9b14db8b3c0652338aecc07b5adec52"}, ] [package.dependencies] -typing-extensions = ">=4.2.0" +annotated-types = ">=0.4.0" +pydantic-core = "2.18.4" +typing-extensions = ">=4.6.1" [package.extras] -dotenv = ["python-dotenv (>=0.10.4)"] -email = ["email-validator (>=1.0.3)"] +email = ["email-validator (>=2.0.0)"] + +[[package]] +name = "pydantic-core" +version = "2.18.4" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_core-2.18.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:f76d0ad001edd426b92233d45c746fd08f467d56100fd8f30e9ace4b005266e4"}, + {file = "pydantic_core-2.18.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:59ff3e89f4eaf14050c8022011862df275b552caef8082e37b542b066ce1ff26"}, + {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a55b5b16c839df1070bc113c1f7f94a0af4433fcfa1b41799ce7606e5c79ce0a"}, + {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4d0dcc59664fcb8974b356fe0a18a672d6d7cf9f54746c05f43275fc48636851"}, + {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8951eee36c57cd128f779e641e21eb40bc5073eb28b2d23f33eb0ef14ffb3f5d"}, + {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:4701b19f7e3a06ea655513f7938de6f108123bf7c86bbebb1196eb9bd35cf724"}, + {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e00a3f196329e08e43d99b79b286d60ce46bed10f2280d25a1718399457e06be"}, + {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:97736815b9cc893b2b7f663628e63f436018b75f44854c8027040e05230eeddb"}, + {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6891a2ae0e8692679c07728819b6e2b822fb30ca7445f67bbf6509b25a96332c"}, + {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bc4ff9805858bd54d1a20efff925ccd89c9d2e7cf4986144b30802bf78091c3e"}, + {file = "pydantic_core-2.18.4-cp310-none-win32.whl", hash = "sha256:1b4de2e51bbcb61fdebd0ab86ef28062704f62c82bbf4addc4e37fa4b00b7cbc"}, + {file = "pydantic_core-2.18.4-cp310-none-win_amd64.whl", hash = "sha256:6a750aec7bf431517a9fd78cb93c97b9b0c496090fee84a47a0d23668976b4b0"}, + {file = "pydantic_core-2.18.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:942ba11e7dfb66dc70f9ae66b33452f51ac7bb90676da39a7345e99ffb55402d"}, + {file = "pydantic_core-2.18.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b2ebef0e0b4454320274f5e83a41844c63438fdc874ea40a8b5b4ecb7693f1c4"}, + {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a642295cd0c8df1b86fc3dced1d067874c353a188dc8e0f744626d49e9aa51c4"}, + {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f09baa656c904807e832cf9cce799c6460c450c4ad80803517032da0cd062e2"}, + {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98906207f29bc2c459ff64fa007afd10a8c8ac080f7e4d5beff4c97086a3dabd"}, + {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:19894b95aacfa98e7cb093cd7881a0c76f55731efad31073db4521e2b6ff5b7d"}, + {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fbbdc827fe5e42e4d196c746b890b3d72876bdbf160b0eafe9f0334525119c8"}, + {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f85d05aa0918283cf29a30b547b4df2fbb56b45b135f9e35b6807cb28bc47951"}, + {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e85637bc8fe81ddb73fda9e56bab24560bdddfa98aa64f87aaa4e4b6730c23d2"}, + {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2f5966897e5461f818e136b8451d0551a2e77259eb0f73a837027b47dc95dab9"}, + {file = "pydantic_core-2.18.4-cp311-none-win32.whl", hash = "sha256:44c7486a4228413c317952e9d89598bcdfb06399735e49e0f8df643e1ccd0558"}, + {file = "pydantic_core-2.18.4-cp311-none-win_amd64.whl", hash = "sha256:8a7164fe2005d03c64fd3b85649891cd4953a8de53107940bf272500ba8a788b"}, + {file = "pydantic_core-2.18.4-cp311-none-win_arm64.whl", hash = "sha256:4e99bc050fe65c450344421017f98298a97cefc18c53bb2f7b3531eb39bc7805"}, + {file = "pydantic_core-2.18.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6f5c4d41b2771c730ea1c34e458e781b18cc668d194958e0112455fff4e402b2"}, + {file = "pydantic_core-2.18.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2fdf2156aa3d017fddf8aea5adfba9f777db1d6022d392b682d2a8329e087cef"}, + {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4748321b5078216070b151d5271ef3e7cc905ab170bbfd27d5c83ee3ec436695"}, + {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:847a35c4d58721c5dc3dba599878ebbdfd96784f3fb8bb2c356e123bdcd73f34"}, + {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c40d4eaad41f78e3bbda31b89edc46a3f3dc6e171bf0ecf097ff7a0ffff7cb1"}, + {file = 
"pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:21a5e440dbe315ab9825fcd459b8814bb92b27c974cbc23c3e8baa2b76890077"}, + {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01dd777215e2aa86dfd664daed5957704b769e726626393438f9c87690ce78c3"}, + {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4b06beb3b3f1479d32befd1f3079cc47b34fa2da62457cdf6c963393340b56e9"}, + {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:564d7922e4b13a16b98772441879fcdcbe82ff50daa622d681dd682175ea918c"}, + {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0eb2a4f660fcd8e2b1c90ad566db2b98d7f3f4717c64fe0a83e0adb39766d5b8"}, + {file = "pydantic_core-2.18.4-cp312-none-win32.whl", hash = "sha256:8b8bab4c97248095ae0c4455b5a1cd1cdd96e4e4769306ab19dda135ea4cdb07"}, + {file = "pydantic_core-2.18.4-cp312-none-win_amd64.whl", hash = "sha256:14601cdb733d741b8958224030e2bfe21a4a881fb3dd6fbb21f071cabd48fa0a"}, + {file = "pydantic_core-2.18.4-cp312-none-win_arm64.whl", hash = "sha256:c1322d7dd74713dcc157a2b7898a564ab091ca6c58302d5c7b4c07296e3fd00f"}, + {file = "pydantic_core-2.18.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:823be1deb01793da05ecb0484d6c9e20baebb39bd42b5d72636ae9cf8350dbd2"}, + {file = "pydantic_core-2.18.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ebef0dd9bf9b812bf75bda96743f2a6c5734a02092ae7f721c048d156d5fabae"}, + {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae1d6df168efb88d7d522664693607b80b4080be6750c913eefb77e34c12c71a"}, + {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f9899c94762343f2cc2fc64c13e7cae4c3cc65cdfc87dd810a31654c9b7358cc"}, + {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:99457f184ad90235cfe8461c4d70ab7dd2680e28821c29eca00252ba90308c78"}, + {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18f469a3d2a2fdafe99296a87e8a4c37748b5080a26b806a707f25a902c040a8"}, + {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cdf28938ac6b8b49ae5e92f2735056a7ba99c9b110a474473fd71185c1af5d"}, + {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:938cb21650855054dc54dfd9120a851c974f95450f00683399006aa6e8abb057"}, + {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:44cd83ab6a51da80fb5adbd9560e26018e2ac7826f9626bc06ca3dc074cd198b"}, + {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:972658f4a72d02b8abfa2581d92d59f59897d2e9f7e708fdabe922f9087773af"}, + {file = "pydantic_core-2.18.4-cp38-none-win32.whl", hash = "sha256:1d886dc848e60cb7666f771e406acae54ab279b9f1e4143babc9c2258213daa2"}, + {file = "pydantic_core-2.18.4-cp38-none-win_amd64.whl", hash = "sha256:bb4462bd43c2460774914b8525f79b00f8f407c945d50881568f294c1d9b4443"}, + {file = "pydantic_core-2.18.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:44a688331d4a4e2129140a8118479443bd6f1905231138971372fcde37e43528"}, + {file = "pydantic_core-2.18.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a2fdd81edd64342c85ac7cf2753ccae0b79bf2dfa063785503cb85a7d3593223"}, + {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86110d7e1907ab36691f80b33eb2da87d780f4739ae773e5fc83fb272f88825f"}, + {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:46387e38bd641b3ee5ce247563b60c5ca098da9c56c75c157a05eaa0933ed154"}, + {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:123c3cec203e3f5ac7b000bd82235f1a3eced8665b63d18be751f115588fea30"}, + 
{file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dc1803ac5c32ec324c5261c7209e8f8ce88e83254c4e1aebdc8b0a39f9ddb443"}, + {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53db086f9f6ab2b4061958d9c276d1dbe3690e8dd727d6abf2321d6cce37fa94"}, + {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:abc267fa9837245cc28ea6929f19fa335f3dc330a35d2e45509b6566dc18be23"}, + {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a0d829524aaefdebccb869eed855e2d04c21d2d7479b6cada7ace5448416597b"}, + {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:509daade3b8649f80d4e5ff21aa5673e4ebe58590b25fe42fac5f0f52c6f034a"}, + {file = "pydantic_core-2.18.4-cp39-none-win32.whl", hash = "sha256:ca26a1e73c48cfc54c4a76ff78df3727b9d9f4ccc8dbee4ae3f73306a591676d"}, + {file = "pydantic_core-2.18.4-cp39-none-win_amd64.whl", hash = "sha256:c67598100338d5d985db1b3d21f3619ef392e185e71b8d52bceacc4a7771ea7e"}, + {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:574d92eac874f7f4db0ca653514d823a0d22e2354359d0759e3f6a406db5d55d"}, + {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1f4d26ceb5eb9eed4af91bebeae4b06c3fb28966ca3a8fb765208cf6b51102ab"}, + {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77450e6d20016ec41f43ca4a6c63e9fdde03f0ae3fe90e7c27bdbeaece8b1ed4"}, + {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d323a01da91851a4f17bf592faf46149c9169d68430b3146dcba2bb5e5719abc"}, + {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43d447dd2ae072a0065389092a231283f62d960030ecd27565672bd40746c507"}, + {file = 
"pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:578e24f761f3b425834f297b9935e1ce2e30f51400964ce4801002435a1b41ef"}, + {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:81b5efb2f126454586d0f40c4d834010979cb80785173d1586df845a632e4e6d"}, + {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ab86ce7c8f9bea87b9d12c7f0af71102acbf5ecbc66c17796cff45dae54ef9a5"}, + {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:90afc12421df2b1b4dcc975f814e21bc1754640d502a2fbcc6d41e77af5ec312"}, + {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:51991a89639a912c17bef4b45c87bd83593aee0437d8102556af4885811d59f5"}, + {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:293afe532740370aba8c060882f7d26cfd00c94cae32fd2e212a3a6e3b7bc15e"}, + {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b48ece5bde2e768197a2d0f6e925f9d7e3e826f0ad2271120f8144a9db18d5c8"}, + {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eae237477a873ab46e8dd748e515c72c0c804fb380fbe6c85533c7de51f23a8f"}, + {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:834b5230b5dfc0c1ec37b2fda433b271cbbc0e507560b5d1588e2cc1148cf1ce"}, + {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e858ac0a25074ba4bce653f9b5d0a85b7456eaddadc0ce82d3878c22489fa4ee"}, + {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2fd41f6eff4c20778d717af1cc50eca52f5afe7805ee530a4fbd0bae284f16e9"}, + {file = "pydantic_core-2.18.4.tar.gz", hash = "sha256:ec3beeada09ff865c344ff3bc2f427f5e6c26401cc6113d77e372c3fdac73864"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" [[package]] name 
= "pydocstyle" @@ -867,60 +947,60 @@ files = [ [[package]] name = "sqlalchemy" -version = "2.0.33" +version = "2.0.35" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.33-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:63b7d9890f7958dabd95cf98a3f48740fbe2bb0493523aef590e82164fa68194"}, - {file = "SQLAlchemy-2.0.33-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:32a4f38d2efca066ec793451ef6852cb0d9086dc3d5479d88a5a25529d1d1861"}, - {file = "SQLAlchemy-2.0.33-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3926e4ed4a3e956c8b2b0f1140493378c8cd17cad123b4fc1e0f6ecd3e05b19"}, - {file = "SQLAlchemy-2.0.33-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2415824ec658891ac38d13a2f36b4ceb2033f034dee1c226f83917589a65f072"}, - {file = "SQLAlchemy-2.0.33-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:92249ac94279b8e5f0c0c8420e09b804d0a49d2269f52f549d4cb536c8382434"}, - {file = "SQLAlchemy-2.0.33-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c9f4f92eee7d06531cc6a5b814e603a0c7639876aab03638dcc70c420a3974f6"}, - {file = "SQLAlchemy-2.0.33-cp310-cp310-win32.whl", hash = "sha256:4f1c44c8d66101e6f627f330d8b5b3de5ad25eedb6df3ce39a2e6f92debbcf15"}, - {file = "SQLAlchemy-2.0.33-cp310-cp310-win_amd64.whl", hash = "sha256:3ad94634338d8c576b1d47a96c798be186650aa5282072053ce2d12c6f309f82"}, - {file = "SQLAlchemy-2.0.33-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:570ec43e8c3c020abac4f0720baa5fe5187334e3f1e8e1777183c041962b61cc"}, - {file = "SQLAlchemy-2.0.33-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81759e77a4985abdbac068762a0eaf0f11860fe041ad6da170aae7615ea72531"}, - {file = "SQLAlchemy-2.0.33-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49541a43828e273325c520fbacf786615bd974dad63ff60b8ea1e1216e914d1a"}, - {file = 
"SQLAlchemy-2.0.33-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82c72da5be489c8d150deba70d5732398695418df5232bceb52ee323ddd9753b"}, - {file = "SQLAlchemy-2.0.33-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:31e56020832be602201fbf8189f379569cf5c3604cdc4ce79f10dbbfcbf8a0eb"}, - {file = "SQLAlchemy-2.0.33-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:30a3f55be76364b64c83788728faaba782ab282a24909e1994404c2146d39982"}, - {file = "SQLAlchemy-2.0.33-cp311-cp311-win32.whl", hash = "sha256:17d0c69f66392ad2db1609373a74d1f834b2e632f3f52d446747b8ec220aea53"}, - {file = "SQLAlchemy-2.0.33-cp311-cp311-win_amd64.whl", hash = "sha256:c5d5a733c6af7f392435e673d1b136f6bdf2366033abb35eed680400dc730840"}, - {file = "SQLAlchemy-2.0.33-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:1d81e3aeab456fe24c3f0dcfd4f952a3a5ee45e9c14fc66d34c1d7a60cf7b698"}, - {file = "SQLAlchemy-2.0.33-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ca8788dc1baee100f09110f33a01d928cf9df4483d2bfb25a37be31a659d46bb"}, - {file = "SQLAlchemy-2.0.33-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:60c54b677d4f0a0b2df3b79e89e84d601fb931c720176641742efd66b50601f9"}, - {file = "SQLAlchemy-2.0.33-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:684aee5fd811091b2f48006fb3fe6c7f2de4a716ef8d294a2aab762099753133"}, - {file = "SQLAlchemy-2.0.33-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ee2b82b170591ccd19d463c9798a9caeea0cad967a8d2f3264de459f582696d5"}, - {file = "SQLAlchemy-2.0.33-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1109cc6dc5c9d1223c42186391e6a5509e6d4ab2c30fa629573c10184f742f2e"}, - {file = "SQLAlchemy-2.0.33-cp312-cp312-win32.whl", hash = "sha256:c633e2d2f8a7b88c06e276bbe16cb7e62fed815fcbeb69cd9752cea166ecb8e8"}, - {file = "SQLAlchemy-2.0.33-cp312-cp312-win_amd64.whl", hash = "sha256:77eaf8fdf305266b806a91ae4633edbf86ad37e13bd92ac85e305e7f654c19a5"}, - {file = 
"SQLAlchemy-2.0.33-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:67eb8e0ffbebd3d82ec5079ca5f807a661c574b482785483717857c2acab833a"}, - {file = "SQLAlchemy-2.0.33-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3da2371628e28ef279f3f756f5e58858fad7820de08508138c9f5f9e4d8f4ac"}, - {file = "SQLAlchemy-2.0.33-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7c82a7930126bb5ccfbb73fc1562d52942fbffb2fda2791fab49de249fc202a"}, - {file = "SQLAlchemy-2.0.33-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d004a623ad4aa8d2eb31b37e65b5e020c9f65a1852b8b9e6301f0e411aca5b9a"}, - {file = "SQLAlchemy-2.0.33-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:06b30bbc43c6dd8b7cdc509cd2e58f4f1dce867565642e1d1a65e32459c89bd0"}, - {file = "SQLAlchemy-2.0.33-cp37-cp37m-win32.whl", hash = "sha256:459099ab8dd43a5edbb99f58ba4730baec457df9c06ebc71434c6b4b78cc8cf9"}, - {file = "SQLAlchemy-2.0.33-cp37-cp37m-win_amd64.whl", hash = "sha256:3c64d58e83a68e228b1ae6ebac8721241e9d8cc5e0c0dd11ed5d89155477b243"}, - {file = "SQLAlchemy-2.0.33-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9d035a672d5b3e4793a4a8865c3274a7bbbac7fac67a47b415023b5539105087"}, - {file = "SQLAlchemy-2.0.33-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:61e9a2d68a5a8ca6a84cbc79aa7f2e430ae854d3351d6e9ceb3edf6798797b63"}, - {file = "SQLAlchemy-2.0.33-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93efa4b72f7cb70555b0f66ee5e113ae40073c57054a72887e50b05bfd97baa4"}, - {file = "SQLAlchemy-2.0.33-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac252bafe8cbadfac7b1e8a74748ffd775e27325186d12b82600b652d9adcb86"}, - {file = "SQLAlchemy-2.0.33-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2b1e98507ec2aa200af980d592e936e9dac1c1ec50acc94330ae4b13c55d6fea"}, - {file = "SQLAlchemy-2.0.33-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:523ae689c023cbf0fe1613101254824515193f85f806ba04611dee83302660b5"}, - 
{file = "SQLAlchemy-2.0.33-cp38-cp38-win32.whl", hash = "sha256:7fd0a28bc24a75326f13735a58272247f65c9e8ee16205eacb2431d6ee94f44a"}, - {file = "SQLAlchemy-2.0.33-cp38-cp38-win_amd64.whl", hash = "sha256:0ea64443a86c3b5a0fd7c93363ad2f9465cb3af61f9920b7c75d1a7bebbeef8a"}, - {file = "SQLAlchemy-2.0.33-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9e5819822050e6e36e2aa41260d05074c026a1bbb9baa6869170b5ce64db7a4d"}, - {file = "SQLAlchemy-2.0.33-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8bef11d31a1c48f5943e577d1ef81085ec1550c37552bfc9bf8e5d184ce47142"}, - {file = "SQLAlchemy-2.0.33-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06504d9625e3ef114b39803ebca6f379133acad58a87c33117ddc5df66079915"}, - {file = "SQLAlchemy-2.0.33-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:454e9b4355f0051063daebc4060140251c19f33fc5d02151c347431860fd104b"}, - {file = "SQLAlchemy-2.0.33-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:28c0800c851955f5bd11c0b904638c1343002650d0c071c6fbf0d157cc78627d"}, - {file = "SQLAlchemy-2.0.33-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:816c927dd51e4951d6e79870c945340057a5d8e63543419dee0d247bd67a88f8"}, - {file = "SQLAlchemy-2.0.33-cp39-cp39-win32.whl", hash = "sha256:c40e0213beaf410a151e4329e30c73687838c251c998ba1b312975dbbcb2d05d"}, - {file = "SQLAlchemy-2.0.33-cp39-cp39-win_amd64.whl", hash = "sha256:751eaafa907a66dd9a328a9d15c3dcfdcba3ef8dd8f7f4a9771cdacdec45d9bf"}, - {file = "SQLAlchemy-2.0.33-py3-none-any.whl", hash = "sha256:ae294808afde1b14a1a69aa86a69cadfe391848bbb233a5332a8065e4081cabc"}, - {file = "sqlalchemy-2.0.33.tar.gz", hash = "sha256:91c93333c2b37ff721dc83b37e28c29de4c502b5612f2d093468037b86aa2be0"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:67219632be22f14750f0d1c70e62f204ba69d28f62fd6432ba05ab295853de9b"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:4668bd8faf7e5b71c0319407b608f278f279668f358857dbfd10ef1954ac9f90"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb8bea573863762bbf45d1e13f87c2d2fd32cee2dbd50d050f83f87429c9e1ea"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f552023710d4b93d8fb29a91fadf97de89c5926c6bd758897875435f2a939f33"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:016b2e665f778f13d3c438651dd4de244214b527a275e0acf1d44c05bc6026a9"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7befc148de64b6060937231cbff8d01ccf0bfd75aa26383ffdf8d82b12ec04ff"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-win32.whl", hash = "sha256:22b83aed390e3099584b839b93f80a0f4a95ee7f48270c97c90acd40ee646f0b"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-win_amd64.whl", hash = "sha256:a29762cd3d116585278ffb2e5b8cc311fb095ea278b96feef28d0b423154858e"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e21f66748ab725ade40fa7af8ec8b5019c68ab00b929f6643e1b1af461eddb60"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8a6219108a15fc6d24de499d0d515c7235c617b2540d97116b663dade1a54d62"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:042622a5306c23b972192283f4e22372da3b8ddf5f7aac1cc5d9c9b222ab3ff6"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:627dee0c280eea91aed87b20a1f849e9ae2fe719d52cbf847c0e0ea34464b3f7"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4fdcd72a789c1c31ed242fd8c1bcd9ea186a98ee8e5408a50e610edfef980d71"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:89b64cd8898a3a6f642db4eb7b26d1b28a497d4022eccd7717ca066823e9fb01"}, + {file = 
"SQLAlchemy-2.0.35-cp311-cp311-win32.whl", hash = "sha256:6a93c5a0dfe8d34951e8a6f499a9479ffb9258123551fa007fc708ae2ac2bc5e"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-win_amd64.whl", hash = "sha256:c68fe3fcde03920c46697585620135b4ecfdfc1ed23e75cc2c2ae9f8502c10b8"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:eb60b026d8ad0c97917cb81d3662d0b39b8ff1335e3fabb24984c6acd0c900a2"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6921ee01caf375363be5e9ae70d08ce7ca9d7e0e8983183080211a062d299468"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8cdf1a0dbe5ced887a9b127da4ffd7354e9c1a3b9bb330dce84df6b70ccb3a8d"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93a71c8601e823236ac0e5d087e4f397874a421017b3318fd92c0b14acf2b6db"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e04b622bb8a88f10e439084486f2f6349bf4d50605ac3e445869c7ea5cf0fa8c"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1b56961e2d31389aaadf4906d453859f35302b4eb818d34a26fab72596076bb8"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-win32.whl", hash = "sha256:0f9f3f9a3763b9c4deb8c5d09c4cc52ffe49f9876af41cc1b2ad0138878453cf"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-win_amd64.whl", hash = "sha256:25b0f63e7fcc2a6290cb5f7f5b4fc4047843504983a28856ce9b35d8f7de03cc"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f021d334f2ca692523aaf7bbf7592ceff70c8594fad853416a81d66b35e3abf9"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05c3f58cf91683102f2f0265c0db3bd3892e9eedabe059720492dbaa4f922da1"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:032d979ce77a6c2432653322ba4cbeabf5a6837f704d16fa38b5a05d8e21fa00"}, + {file = 
"SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:2e795c2f7d7249b75bb5f479b432a51b59041580d20599d4e112b5f2046437a3"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:cc32b2990fc34380ec2f6195f33a76b6cdaa9eecf09f0c9404b74fc120aef36f"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-win32.whl", hash = "sha256:9509c4123491d0e63fb5e16199e09f8e262066e58903e84615c301dde8fa2e87"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-win_amd64.whl", hash = "sha256:3655af10ebcc0f1e4e06c5900bb33e080d6a1fa4228f502121f28a3b1753cde5"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4c31943b61ed8fdd63dfd12ccc919f2bf95eefca133767db6fbbd15da62078ec"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a62dd5d7cc8626a3634208df458c5fe4f21200d96a74d122c83bc2015b333bc1"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0630774b0977804fba4b6bbea6852ab56c14965a2b0c7fc7282c5f7d90a1ae72"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d625eddf7efeba2abfd9c014a22c0f6b3796e0ffb48f5d5ab106568ef01ff5a"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ada603db10bb865bbe591939de854faf2c60f43c9b763e90f653224138f910d9"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c41411e192f8d3ea39ea70e0fae48762cd11a2244e03751a98bd3c0ca9a4e936"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-win32.whl", hash = "sha256:d299797d75cd747e7797b1b41817111406b8b10a4f88b6e8fe5b5e59598b43b0"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-win_amd64.whl", hash = "sha256:0375a141e1c0878103eb3d719eb6d5aa444b490c96f3fedab8471c7f6ffe70ee"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ccae5de2a0140d8be6838c331604f91d6fafd0735dbdcee1ac78fc8fbaba76b4"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:2a275a806f73e849e1c309ac11108ea1a14cd7058577aba962cd7190e27c9e3c"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:732e026240cdd1c1b2e3ac515c7a23820430ed94292ce33806a95869c46bd139"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:890da8cd1941fa3dab28c5bac3b9da8502e7e366f895b3b8e500896f12f94d11"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0d8326269dbf944b9201911b0d9f3dc524d64779a07518199a58384c3d37a44"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b76d63495b0508ab9fc23f8152bac63205d2a704cd009a2b0722f4c8e0cba8e0"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-win32.whl", hash = "sha256:69683e02e8a9de37f17985905a5eca18ad651bf592314b4d3d799029797d0eb3"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-win_amd64.whl", hash = "sha256:aee110e4ef3c528f3abbc3c2018c121e708938adeeff9006428dd7c8555e9b3f"}, + {file = "SQLAlchemy-2.0.35-py3-none-any.whl", hash = "sha256:2ab3f0336c0387662ce6221ad30ab3a5e6499aab01b9790879b6578fd9b8faa1"}, + {file = "sqlalchemy-2.0.35.tar.gz", hash = "sha256:e11d7ea4d24f0a262bccf9a7cd6284c976c5369dac21db237cff59586045ab9f"}, ] [package.dependencies] @@ -994,13 +1074,13 @@ files = [ [[package]] name = "types-requests" -version = "2.32.0.20240712" +version = "2.32.0.20240914" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" files = [ - {file = "types-requests-2.32.0.20240712.tar.gz", hash = "sha256:90c079ff05e549f6bf50e02e910210b98b8ff1ebdd18e19c873cd237737c1358"}, - {file = "types_requests-2.32.0.20240712-py3-none-any.whl", hash = "sha256:f754283e152c752e46e70942fa2a146b5bc70393522257bb85bd1ef7e019dcc3"}, + {file = "types-requests-2.32.0.20240914.tar.gz", hash = "sha256:2850e178db3919d9bf809e434eef65ba49d0e7e33ac92d588f4a5e295fffd405"}, + {file = "types_requests-2.32.0.20240914-py3-none-any.whl", hash = 
"sha256:59c2f673eb55f32a99b2894faf6020e1a9f4a402ad0f192bfee0b64469054310"}, ] [package.dependencies] @@ -1019,13 +1099,13 @@ files = [ [[package]] name = "urllib3" -version = "2.2.2" +version = "2.2.3" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.8" files = [ - {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, - {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, + {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, + {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, ] [package.extras] @@ -1134,4 +1214,4 @@ files = [ [metadata] lock-version = "2.0" python-versions = "3.11.*" -content-hash = "0c115e830083d14aa96e0f88f102dfcac65ac31bc522fb8c3bc68337adbeaccc" +content-hash = "16c726b30fc96e24b9f35ae5a6037b0df584b1f513b4828ac0cc1b5bb16eb86d" diff --git a/athena/pyproject.toml b/athena/pyproject.toml index 98d83e5d8..ac50e59f6 100644 --- a/athena/pyproject.toml +++ b/athena/pyproject.toml @@ -16,7 +16,7 @@ psycopg2 = "^2.9.9" [tool.poetry.group.dev.dependencies] types-requests = "^2.31.0.8" -pydantic = "1.10.17" +pydantic = "2.7.4" prospector = "^1.10.2" [build-system] diff --git a/llm_core/llm_core/models/model_config.py b/llm_core/llm_core/models/model_config.py index f433ab587..5bb988390 100644 --- a/llm_core/llm_core/models/model_config.py +++ b/llm_core/llm_core/models/model_config.py @@ -4,7 +4,9 @@ class ModelConfig(BaseModel, ABC): - + class Config: + protected_namespaces = () + @abstractmethod def get_model(self) -> BaseLanguageModel: pass diff --git a/llm_core/llm_core/utils/llm_utils.py b/llm_core/llm_core/utils/llm_utils.py index 4637b8558..7df06c43b 100644 --- a/llm_core/llm_core/utils/llm_utils.py +++ 
b/llm_core/llm_core/utils/llm_utils.py @@ -1,7 +1,7 @@ from typing import Type, TypeVar, List from pydantic import BaseModel import tiktoken -from langchain.chat_models import ChatOpenAI +from langchain_openai import ChatOpenAI from langchain.base_language import BaseLanguageModel from langchain.prompts import ( ChatPromptTemplate, @@ -26,9 +26,9 @@ def num_tokens_from_prompt(chat_prompt: ChatPromptTemplate, prompt_input: dict) return num_tokens_from_string(chat_prompt.format(**prompt_input)) -def check_prompt_length_and_omit_features_if_necessary(prompt: ChatPromptTemplate, - prompt_input: dict, - max_input_tokens: int, +def check_prompt_length_and_omit_features_if_necessary(prompt: ChatPromptTemplate, + prompt_input: dict, + max_input_tokens: int, omittable_features: List[str], debug: bool): """Check if the input is too long and omit features if necessary. @@ -43,7 +43,7 @@ def check_prompt_length_and_omit_features_if_necessary(prompt: ChatPromptTemplat debug (bool): Debug flag Returns: - (dict, bool): Tuple of (prompt_input, should_run) where prompt_input is the input with omitted features and + (dict, bool): Tuple of (prompt_input, should_run) where prompt_input is the input with omitted features and should_run is True if the model should run, False otherwise """ if num_tokens_from_prompt(prompt, prompt_input) <= max_input_tokens: @@ -79,11 +79,11 @@ def supports_function_calling(model: BaseLanguageModel): def get_chat_prompt_with_formatting_instructions( - model: BaseLanguageModel, - system_message: str, - human_message: str, - pydantic_object: Type[T] - ) -> ChatPromptTemplate: + model: BaseLanguageModel, + system_message: str, + human_message: str, + pydantic_object: Type[T] +) -> ChatPromptTemplate: """Returns a ChatPromptTemplate with formatting instructions (if necessary) Note: Does nothing if the model supports function calling @@ -101,10 +101,11 @@ def get_chat_prompt_with_formatting_instructions( system_message_prompt = 
SystemMessagePromptTemplate.from_template(system_message) human_message_prompt = HumanMessagePromptTemplate.from_template(human_message) return ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt]) - + output_parser = PydanticOutputParser(pydantic_object=pydantic_object) system_message_prompt = SystemMessagePromptTemplate.from_template(system_message + "\n{format_instructions}") system_message_prompt.prompt.partial_variables = {"format_instructions": output_parser.get_format_instructions()} system_message_prompt.prompt.input_variables.remove("format_instructions") - human_message_prompt = HumanMessagePromptTemplate.from_template(human_message + "\n\nJSON response following the provided schema:") + human_message_prompt = HumanMessagePromptTemplate.from_template( + human_message + "\n\nJSON response following the provided schema:") return ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt]) \ No newline at end of file diff --git a/llm_core/llm_core/utils/predict_and_parse.py b/llm_core/llm_core/utils/predict_and_parse.py index d73748bdf..0caecd507 100644 --- a/llm_core/llm_core/utils/predict_and_parse.py +++ b/llm_core/llm_core/utils/predict_and_parse.py @@ -1,17 +1,24 @@ from typing import Optional, Type, TypeVar, List + +from langchain_core.exceptions import OutputParserException from langchain_core.language_models import BaseLanguageModel +from langchain_core.output_parsers import PydanticOutputParser +from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel, ValidationError -from langchain_core.runnables import RunnableSequence +from langchain_core.tracers import langchain +from langchain_core.utils.function_calling import convert_to_openai_function from athena import get_experiment_environment +from athena.logger import logger +from .llm_utils import supports_function_calling T = 
TypeVar("T", bound=BaseModel) async def predict_and_parse( - model: BaseLanguageModel, - chat_prompt: ChatPromptTemplate, - prompt_input: dict, - pydantic_object: Type[T], + model: BaseLanguageModel, + chat_prompt: ChatPromptTemplate, + prompt_input: dict, + pydantic_object: Type[T], tags: Optional[List[str]] ) -> Optional[T]: """Predicts an LLM completion using the model and parses the output using the provided Pydantic model @@ -26,6 +33,8 @@ async def predict_and_parse( Returns: Optional[T]: Parsed output, or None if it could not be parsed """ + langchain.debug = True + experiment = get_experiment_environment() tags = tags or [] @@ -36,13 +45,33 @@ async def predict_and_parse( if experiment.run_id is not None: tags.append(f"run-{experiment.run_id}") - structured_output_llm = model.with_structured_output(pydantic_object, method="json_mode") - chain = RunnableSequence( - chat_prompt, - structured_output_llm - ) + if supports_function_calling(model): + openai_functions = [convert_to_openai_function(pydantic_object)] + + runnable = chat_prompt | model.bind(functions=openai_functions).with_retry( + retry_if_exception_type=(ValueError, OutputParserException), + wait_exponential_jitter=True, + stop_after_attempt=3, + ) | JsonOutputFunctionsParser() + + try: + output_dict = await runnable.ainvoke(prompt_input) + return pydantic_object.parse_obj(output_dict) + except (OutputParserException, ValidationError) as e: + logger.error("Exception type: %s, Message: %s", type(e).__name__, e) + return None + + output_parser = PydanticOutputParser(pydantic_object=pydantic_object) + + runnable = chat_prompt | model.with_retry( + retry_if_exception_type=(ValueError, OutputParserException), + wait_exponential_jitter=True, + stop_after_attempt=3, + ) | output_parser try: - return await chain.ainvoke(prompt_input, config={"tags": tags}) - except ValidationError as e: - raise ValueError(f"Could not parse output: {e}") from e \ No newline at end of file + output_dict = await 
runnable.ainvoke(prompt_input) + return pydantic_object.parse_obj(output_dict) + except (OutputParserException, ValidationError) as e: + logger.error("Exception type: %s, Message: %s", type(e).__name__, e) + return None \ No newline at end of file diff --git a/modules/programming/module_programming_llm/module_programming_llm/__main__.py b/modules/programming/module_programming_llm/module_programming_llm/__main__.py index e8356270f..c057a8a62 100644 --- a/modules/programming/module_programming_llm/module_programming_llm/__main__.py +++ b/modules/programming/module_programming_llm/module_programming_llm/__main__.py @@ -1,6 +1,8 @@ +import os from typing import List import tiktoken +from langchain_core.globals import set_debug, set_verbose from athena import ( app, @@ -11,15 +13,9 @@ ) from athena.programming import Exercise, Submission, Feedback from athena.logger import logger +from module_programming_llm.approaches import generate_feedback from module_programming_llm.config import Configuration -from module_programming_llm.generate_graded_suggestions_by_file import ( - generate_suggestions_by_file as generate_graded_suggestions_by_file, -) -from module_programming_llm.generate_non_graded_suggestions_by_file import ( - generate_suggestions_by_file as generate_non_graded_suggestions_by_file, -) - @submissions_consumer def receive_submissions(exercise: Exercise, submissions: List[Submission]): @@ -41,15 +37,16 @@ def process_incoming_feedback(exercise: Exercise, submission: Submission, feedba async def suggest_feedback(exercise: Exercise, submission: Submission, is_graded: bool, module_config: Configuration) -> List[Feedback]: logger.info("suggest_feedback: %s suggestions for submission %d of exercise %d were requested", "Graded" if is_graded else "Non-graded", submission.id, exercise.id) - if is_graded: - return await generate_graded_suggestions_by_file(exercise, submission, module_config.graded_approach, - module_config.debug) - return await 
generate_non_graded_suggestions_by_file(exercise, submission, module_config.non_graded_approach, - module_config.debug) + return await generate_feedback(exercise, submission, is_graded, module_config) if __name__ == "__main__": # Preload for token estimation later tiktoken.get_encoding("cl100k_base") - app.start() \ No newline at end of file + app.start() + + enable_debug = os.getenv("ENABLE_DEBUGGING_INFO", "False").lower() in ("true", "1") + if enable_debug: + set_debug(True) + set_verbose(True) \ No newline at end of file diff --git a/modules/programming/module_programming_llm/module_programming_llm/approaches/__init__.py b/modules/programming/module_programming_llm/module_programming_llm/approaches/__init__.py new file mode 100644 index 000000000..f33d9d694 --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/approaches/__init__.py @@ -0,0 +1 @@ +from .basic_by_file_approach import generate_feedback \ No newline at end of file diff --git a/modules/programming/module_programming_llm/module_programming_llm/approaches/basic_by_file_approach.py b/modules/programming/module_programming_llm/module_programming_llm/approaches/basic_by_file_approach.py new file mode 100644 index 000000000..50f15befb --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/approaches/basic_by_file_approach.py @@ -0,0 +1,184 @@ +from typing import Optional, List + +from athena.programming import Submission, Exercise, Feedback +from module_programming_llm.config import Configuration +from llm_core.models import ModelConfigType +from module_programming_llm.prompts import GenerateFileSummary, SplitProblemStatementByFile, \ + SplitGradingInstructionsByFile, GenerateSuggestionsByFile, GenerateSuggestionsByFileOutput +from module_programming_llm.prompts.filter_out_solution.filter_out_solution import FilterOutSolution +from module_programming_llm.prompts.filter_out_solution.filter_out_solution_input import FilterOutSolutionInput +from 
module_programming_llm.prompts.filter_out_solution.filter_out_solution_output import FilterOutSolutionOutput +from module_programming_llm.prompts.generate_file_summary import GenerateFileSummaryOutput, GenerateFileSummaryInput +from module_programming_llm.prompts.generate_grading_criterion.generate_grading_criterion import \ + GenerateGradingCriterion, GenerateGradingCriterionOutput, GenerateGradingCriterionInput +from module_programming_llm.prompts.generate_suggestions_by_file.generate_suggestions_by_file_input import \ + GenerateSuggestionsByFileInput +from module_programming_llm.prompts.rag import RAGInput, RAG, RAGOutput +from module_programming_llm.prompts.split_grading_instructions_by_file import SplitGradingInstructionsByFileOutput, \ + SplitGradingInstructionsByFileInput +from module_programming_llm.prompts.split_problem_statement_by_file import SplitProblemStatementByFileOutput, \ + SplitProblemStatementByFileInput +from module_programming_llm.prompts.validate_suggestions import ValidateSuggestions, ValidateSuggestionsInput, \ + ValidateSuggestionsOutput + + +async def generate_file_summary(step: GenerateFileSummary, + input_data: GenerateFileSummaryInput, debug: bool, + model: ModelConfigType) -> (Optional)[GenerateFileSummaryOutput]: # type: ignore + return await step.process(input_data, debug, model) + + +async def split_problem_statement(step: SplitProblemStatementByFile, + input_data: SplitProblemStatementByFileInput, debug: bool, + model: ModelConfigType) -> Optional[SplitProblemStatementByFileOutput]: # type: ignore + return await step.process(input_data, debug, model) + + +async def split_grading_instructions(step: SplitGradingInstructionsByFile, + input_data: SplitGradingInstructionsByFileInput, debug: bool, + model: ModelConfigType) -> Optional[SplitGradingInstructionsByFileOutput]: # type: ignore + return await step.process(input_data, debug, model) + + +async def generate_suggestions(step: GenerateSuggestionsByFile, + input_data: 
GenerateSuggestionsByFileInput, debug: bool, + model: ModelConfigType) -> List[Optional[GenerateSuggestionsByFileOutput]]: # type: ignore + return await step.process(input_data, debug, model) + + +async def filter_out_solutions(step: FilterOutSolution, + input_data: FilterOutSolutionInput, debug: bool, + model: ModelConfigType) -> List[Optional[FilterOutSolutionOutput]]: # type: ignore + return await step.process(input_data, debug, model) + + +async def validate_suggestions(step: ValidateSuggestions, + input_data: ValidateSuggestionsInput, debug: bool, + model: ModelConfigType) -> List[Optional[ValidateSuggestionsOutput]]: # type: ignore + return await step.process(input_data, debug, model) + + +async def generate_grading_criterion(step: GenerateGradingCriterion, + input_data: GenerateGradingCriterionInput, debug: bool, + model: ModelConfigType) -> Optional[GenerateGradingCriterionOutput]: # type: ignore + return await step.process(input_data, debug, model) + +async def generate_rag_queries(step: RAG, + input_data: RAGInput, debug: bool, + model: ModelConfigType) -> Optional[RAGOutput]: # type: ignore + return await step.process(input_data, debug, model) + + +async def generate_feedback(exercise: Exercise, submission: Submission, is_graded: bool, + module_config: Configuration) -> List[Feedback]: # type: ignore + template_repo = exercise.get_template_repository() + solution_repo = exercise.get_solution_repository() + submission_repo = submission.get_repository() + is_debug = module_config.debug + model = module_config.basic_by_file_approach.model + +# rag_query_input = RAGInput(template_repo, solution_repo, exercise.id, exercise.problem_statement) +# rag_query_output = await generate_rag_queries(module_config.basic_by_file_approach.rag_requests, rag_query_input, module_config.debug, model) + +# rag_result = "" if rag_query_output is None else bulk_search(rag_query_output.rag_queries, model) + + rag_result = "" + + generate_file_summary_input = 
GenerateFileSummaryInput(template_repo, submission_repo, exercise.id, submission.id) + file_summary_output = await generate_file_summary(module_config.basic_by_file_approach.generate_file_summary, + generate_file_summary_input, is_debug, model) + + if not exercise.grading_criteria: + generate_grading_criterion_input = GenerateGradingCriterionInput(template_repo, solution_repo, exercise.id, + exercise.max_points, exercise.bonus_points, + exercise.problem_statement, + exercise.grading_instructions) + generate_grading_criterion_output = await generate_grading_criterion( + module_config.basic_by_file_approach.generate_grading_criterion, generate_grading_criterion_input, is_debug, + model) + if generate_grading_criterion_output is not None: + exercise.grading_criteria = generate_grading_criterion_output.structured_grading_criterion.criteria + + split_grading_instructions_output = None + if exercise.grading_criteria is not None: + split_grading_instructions_input = SplitGradingInstructionsByFileInput(template_repo, submission_repo, + solution_repo, exercise.id, + submission.id, + exercise.grading_instructions, + exercise.grading_criteria) + split_grading_instructions_output = await split_grading_instructions( + module_config.basic_by_file_approach.split_grading_instructions_by_file, split_grading_instructions_input, + is_debug, model) + + split_problem_statement_input = SplitProblemStatementByFileInput(template_repo, submission_repo, solution_repo, + exercise.problem_statement, exercise.id, + submission.id) + split_problem_statement_output = await split_problem_statement( + module_config.basic_by_file_approach.split_problem_statement_by_file, split_problem_statement_input, is_debug, + model) + + generate_suggestions_input = GenerateSuggestionsByFileInput(template_repo, submission_repo, solution_repo, + exercise.id, + submission.id, exercise.max_points, + exercise.bonus_points, exercise.programming_language, + file_summary_output.describe_solution_summary() if 
file_summary_output else "", + rag_result, + split_grading_instructions_output, + split_problem_statement_output, + exercise.grading_criteria, exercise.problem_statement, + exercise.grading_instructions) + output = await generate_suggestions( + module_config.basic_by_file_approach.generate_suggestions_by_file, generate_suggestions_input, is_debug, model) + + if not is_graded: + filter_out_solution_input = FilterOutSolutionInput(solution_repo, template_repo, exercise.problem_statement, + exercise.id, submission.id, output, + split_problem_statement_output) + output = await filter_out_solutions(module_config.basic_by_file_approach.filter_out_solution, + filter_out_solution_input, is_debug, model) + + # validate_suggestions_input = ValidateSuggestionsInput(solution_repo, template_repo, submission_repo, + # split_problem_statement_output, exercise.problem_statement, + # exercise.id, submission.id, output, + # split_grading_instructions_output, exercise.grading_criteria, + # exercise.grading_instructions, + # file_summary_output.describe_solution_summary() if file_summary_output else "", + # exercise.max_points, exercise.bonus_points, + # exercise.programming_language, + # rag_result) + # output = await validate_suggestions( + # module_config.basic_by_file_approach.validate_suggestions, validate_suggestions_input, is_debug, model) + + grading_instruction_ids = set( + grading_instruction.id + for criterion in exercise.grading_criteria or [] + for grading_instruction in criterion.structured_grading_instructions + ) + + feedbacks: List[Feedback] = [] + for result in output: + if result is None: + continue + for feedback in result.feedbacks: + grading_instruction_id = ( + feedback.grading_instruction_id + if feedback.grading_instruction_id in grading_instruction_ids + else None + ) + feedbacks.append( + Feedback( + exercise_id=exercise.id, + submission_id=submission.id, + title=feedback.title, + description=feedback.description, + file_path=result.file_path, + 
line_start=feedback.line_start, + line_end=feedback.line_end, + credits=feedback.credits, + structured_grading_instruction_id=grading_instruction_id, + is_graded=is_graded, + meta={}, + ) + ) + + return feedbacks diff --git a/modules/programming/module_programming_llm/module_programming_llm/config.py b/modules/programming/module_programming_llm/module_programming_llm/config.py index 6a632208e..21cb295b1 100644 --- a/modules/programming/module_programming_llm/module_programming_llm/config.py +++ b/modules/programming/module_programming_llm/module_programming_llm/config.py @@ -2,143 +2,62 @@ from pydantic import BaseModel, Field -from athena import config_schema_provider from llm_core.models import ModelConfigType, DefaultModelConfig -from module_programming_llm.prompts.generate_graded_suggestions_by_file import ( - system_message as generate_graded_suggestions_by_file_system_message, - human_message as generate_graded_suggestions_by_file_human_message, -) -from module_programming_llm.prompts.generate_non_graded_suggestions_by_file import ( - system_message as generate_non_graded_suggestions_by_file_system_message, - human_message as generate_non_graded_suggestions_by_file_human_message, -) -from module_programming_llm.prompts.split_grading_instructions_by_file import ( - system_message as split_grading_instructions_by_file_message, - human_message as split_grading_instructions_by_file_human_message, -) -from module_programming_llm.prompts.split_problem_non_grading_statement_by_file import ( - system_message as split_problem_statements_by_file_system_message_without_solution, - human_message as split_problem_statements_by_file_human_message_without_solution, -) -from module_programming_llm.prompts.split_problem_grading_statement_by_file import ( - system_message as split_problem_statements_by_file_system_message_with_solution, - human_message as split_problem_statements_by_file_human_message_with_solution, -) -from 
module_programming_llm.prompts.summarize_submission_by_file import ( - system_message as summarize_submission_by_file_system_message, - human_message as summarize_submission_by_file_human_message, -) - - -class SplitProblemStatementsBasePrompt(BaseModel): - """Base class for splitting problem statements into file-based ones, providing a structured approach for processing statements.""" - - system_message: str = Field(..., - description="Message for priming AI behavior and instructing it what to do.") - human_message: str = Field(..., - description="Message for priming AI behavior and instructing it what to do.") - tokens_before_split: int = Field(default=250, - description="Split the problem statement into file-based ones after this number of tokens.") - - -class SplitProblemStatementsWithSolutionByFilePrompt(SplitProblemStatementsBasePrompt): - """Specialized class for splitting problem statements with solutions, for cases where detailed solution information is available.""" - system_message: str = split_problem_statements_by_file_system_message_with_solution - human_message: str = split_problem_statements_by_file_human_message_with_solution - - -class SplitProblemStatementsWithoutSolutionByFilePrompt( - SplitProblemStatementsBasePrompt -): - """Specialized class for splitting problem statements without solutions, applicable when solution details are not provided.""" - system_message: str = split_problem_statements_by_file_system_message_without_solution - human_message: str = split_problem_statements_by_file_human_message_without_solution - - -class SplitGradingInstructionsByFilePrompt(BaseModel): - """\ -Features available: **{grading_instructions}**, **{changed_files_from_template_to_solution}**, **{changed_files_from_template_to_submission}**\ -""" - system_message: str = Field(default=split_grading_instructions_by_file_message, - description="Message for priming AI behavior and instructing it what to do.") - human_message: str = 
Field(default=split_grading_instructions_by_file_human_message, - description="Message from a human. The input on which the AI is supposed to act.") - tokens_before_split: int = Field(default=250, description="Split the grading instructions into file-based ones after this number of tokens.") - - -class FeedbackGenerationBasePrompt(BaseModel): - """Base class for feedback generation prompts, contains common definitions.""" - - system_message: str = Field(..., - description="Message for priming AI behavior and instructing it what to do.",) - human_message: str = Field(..., - description="Message from a human. The input on which the AI is supposed to act.",) - - -class GradedFeedbackGenerationPrompt(FeedbackGenerationBasePrompt): - """Generates graded feedback based on file submissions, tailored to provide detailed, evaluative comments and scores.""" - - system_message: str = generate_graded_suggestions_by_file_system_message - human_message: str = generate_graded_suggestions_by_file_human_message - - -class NonGradedFeedbackGenerationPrompt(FeedbackGenerationBasePrompt): - """\ -Features available: **{problem_statement}**, **{submission_file}** - -*Note: Prompt will be applied per file independently. Also, you don't have to include all features, -e.g. template_to_submission_diff. - """ - - system_message: str = generate_non_graded_suggestions_by_file_system_message - human_message: str = generate_non_graded_suggestions_by_file_human_message - - -class FileSummaryPrompt(BaseModel): - """Generates concise summaries of submission files, facilitating a quicker review and understanding of the content for AI processing.""" - - system_message: str = Field(summarize_submission_by_file_system_message, - description="Message for priming AI behavior and instructing it what to do.") - human_message: str = Field(summarize_submission_by_file_human_message, - description="Message from a human. 
The input on which the AI is supposed to act.") +from athena import config_schema_provider +from module_programming_llm.prompts import SplitProblemStatementByFile, SplitGradingInstructionsByFile, \ + GenerateSuggestionsByFile, GenerateFileSummary +from module_programming_llm.prompts.filter_out_solution.filter_out_solution import FilterOutSolution +from module_programming_llm.prompts.generate_grading_criterion.generate_grading_criterion import \ + GenerateGradingCriterion +from module_programming_llm.prompts.rag import RAG +from module_programming_llm.prompts.validate_suggestions import ValidateSuggestions class BasicApproachConfig(BaseModel): - """Defines a basic configuration for processing submissions, incorporating problem statement splitting, feedback generation, and file summarization.""" + """Defines a basic configuration for processing submissions, incorporating problem statement splitting, feedback generation.""" max_input_tokens: int = Field(default=3000, description="Maximum number of tokens in the input prompt.") - model: ModelConfigType = Field(default=DefaultModelConfig()) # type: ignore - max_number_of_files: int = Field(default=25, description="Maximum number of files. If exceeded, it will prioritize the most important ones.") - split_problem_statement_by_file_prompt: SplitProblemStatementsBasePrompt = Field(description="To be defined in " "subclasses.") - generate_suggestions_by_file_prompt: SplitProblemStatementsBasePrompt = Field(description="To be defined in " "subclasses.") - generate_file_summary_prompt: FileSummaryPrompt = Field(default=FileSummaryPrompt(), description="Generates short summaries to be fed into the LLM with separate files.") + model: ModelConfigType = Field(default=DefaultModelConfig()) # type: ignore -class GradedBasicApproachConfig(BasicApproachConfig, ABC): - """\ -This approach uses an LLM to split up the problem statement and grading instructions by file, if necessary. 
\ -Then, it generates graded suggestions for each file independently.\ -""" - - split_problem_statement_by_file_prompt: SplitProblemStatementsWithSolutionByFilePrompt = Field(default=SplitProblemStatementsWithSolutionByFilePrompt()) - split_grading_instructions_by_file_prompt: SplitGradingInstructionsByFilePrompt = (Field(default=SplitGradingInstructionsByFilePrompt())) - generate_suggestions_by_file_prompt: FeedbackGenerationBasePrompt = Field(default=GradedFeedbackGenerationPrompt()) - - -class NonGradedBasicApproachConfig(BasicApproachConfig, ABC): - """\ -This approach uses an LLM to split up the problem statement, if necessary. \ -Then, it generates non graded suggestions for each file independently.\ -""" - - split_problem_statement_by_file_prompt: SplitProblemStatementsWithoutSolutionByFilePrompt = Field(default=SplitProblemStatementsWithoutSolutionByFilePrompt()) - generate_suggestions_by_file_prompt: FeedbackGenerationBasePrompt = Field(default=NonGradedFeedbackGenerationPrompt()) - +class BasicByFileApproachConfig(BasicApproachConfig, ABC): + """ + This approach uses an LLM to split up the problem statement and grading instructions by file, if necessary. + Then, it generates graded suggestions for each file independently. + Generates grading instructions if not available. + Validates and filters out generated feedback. 
+ """ + split_problem_statement_by_file: SplitProblemStatementByFile = Field(default=SplitProblemStatementByFile()) + split_grading_instructions_by_file: SplitGradingInstructionsByFile = ( + Field(default=SplitGradingInstructionsByFile())) + generate_suggestions_by_file: GenerateSuggestionsByFile = Field(default=GenerateSuggestionsByFile()) + generate_file_summary: GenerateFileSummary = Field(default=GenerateFileSummary()) + filter_out_solution: FilterOutSolution = Field(default=FilterOutSolution()) + validate_suggestions: ValidateSuggestions = Field(default=ValidateSuggestions()) + generate_grading_criterion: GenerateGradingCriterion = Field(default=GenerateGradingCriterion()) + max_number_of_files: int = Field(default=25, + description="Maximum number of files. If exceeded, it will prioritize the most important ones.") + tokens_before_split: int = Field(default=250, + description="Split the grading instructions into file-based ones after this number of tokens.") + rag_requests: RAG = Field(default=RAG()) + + +# class ZeroShotApproachConfig(BasicApproachConfig, ABC): +# """\ +# This approach uses an LLM to split up the problem statement, if necessary. 
\ +# Then, it generates non graded suggestions for each file independently.\ +# """ +# +# split_problem_statement_by_file_prompt: SplitProblemStatementsWithoutSolutionByFilePrompt = Field(default=SplitProblemStatementsWithoutSolutionByFilePrompt()) +# generate_suggestions_by_file_prompt: FeedbackGenerationBasePrompt = Field(default=NonGradedFeedbackGenerationPrompt()) +# split_grading_instructions_by_file_prompt: SplitGradingInstructionsByFilePrompt = (Field(default=SplitGradingInstructionsByFilePrompt())) +# +# @config_schema_provider class Configuration(BaseModel): """Configuration settings for the entire module, including debug mode and approach-specific configurations.""" debug: bool = Field(default=False, description="Enable debug mode.") - graded_approach: GradedBasicApproachConfig = Field(default=GradedBasicApproachConfig()) - non_graded_approach: NonGradedBasicApproachConfig = Field(default=NonGradedBasicApproachConfig()) \ No newline at end of file + basic_by_file_approach: BasicByFileApproachConfig = Field(default=BasicByFileApproachConfig()) + # zero_shot_approach: ZeroShotApproachConfig = Field(default=ZeroShotApproachConfig()) diff --git a/modules/programming/module_programming_llm/module_programming_llm/generate_graded_suggestions_by_file.py b/modules/programming/module_programming_llm/module_programming_llm/generate_graded_suggestions_by_file.py deleted file mode 100644 index 4a29b8f6d..000000000 --- a/modules/programming/module_programming_llm/module_programming_llm/generate_graded_suggestions_by_file.py +++ /dev/null @@ -1,328 +0,0 @@ -from typing import List, Optional, Sequence -import os -import asyncio -from pydantic import BaseModel, Field - -from athena import emit_meta -from athena.programming import Exercise, Submission, Feedback - -from module_programming_llm.config import GradedBasicApproachConfig -from module_programming_llm.split_grading_instructions_by_file import ( - split_grading_instructions_by_file, -) -from 
module_programming_llm.split_problem_statement_by_file import ( - split_problem_statement_by_file, -) -from llm_core.utils.llm_utils import ( - check_prompt_length_and_omit_features_if_necessary, - get_chat_prompt_with_formatting_instructions, - num_tokens_from_string, -) -from llm_core.utils.predict_and_parse import predict_and_parse - -from module_programming_llm.helpers.utils import ( - get_diff, - load_files_from_repo, - add_line_numbers, - get_programming_language_file_extension, -) - - -class FeedbackModel(BaseModel): - title: str = Field( - description="Very short title, i.e. feedback category", example="Logic Error" - ) - description: str = Field(description="Feedback description") - line_start: Optional[int] = Field( - description="Referenced line number start, or empty if unreferenced" - ) - line_end: Optional[int] = Field( - description="Referenced line number end, or empty if unreferenced" - ) - credits: float = Field(0.0, description="Number of points received/deducted") - grading_instruction_id: Optional[int] = Field( - description="ID of the grading instruction that was used to generate this feedback, or empty if no grading instruction was used" - ) - - class Config: - title = "Feedback" - - -class AssessmentModel(BaseModel): - """Collection of feedbacks making up an assessment""" - - feedbacks: Sequence[FeedbackModel] = Field(description="Assessment feedbacks") - - class Config: - title = "Assessment" - - -# pylint: disable=too-many-locals -async def generate_suggestions_by_file( - exercise: Exercise, - submission: Submission, - config: GradedBasicApproachConfig, - debug: bool, -) -> List[Feedback]: - model = config.model.get_model() # type: ignore[attr-defined] - - chat_prompt = get_chat_prompt_with_formatting_instructions( - model=model, - system_message=config.generate_suggestions_by_file_prompt.system_message, - human_message=config.generate_suggestions_by_file_prompt.human_message, - pydantic_object=AssessmentModel, - ) - - # Get split problem 
statement and grading instructions by file (if necessary) - split_problem_statement, split_grading_instructions = await asyncio.gather( - split_problem_statement_by_file( - exercise=exercise, - submission=submission, - prompt=chat_prompt, - config=config, - debug=debug, - ), - split_grading_instructions_by_file( - exercise=exercise, - submission=submission, - prompt=chat_prompt, - config=config, - debug=debug, - ), - ) - - problem_statement_tokens = num_tokens_from_string(exercise.problem_statement or "") - is_short_problem_statement = ( - problem_statement_tokens - <= config.split_problem_statement_by_file_prompt.tokens_before_split - ) - file_problem_statements = ( - { - item.file_name: item.problem_statement - for item in split_problem_statement.items - } - if split_problem_statement is not None - else {} - ) - - is_short_grading_instructions = ( - num_tokens_from_string(exercise.grading_instructions) - <= config.split_grading_instructions_by_file_prompt.tokens_before_split - if exercise.grading_instructions is not None - else True - ) - file_grading_instructions = ( - { - item.file_name: item.grading_instructions - for item in split_grading_instructions.items - } - if split_grading_instructions is not None - else {} - ) - - prompt_inputs: List[dict] = [] - - # Feature extraction - solution_repo = exercise.get_solution_repository() - template_repo = exercise.get_template_repository() - submission_repo = submission.get_repository() - - changed_files_from_template_to_submission = get_diff( - src_repo=template_repo, dst_repo=submission_repo, file_path=None, name_only=True - ).split("\n") - changed_files_from_template_to_submission = [ - os.path.join(str(submission_repo.working_tree_dir or ""), file_path) - for file_path in changed_files_from_template_to_submission - ] - - # Changed text files - changed_files = load_files_from_repo( - submission_repo, - file_filter=lambda file_path: file_path - in changed_files_from_template_to_submission, - ) - - # Gather prompt 
inputs for each changed file (independently) - for file_path, file_content in changed_files.items(): - problem_statement = ( - exercise.problem_statement or "" - if is_short_problem_statement - else file_problem_statements.get( - file_path, "No relevant problem statement section found." - ) - ) - problem_statement = ( - problem_statement - if problem_statement.strip() - else "No problem statement found." - ) - - grading_instructions = ( - exercise.grading_instructions or "" - if is_short_grading_instructions - else file_grading_instructions.get( - file_path, "No relevant grading instructions found." - ) - ) - grading_instructions = ( - grading_instructions - if grading_instructions.strip() - else "No grading instructions found." - ) - - file_content = add_line_numbers(file_content) - solution_to_submission_diff = get_diff( - src_repo=solution_repo, - dst_repo=submission_repo, - src_prefix="solution", - dst_prefix="submission", - file_path=file_path, - ) - template_to_submission_diff = get_diff( - src_repo=template_repo, - dst_repo=submission_repo, - src_prefix="template", - dst_prefix="submission", - file_path=file_path, - ) - template_to_solution_diff = get_diff( - src_repo=template_repo, - dst_repo=solution_repo, - src_prefix="template", - dst_prefix="solution", - file_path=file_path, - ) - - prompt_inputs.append( - { - "file_path": file_path, # Not really relevant for the prompt - "priority": len( - template_to_solution_diff - ), # Not really relevant for the prompt - "submission_file": file_content, - "max_points": exercise.max_points, - "bonus_points": exercise.bonus_points, - "solution_to_submission_diff": solution_to_submission_diff, - "template_to_submission_diff": template_to_submission_diff, - "template_to_solution_diff": template_to_solution_diff, - "grading_instructions": grading_instructions, - "problem_statement": problem_statement, - } - ) - - # Filter long prompts (omitting features if necessary) - # Lowest priority features are at the top of the 
list (i.e. they are omitted first if necessary) - # "submission_file" is not omittable, because it is the main input containing the line numbers - # In the future we might be able to include the line numbers in the diff, but for now we need to keep it - omittable_features = [ - "template_to_solution_diff", # If it is even included in the prompt (has the lowest priority since it is indirectly included in other diffs) - "problem_statement", - "grading_instructions", - "solution_to_submission_diff", - "template_to_submission_diff", # In the future we might indicate the changed lines in the submission_file additionally - ] - - prompt_inputs = [ - omitted_prompt_input - for omitted_prompt_input, should_run in [ - check_prompt_length_and_omit_features_if_necessary( - prompt=chat_prompt, - prompt_input=prompt_input, - max_input_tokens=config.max_input_tokens, - omittable_features=omittable_features, - debug=debug, - ) - for prompt_input in prompt_inputs - ] - if should_run - ] - - # If we have many files we need to filter and prioritize them - if len(prompt_inputs) > config.max_number_of_files: - programming_language_extension = get_programming_language_file_extension( - programming_language=exercise.programming_language - ) - - # Prioritize files that have a diff between solution and submission - prompt_inputs = sorted(prompt_inputs, key=lambda x: x["priority"], reverse=True) - - filtered_prompt_inputs = [] - if programming_language_extension is not None: - filtered_prompt_inputs = [ - prompt_input - for prompt_input in prompt_inputs - if prompt_input["file_path"].endswith(programming_language_extension) - ] - - while ( - len(filtered_prompt_inputs) < config.max_number_of_files and prompt_inputs - ): - filtered_prompt_inputs.append(prompt_inputs.pop(0)) - prompt_inputs = filtered_prompt_inputs - - # noinspection PyTypeChecker - results: List[Optional[AssessmentModel]] = await asyncio.gather( - *[ - predict_and_parse( - model=model, - chat_prompt=chat_prompt, - 
prompt_input=prompt_input, - pydantic_object=AssessmentModel, - tags=[ - f"exercise-{exercise.id}", - f"submission-{submission.id}", - f"file-{prompt_input['file_path']}", - "generate-suggestions-by-file", - ], - ) - for prompt_input in prompt_inputs - ] - ) - - if debug: - emit_meta( - "generate_suggestions", - [ - { - "file_path": prompt_input["file_path"], - "prompt": chat_prompt.format(**prompt_input), - "result": result.dict() if result is not None else None, - } - for prompt_input, result in zip(prompt_inputs, results) - ], - ) - - grading_instruction_ids = set( - grading_instruction.id - for criterion in exercise.grading_criteria or [] - for grading_instruction in criterion.structured_grading_instructions - ) - - feedbacks: List[Feedback] = [] - for prompt_input, result in zip(prompt_inputs, results): - file_path = prompt_input["file_path"] - if result is None: - continue - for feedback in result.feedbacks: - grading_instruction_id = ( - feedback.grading_instruction_id - if feedback.grading_instruction_id in grading_instruction_ids - else None - ) - feedbacks.append( - Feedback( - exercise_id=exercise.id, - submission_id=submission.id, - title=feedback.title, - description=feedback.description, - file_path=file_path, - line_start=feedback.line_start, - line_end=feedback.line_end, - credits=feedback.credits, - structured_grading_instruction_id=grading_instruction_id, - is_graded=True, - meta={}, - ) - ) - - return feedbacks diff --git a/modules/programming/module_programming_llm/module_programming_llm/generate_non_graded_suggestions_by_file.py b/modules/programming/module_programming_llm/module_programming_llm/generate_non_graded_suggestions_by_file.py deleted file mode 100644 index de654a427..000000000 --- a/modules/programming/module_programming_llm/module_programming_llm/generate_non_graded_suggestions_by_file.py +++ /dev/null @@ -1,266 +0,0 @@ -from typing import List, Optional, Sequence -import os -import asyncio -from pydantic import BaseModel, Field - 
-from athena import emit_meta -from athena.programming import Exercise, Submission, Feedback - -from module_programming_llm.config import NonGradedBasicApproachConfig -from module_programming_llm.generate_summary_by_file import generate_summary_by_file -from module_programming_llm.split_problem_statement_by_file import ( - split_problem_statement_by_file, -) -from llm_core.utils.llm_utils import ( - check_prompt_length_and_omit_features_if_necessary, - get_chat_prompt_with_formatting_instructions, - num_tokens_from_string, -) -from llm_core.utils.predict_and_parse import predict_and_parse - -from module_programming_llm.helpers.utils import ( - get_diff, - load_files_from_repo, - add_line_numbers, - get_programming_language_file_extension, -) - - -class FeedbackModel(BaseModel): - title: str = Field( - description="Very short title, i.e. feedback category", example="Logic Error" - ) - description: str = Field(description="Feedback description") - line_start: Optional[int] = Field( - description="Referenced line number start, or empty if unreferenced" - ) - line_end: Optional[int] = Field( - description="Referenced line number end, or empty if unreferenced" - ) - - class Config: - title = "Feedback" - - -class ImprovementModel(BaseModel): - """Collection of feedbacks making up an improvement""" - - feedbacks: Sequence[FeedbackModel] = Field(description="Improvement feedbacks") - - class Config: - title = "Improvement" - - -# pylint: disable=too-many-locals -async def generate_suggestions_by_file( - exercise: Exercise, - submission: Submission, - config: NonGradedBasicApproachConfig, - debug: bool, -) -> List[Feedback]: - model = config.model.get_model() # type: ignore[attr-defined] - - chat_prompt = get_chat_prompt_with_formatting_instructions( - model=model, - system_message=config.generate_suggestions_by_file_prompt.system_message, - human_message=config.generate_suggestions_by_file_prompt.human_message, - pydantic_object=ImprovementModel, - ) - - prompt_inputs: 
List[dict] = [] - - # Feature extraction - template_repo = exercise.get_template_repository() - submission_repo = submission.get_repository() - - changed_files_from_template_to_submission = get_diff( - src_repo=template_repo, dst_repo=submission_repo, file_path=None, name_only=True - ).split("\n") - changed_files_from_template_to_submission = [ - os.path.join(str(submission_repo.working_tree_dir or ""), file_path) - for file_path in changed_files_from_template_to_submission - ] - - # Changed text files - changed_files = load_files_from_repo( - submission_repo, - file_filter=lambda file_path: file_path - in changed_files_from_template_to_submission, - ) - - # Get solution summary by file (if necessary) - solution_summary = await generate_summary_by_file( - exercise=exercise, - submission=submission, - prompt=chat_prompt, - config=config, - debug=debug, - ) - summary_string = solution_summary.describe_solution_summary() if solution_summary is not None else "" - - - # Get split problem statement by file (if necessary) - split_problem_statement = await split_problem_statement_by_file( - exercise=exercise, - submission=submission, - prompt=chat_prompt, - config=config, - debug=debug, - ) - - problem_statement_tokens = num_tokens_from_string(exercise.problem_statement or "") - is_short_problem_statement = ( - problem_statement_tokens - <= config.split_problem_statement_by_file_prompt.tokens_before_split - ) - file_problem_statements = ( - { - item.file_name: item.problem_statement - for item in split_problem_statement.items - } - if split_problem_statement is not None - else {} - ) - - # Gather prompt inputs for each changed file (independently) - for file_path, file_content in changed_files.items(): - problem_statement = ( - exercise.problem_statement or "" - if is_short_problem_statement - else file_problem_statements.get( - file_path, "No relevant problem statement section found." 
- ) - ) - problem_statement = ( - problem_statement - if problem_statement.strip() - else "No problem statement found." - ) - - file_content = add_line_numbers(file_content) - diff_lines = get_diff( - src_repo=template_repo, - dst_repo=submission_repo, - src_prefix="template", - dst_prefix="submission", - file_path=file_path, - ) - - diff_lines_list = diff_lines.split("\n") - - diff_without_deletions = [] - - for line in diff_lines_list: - if not line.startswith("-"): - diff_without_deletions.append(line) - - template_to_submission_diff = "\n".join(diff_without_deletions) - - prompt_inputs.append( - { - "submission_file": file_content, - "template_to_submission_diff": template_to_submission_diff, - "problem_statement": problem_statement, - "file_path": file_path, - "summary": summary_string, - } - ) - - omittable_features = [ - "template_to_submission_diff", - "summary", - "problem_statement", - # In the future we might indicate the changed lines in the submission_file additionally - ] - - prompt_inputs = [ - omitted_prompt_input - for omitted_prompt_input, should_run in [ - check_prompt_length_and_omit_features_if_necessary( - prompt=chat_prompt, - prompt_input=prompt_input, - max_input_tokens=config.max_input_tokens, - omittable_features=omittable_features, - debug=debug, - ) - for prompt_input in prompt_inputs - ] - if should_run - ] - - # If we have many files we need to filter and prioritize them - if len(prompt_inputs) > config.max_number_of_files: - programming_language_extension = get_programming_language_file_extension( - programming_language=exercise.programming_language - ) - - # Prioritize files that have a diff between solution and submission - prompt_inputs = sorted(prompt_inputs, key=lambda x: x["priority"], reverse=True) - - filtered_prompt_inputs = [] - if programming_language_extension is not None: - filtered_prompt_inputs = [ - prompt_input - for prompt_input in prompt_inputs - if prompt_input["file_path"].endswith(programming_language_extension) 
- ] - - while ( - len(filtered_prompt_inputs) < config.max_number_of_files and prompt_inputs - ): - filtered_prompt_inputs.append(prompt_inputs.pop(0)) - prompt_inputs = filtered_prompt_inputs - - # noinspection PyTypeChecker - results: List[Optional[ImprovementModel]] = await asyncio.gather( - *[ - predict_and_parse( - model=model, - chat_prompt=chat_prompt, - prompt_input=prompt_input, - pydantic_object=ImprovementModel, - tags=[ - f"exercise-{exercise.id}", - f"submission-{submission.id}", - f"file-{prompt_input['file_path']}", - "generate-suggestions-by-file", - ], - ) - for prompt_input in prompt_inputs - ] - ) - - if debug: - emit_meta( - "generate_suggestions", - [ - { - "file_path": prompt_input["file_path"], - "prompt": chat_prompt.format(**prompt_input), - "result": result.dict() if result is not None else None, - } - for prompt_input, result in zip(prompt_inputs, results) - ], - ) - - feedbacks: List[Feedback] = [] - for prompt_input, result in zip(prompt_inputs, results): - file_path = prompt_input["file_path"] - if result is None: - continue - for feedback in result.feedbacks: - feedbacks.append( - Feedback( - exercise_id=exercise.id, - submission_id=submission.id, - title=feedback.title, - description=feedback.description, - file_path=file_path, - line_start=feedback.line_start, - line_end=feedback.line_end, - is_graded=False, - meta={}, - ) - ) - - return feedbacks diff --git a/modules/programming/module_programming_llm/module_programming_llm/generate_summary_by_file.py b/modules/programming/module_programming_llm/module_programming_llm/generate_summary_by_file.py deleted file mode 100644 index e5ac6aad7..000000000 --- a/modules/programming/module_programming_llm/module_programming_llm/generate_summary_by_file.py +++ /dev/null @@ -1,158 +0,0 @@ -import asyncio -import os -from typing import Optional, List, Dict - -from pydantic import BaseModel, Field -from langchain.prompts import ChatPromptTemplate - -from athena import emit_meta -from 
athena.programming import Exercise, Submission - -from module_programming_llm.config import GradedBasicApproachConfig, BasicApproachConfig -from llm_core.utils.llm_utils import ( - get_chat_prompt_with_formatting_instructions, - num_tokens_from_prompt, -) -from llm_core.utils.predict_and_parse import predict_and_parse - -from module_programming_llm.helpers.utils import ( - get_diff, - load_files_from_repo, - add_line_numbers -) - - -class FileDescription(BaseModel): - file_name: str = Field(description="File name") - description: str = Field(description="Summary relevant for this file") - - class Config: - title = "FileDescription" - - -class SolutionSummary(BaseModel): - """Collection of summaries, accessible by file path""" - - items: Dict[str, str] = Field(description="File summaries indexed by file path") - - class Config: - title = "SolutionSummary" - - def describe_solution_summary(self) -> str: - descriptions = [] - for file_path, file_summary in self.items.items(): - description = f"File {file_path}: {file_summary}" - descriptions.append(description) - return "\n".join(descriptions) - - -# pylint: disable=too-many-locals -async def generate_summary_by_file( - exercise: Exercise, - submission: Submission, - prompt: ChatPromptTemplate, - config: BasicApproachConfig, - debug: bool, -) -> Optional[SolutionSummary]: - """Generaty summary for the submission file by file - - Args: - exercise (Exercise): Exercise to split the problem statement for (respecting the changed files) - submission (Submission): Submission to split the problem statement for (respecting the changed files) - prompt (ChatPromptTemplate): Prompt template to check for problem_statement - config (GradedBasicApproachConfig): Configuration - - Returns: - Optional[SolutionSummary]: Summarization of the given submission, None if it is too short or too long - """ - - # Return None if submission_file not in the prompt - if "summary" not in prompt.input_variables: - return None - - model = 
config.model.get_model() # type: ignore[attr-defined] - - template_repo = exercise.get_template_repository() - submission_repo = submission.get_repository() - - changed_files_from_template_to_submission = get_diff( - src_repo=template_repo, dst_repo=submission_repo, file_path=None, name_only=True - ).split("\n") - changed_files_from_template_to_submission = [ - os.path.join(str(submission_repo.working_tree_dir or ""), file_path) - for file_path in changed_files_from_template_to_submission - ] - - # Changed text files - changed_files = load_files_from_repo( - submission_repo, - file_filter=lambda file_path: file_path in changed_files_from_template_to_submission, - ) - chat_prompt = get_chat_prompt_with_formatting_instructions( - model=model, - system_message=config.generate_file_summary_prompt.system_message, - human_message=config.generate_file_summary_prompt.human_message, - pydantic_object=SolutionSummary, - ) - - prompt_inputs = [] - - # Gather prompt inputs for each file (independently) - for file_path, file_content in changed_files.items(): - file_content = add_line_numbers(file_content) - - prompt_inputs.append( - { - "submission_file": file_content, - "file_path": file_path, - } - ) - - valid_prompt_inputs = [ - prompt_input - for prompt_input in prompt_inputs - if num_tokens_from_prompt(chat_prompt, prompt_input) <= config.max_input_tokens - ] - - # noinspection PyTypeChecker - results: List[Optional[FileDescription]] = await asyncio.gather( - *[ - predict_and_parse( - model=model, - chat_prompt=chat_prompt, - prompt_input=prompt_input, - pydantic_object=FileDescription, - tags=[ - f"exercise-{exercise.id}", - f"submission-{submission.id}", - f"file-{prompt_input['file_path']}", - "generate-summary-by-file", - ], - ) - for prompt_input in valid_prompt_inputs - ] - ) - - if debug: - for prompt_input, result in zip(valid_prompt_inputs, results): - emit_meta( - "file_summary", - { - "prompt": chat_prompt.format(**prompt_input), - "result": result.dict() if 
result is not None else None, - "file_path": prompt_input[ - "file_path" - ], # Include the file path for reference - }, - ) - - if not any(result is not None for result in results): - return None - - items_dict = {} - - for _, file_summary in enumerate(results): - if file_summary is not None: - items_dict[file_summary.file_name] = file_summary.description - - return SolutionSummary(items=items_dict) diff --git a/modules/programming/module_programming_llm/module_programming_llm/helpers/utils.py b/modules/programming/module_programming_llm/module_programming_llm/helpers/utils.py index f708fa5af..5ea15b51c 100644 --- a/modules/programming/module_programming_llm/module_programming_llm/helpers/utils.py +++ b/modules/programming/module_programming_llm/module_programming_llm/helpers/utils.py @@ -6,7 +6,7 @@ from git import Remote from git.repo import Repo -from langchain.document_loaders import GitLoader +from langchain_community.document_loaders import GitLoader from athena import GradingCriterion diff --git a/modules/programming/module_programming_llm/module_programming_llm/helpers/web_search.py b/modules/programming/module_programming_llm/module_programming_llm/helpers/web_search.py new file mode 100644 index 000000000..bba5c47a4 --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/helpers/web_search.py @@ -0,0 +1,47 @@ +from typing import Sequence, List + +from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain + +from langchain_community.retrievers import WebResearchRetriever +from langchain_community.utilities import GoogleSearchAPIWrapper +from langchain_community.vectorstores import Chroma +from langchain_core.tools import Tool +from langchain_openai import OpenAIEmbeddings + +from llm_core.models import ModelConfigType + + +def bulk_search(queries: Sequence[str], model: ModelConfigType) -> List[str]: + result = [] + for query in queries: + result.append(answer_query(query, model)) + return result + + 
+def answer_query(query, model: ModelConfigType): + model = model.get_model() # type: ignore[attr-defined] + vectorstore = Chroma( + embedding_function=OpenAIEmbeddings(), persist_directory="./chroma_db_oai" + ) + + # Search + search = GoogleSearchAPIWrapper() + + # # Initialize + web_search_retriever = WebResearchRetriever.from_llm( + vectorstore=vectorstore, llm=model, search=search, allow_dangerous_requests=True + ) + qa_chain = RetrievalQAWithSourcesChain.from_chain_type( + model, retriever=web_search_retriever + ) + result = qa_chain({"question": query}) + + search = GoogleSearchAPIWrapper() + + tool = Tool( + name="google_search", + description="Search Google for recent results.", + func=search.run, + ) + + return tool.run(query) diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/__init__.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/__init__.py index e69de29bb..9fee9cd66 100644 --- a/modules/programming/module_programming_llm/module_programming_llm/prompts/__init__.py +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/__init__.py @@ -0,0 +1,11 @@ +from .generate_suggestions_by_file import GenerateSuggestionsByFile, GenerateSuggestionsByFileOutput +from .generate_file_summary import GenerateFileSummary +from .split_problem_statement_by_file import SplitProblemStatementByFile +from .split_grading_instructions_by_file import SplitGradingInstructionsByFile +from .validate_suggestions import ValidateSuggestions +from .filter_out_solution import FilterOutSolution +from .generate_grading_criterion import GenerateGradingCriterion + +__all__ = ['GenerateSuggestionsByFile', 'GenerateFileSummary', + 'SplitGradingInstructionsByFile', 'GenerateSuggestionsByFileOutput', 'SplitProblemStatementByFile', + 'ValidateSuggestions', 'FilterOutSolution', 'GenerateGradingCriterion'] diff --git 
a/modules/programming/module_programming_llm/module_programming_llm/prompts/filter_out_solution/__init__.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/filter_out_solution/__init__.py new file mode 100644 index 000000000..487f053ab --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/filter_out_solution/__init__.py @@ -0,0 +1,9 @@ +from .filter_out_solution_input import FilterOutSolutionInput +from .filter_out_solution_output import FilterOutSolutionOutput +from .filter_out_solution import FilterOutSolution + +__all__ = [ + "FilterOutSolution", + "FilterOutSolutionOutput", + "FilterOutSolutionInput", +] \ No newline at end of file diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/filter_out_solution/filter_out_solution.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/filter_out_solution/filter_out_solution.py new file mode 100644 index 000000000..c7e500881 --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/filter_out_solution/filter_out_solution.py @@ -0,0 +1,179 @@ +import asyncio +import json +import os +from collections import defaultdict +from typing import Optional, List, Sequence, Dict + +from athena import emit_meta +from module_programming_llm.prompts.pipeline_step import PipelineStep +from .filter_out_solution_input import FilterOutSolutionInput +from .filter_out_solution_output import FilterOutSolutionOutput +from module_programming_llm.prompts.generate_suggestions_by_file.generate_suggestions_by_file_output import \ + FeedbackModel as SuggestionsFeedbackModel +from .prompt import system_message as prompt_system_message, human_message as prompt_human_message +from pydantic import Field +from llm_core.utils.llm_utils import ( + get_chat_prompt_with_formatting_instructions, + num_tokens_from_string, + check_prompt_length_and_omit_features_if_necessary, +) +from 
llm_core.utils.predict_and_parse import predict_and_parse +from module_programming_llm.helpers.utils import ( + get_diff, + load_files_from_repo, + add_line_numbers +) +from llm_core.models import ModelConfigType + +class FilterOutSolution(PipelineStep[FilterOutSolutionInput, List[Optional[FilterOutSolutionOutput]]]): + """A pipeline step to remove potential solutions from the output.""" + + system_message: str = Field(prompt_system_message, + description="Message for priming AI behavior and instructing it what to do.") + human_message: str = Field(prompt_human_message, + description="Message from a human. The input on which the AI is supposed to act.") + tokens_before_split: int = Field(default=2000, + description="Split the grading instructions into file-based ones after this number of tokens.") + + # pylint: disable=too-many-locals + async def process( + self, + input_data: FilterOutSolutionInput, + debug: bool, + model: ModelConfigType # type: ignore + ) -> List[Optional[FilterOutSolutionOutput]]: + model = model.get_model() # type: ignore[attr-defined] + + # Prepare the prompt template + prompt = get_chat_prompt_with_formatting_instructions( + model=model, + system_message=self.system_message, + human_message=self.human_message, + pydantic_object=FilterOutSolutionOutput, + ) + + problem_statement_tokens = num_tokens_from_string(input_data.problem_statement or "") + is_short_problem_statement = problem_statement_tokens <= self.tokens_before_split + file_problem_statements = ( + { + item.file_path: item.problem_statement + for item in input_data.problem_statement_by_file.items + } + if input_data.problem_statement_by_file is not None + else {} + ) + + prompt_inputs: List[dict] = [] + + solution_repo = input_data.solution_repo + template_repo = input_data.template_repo + + changed_files_from_template_to_solution = get_diff( + src_repo=template_repo, dst_repo=solution_repo, file_path=None, name_only=True + ).split("\n") + changed_files_from_template_to_solution = 
[ + os.path.join(str(solution_repo.working_tree_dir or ""), file_path) + for file_path in changed_files_from_template_to_solution + ] + + # Changed text files + changed_files = load_files_from_repo( + solution_repo, + file_filter=lambda file_path: file_path in changed_files_from_template_to_solution, + ) + + feedback_suggestions_by_file: Dict[str, Sequence[SuggestionsFeedbackModel]] = {} + for feedback in input_data.feedback_suggestions: + if feedback is not None: + feedback_suggestions_by_file[feedback.file_path] = feedback.feedbacks + + # Gather prompt inputs for each file + for file_path, file_content in changed_files.items(): + problem_statement = ( + input_data.problem_statement or "" + if is_short_problem_statement + else file_problem_statements.get( + file_path, "No relevant problem statement section found." + ) + ) + problem_statement = ( + problem_statement + if problem_statement.strip() + else "No problem statement found." + ) + + template_to_solution_diff = get_diff( + src_repo=template_repo, + dst_repo=solution_repo, + src_prefix="template", + dst_prefix="solution", + file_path=file_path, + ) + + file_content = add_line_numbers(file_content) + + prompt_inputs.append( + { + "file_path": file_path, + "code_with_line_numbers": file_content, + "problem_statement": problem_statement, + "template_to_solution_diff": template_to_solution_diff, + "feedback_suggestions": json.dumps( + [ob.__dict__ for ob in feedback_suggestions_by_file.get(file_path) or []]) + } + ) + + # Filter long prompts if necessary + omittable_features = [ + "problem_statement", + ] + + prompt_inputs = [ + omitted_prompt_input + for omitted_prompt_input, should_run in [ + check_prompt_length_and_omit_features_if_necessary( + prompt=prompt, + prompt_input=prompt_input, + max_input_tokens=self.max_input_tokens, + omittable_features=omittable_features, + debug=debug, + ) + for prompt_input in prompt_inputs + ] + if should_run + ] + + # Send prompts to the LLM + # noinspection PyTypeChecker + 
results: List[Optional[FilterOutSolutionOutput]] = await asyncio.gather( + *[ + predict_and_parse( + model=model, + chat_prompt=prompt, + prompt_input=prompt_input, + pydantic_object=FilterOutSolutionOutput, + tags=[ + f"exercise-{input_data.exercise_id}", + f"submission-{input_data.submission_id}", + f"file-{prompt_input['file_path']}", + "filter-out-solution", + ], + ) + for prompt_input in prompt_inputs + ] + ) + + if debug: + emit_meta( + "filter_out_solutions", + [ + { + "file_path": prompt_input["file_path"], + "prompt": prompt.format(**prompt_input), + "result": result.dict() if result is not None else None, + } + for prompt_input, result in zip(prompt_inputs, results) + ], + ) + + return results diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/filter_out_solution/filter_out_solution_input.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/filter_out_solution/filter_out_solution_input.py new file mode 100644 index 000000000..fbbba474c --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/filter_out_solution/filter_out_solution_input.py @@ -0,0 +1,30 @@ +from typing import Optional, List + +from git import Repo + +from module_programming_llm.prompts.generate_suggestions_by_file import GenerateSuggestionsByFileOutput +from module_programming_llm.prompts.split_problem_statement_by_file import SplitProblemStatementByFileOutput + + +class FilterOutSolutionInput: + """ + DTO class for filtering out + """ + solution_repo: Repo + template_repo: Repo + problem_statement_by_file: Optional[SplitProblemStatementByFileOutput] + problem_statement: Optional[str] + exercise_id: int + submission_id: int + feedback_suggestions: List[Optional[GenerateSuggestionsByFileOutput]] + + def __init__(self, solution_repo: Repo, template_repo: Repo, problem_statement: Optional[str], exercise_id: int, + submission_id: int, feedback_suggestions: 
List[Optional[GenerateSuggestionsByFileOutput]], + problem_statement_by_file: Optional[SplitProblemStatementByFileOutput]): + self.solution_repo = solution_repo + self.template_repo = template_repo + self.exercise_id = exercise_id + self.submission_id = submission_id + self.problem_statement_by_file = problem_statement_by_file + self.problem_statement = problem_statement + self.feedback_suggestions = feedback_suggestions diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/filter_out_solution/filter_out_solution_output.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/filter_out_solution/filter_out_solution_output.py new file mode 100644 index 000000000..463a53fda --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/filter_out_solution/filter_out_solution_output.py @@ -0,0 +1,33 @@ +from typing import Optional, Sequence + +from pydantic import BaseModel, Field + + +class FeedbackModel(BaseModel): + title: str = Field( + description="Very short title, i.e. 
feedback category", example="Logic Error" + ) + description: str = Field(description="Feedback description") + line_start: Optional[int] = Field( + description="Referenced line number start, or empty if unreferenced" + ) + line_end: Optional[int] = Field( + description="Referenced line number end, or empty if unreferenced" + ) + credits: float = Field(0.0, description="Number of points received/deducted") + grading_instruction_id: Optional[int] = Field( + description="ID of the grading instruction that was used to generate this feedback, or empty if no grading instruction was used" + ) + + class Config: + title = "Feedback" + + +class FilterOutSolutionOutput(BaseModel): + """Collection of feedbacks making up an assessment for a file""" + + feedbacks: Sequence[FeedbackModel] = Field(description="Assessment feedbacks", default=[]) + file_path: str = Field(description="The full path of the file, as specified in the input prompt") + + class Config: + title = "Assessment" \ No newline at end of file diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/filter_out_solution/prompt.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/filter_out_solution/prompt.py new file mode 100644 index 000000000..b9a1a855c --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/filter_out_solution/prompt.py @@ -0,0 +1,20 @@ +system_message = """\ +You are a tutor at a very prestigious university. +# Task +You are given generated feedback suggestions for a programming exercise. +It is absolutely forbidden to reveal the solution to students. +Filter out feedback suggestions that contain solutions or solution hints. Stick to the same format. +In case a suggestion contains solution, try to rewrite it to nudge the student's understanding while hiding the solution. 
+Problem Statement: +{problem_statement} +Git diff between official template and solution: +{template_to_solution_diff} +""" + +human_message = """\ +Path: {file_path} +Feedback Suggestions: +\"\"\" +{feedback_suggestions} +\"\"\" +""" diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_file_summary/__init__.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_file_summary/__init__.py new file mode 100644 index 000000000..e9cdfe6a2 --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_file_summary/__init__.py @@ -0,0 +1,9 @@ +from .generate_file_summary import GenerateFileSummary +from .generate_file_summary_input import GenerateFileSummaryInput +from .generate_file_summary_output import GenerateFileSummaryOutput + +__all__ = [ + "GenerateFileSummary", + "GenerateFileSummaryInput", + "GenerateFileSummaryOutput", +] \ No newline at end of file diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_file_summary/generate_file_summary.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_file_summary/generate_file_summary.py new file mode 100644 index 000000000..5c544a54b --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_file_summary/generate_file_summary.py @@ -0,0 +1,126 @@ +import asyncio +import os +from typing import Optional, List + +from athena import emit_meta +from .generate_file_summary_input import GenerateFileSummaryInput +from .generate_file_summary_output import GenerateFileSummaryOutput +from module_programming_llm.prompts.pipeline_step import PipelineStep +from .generate_file_summary_output import FileDescription +from .prompt import system_message as prompt_system_message, human_message as prompt_human_message +from pydantic import Field +from llm_core.utils.llm_utils import ( + 
get_chat_prompt_with_formatting_instructions, + num_tokens_from_prompt +) +from llm_core.utils.predict_and_parse import predict_and_parse +from module_programming_llm.helpers.utils import ( + get_diff, + load_files_from_repo, + add_line_numbers +) +from llm_core.models import ModelConfigType + + +class GenerateFileSummary(PipelineStep[GenerateFileSummaryInput, Optional[GenerateFileSummaryOutput]]): + """Generates concise summaries of submission files, facilitating a quicker review and understanding of the content for AI processing.""" + + system_message: str = Field(prompt_system_message, + description="Message for priming AI behavior and instructing it what to do.") + human_message: str = Field(prompt_human_message, + description="Message from a human. The input on which the AI is supposed to act.") + + # pylint: disable=too-many-locals + async def process(self, input_data: GenerateFileSummaryInput, debug: bool, model: ModelConfigType) -> Optional[GenerateFileSummaryOutput]: # type: ignore + """Generate a summary for the submission, file by file. + + Args: + input_data (GenerateFileSummaryInput): Input data containing template and submission repositories. + + Returns: + GenerateFileSummaryOutput: Summarized details of the submission files. 
+ """ + + prompt = get_chat_prompt_with_formatting_instructions( + model=model.get_model(), # type: ignore[attr-defined] + system_message=self.system_message, + human_message=self.human_message, + pydantic_object=FileDescription, + ) + + changed_files_from_template_to_submission = get_diff( + src_repo=input_data.template_repo, dst_repo=input_data.submission_repo, file_path=None, name_only=True + ).split("\n") + + changed_files_from_template_to_submission = [ + os.path.join(str(input_data.submission_repo.working_tree_dir or ""), file_path) + for file_path in changed_files_from_template_to_submission + ] + + # Changed text files + changed_files = load_files_from_repo( + input_data.submission_repo, + file_filter=lambda file_path: file_path in changed_files_from_template_to_submission, + ) + + prompt_inputs = [] + + # Gather prompt inputs for each file (independently) + for file_path, file_content in changed_files.items(): + file_content = add_line_numbers(file_content) + + prompt_inputs.append( + { + "submission_file": file_content, + "file_path": file_path, + } + ) + + valid_prompt_inputs = [ + prompt_input + for prompt_input in prompt_inputs + if num_tokens_from_prompt(prompt, prompt_input) <= self.max_input_tokens + ] + + # noinspection PyTypeChecker + results: List[Optional[FileDescription]] = await asyncio.gather( + *[ + predict_and_parse( + model=model.get_model(), # type: ignore[attr-defined] + chat_prompt=prompt, + prompt_input=prompt_input, + pydantic_object=FileDescription, + tags=[ + f"exercise-{input_data.exercise_id}", + f"submission-{input_data.submission_id}", + f"file-{prompt_input['file_path']}", + "generate-summary-by-file", + ], + ) + for prompt_input in valid_prompt_inputs + ] + ) + + if debug: + for prompt_input, result in zip(valid_prompt_inputs, results): + emit_meta( + "file_summary", + { + "prompt": prompt.format(**prompt_input), + "result": result.dict() if result is not None else None, + "file_path": prompt_input[ + "file_path" + ], # 
Include the file path for reference + }, + ) + + if not any(result is not None for result in results): + return None + + items_dict = {} + + for _, file_summary in enumerate(results): + if file_summary is not None: + items_dict[file_summary.file_path] = file_summary.description + + return GenerateFileSummaryOutput(items=items_dict) diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_file_summary/generate_file_summary_input.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_file_summary/generate_file_summary_input.py new file mode 100644 index 000000000..ac6c0012b --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_file_summary/generate_file_summary_input.py @@ -0,0 +1,16 @@ +from git import Repo + +class GenerateFileSummaryInput: + """ + DTO class for generating file summaries, containing repository details. + """ + template_repo: Repo + submission_repo: Repo + exercise_id: int + submission_id: int + + def __init__(self, template_repo: Repo, submission_repo: Repo, exercise_id: int, submission_id: int): + self.template_repo = template_repo + self.submission_repo = submission_repo + self.exercise_id = exercise_id + self.submission_id = submission_id \ No newline at end of file diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_file_summary/generate_file_summary_output.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_file_summary/generate_file_summary_output.py new file mode 100644 index 000000000..840e72dac --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_file_summary/generate_file_summary_output.py @@ -0,0 +1,27 @@ +from typing import Dict + +from pydantic import BaseModel, Field + + +class FileDescription(BaseModel): + file_path: str = Field(description="The path of the file, as specified in the 
input prompt") + description: str = Field(description="Summary relevant for the file") + + class Config: + title = "FileDescription" + + +class GenerateFileSummaryOutput(BaseModel): + """Collection of summaries, accessible by file path""" + + items: Dict[str, str] = Field(description="A dictionary of file-wise summary objects") + + class Config: + title = "SolutionSummary" + + def describe_solution_summary(self) -> str: + descriptions = [] + for file_path, file_summary in self.items.items(): + description = f"File {file_path}: {file_summary}" + descriptions.append(description) + return "\n".join(descriptions) \ No newline at end of file diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/summarize_submission_by_file.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_file_summary/prompt.py similarity index 81% rename from modules/programming/module_programming_llm/module_programming_llm/prompts/summarize_submission_by_file.py rename to modules/programming/module_programming_llm/module_programming_llm/prompts/generate_file_summary/prompt.py index 7ba2d1281..15b61a9af 100644 --- a/modules/programming/module_programming_llm/module_programming_llm/prompts/summarize_submission_by_file.py +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_file_summary/prompt.py @@ -14,9 +14,7 @@ Include full path for files where necessary. # Output -If 'Explanation' is empty, put double quotes around. Adhere to schema to correctly encapsulate 'Explanation' only into the response. (Critical!) -It is absolutely unacceptable to include any free text that is not part of schema or any format violating response. 
""" human_message = """\ diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_grading_criterion/__init__.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_grading_criterion/__init__.py new file mode 100644 index 000000000..e1b53ccbf --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_grading_criterion/__init__.py @@ -0,0 +1,9 @@ +from .generate_grading_criterion_output import GenerateGradingCriterionOutput +from .generate_grading_criterion_input import GenerateGradingCriterionInput +from .generate_grading_criterion import GenerateGradingCriterion + +__all__ = [ + "GenerateGradingCriterion", + "GenerateGradingCriterionInput", + "GenerateGradingCriterionOutput", +] \ No newline at end of file diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_grading_criterion/generate_grading_criterion.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_grading_criterion/generate_grading_criterion.py new file mode 100644 index 000000000..062e4336f --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_grading_criterion/generate_grading_criterion.py @@ -0,0 +1,101 @@ +import asyncio +import json +import os +from typing import Optional, List + +from athena import emit_meta +from module_programming_llm.prompts.pipeline_step import PipelineStep +from .generate_grading_criterion_input import GenerateGradingCriterionInput +from .generate_grading_criterion_output import GenerateGradingCriterionOutput +from .prompt import system_message as prompt_system_message, human_message as prompt_human_message +from pydantic import Field +from llm_core.utils.llm_utils import ( + get_chat_prompt_with_formatting_instructions, + num_tokens_from_string, + check_prompt_length_and_omit_features_if_necessary, num_tokens_from_prompt, +) +from 
llm_core.utils.predict_and_parse import predict_and_parse +from module_programming_llm.helpers.utils import ( + get_diff, + load_files_from_repo, + add_line_numbers, get_programming_language_file_extension +) +from llm_core.models import ModelConfigType + + +class GenerateGradingCriterion(PipelineStep[GenerateGradingCriterionInput, Optional[GenerateGradingCriterionOutput]]): + """Generates structured grading instructions.""" + + system_message: str = Field(prompt_system_message, + description="Message for priming AI behavior and instructing it what to do.") + human_message: str = Field(prompt_human_message, + description="Message from a human. The input on which the AI is supposed to act.") + tokens_before_split: int = Field(default=2000, + description="Split the grading instructions into file-based ones after this number of tokens.") + + async def process(self, input_data: GenerateGradingCriterionInput, debug: bool, model: ModelConfigType) -> Optional[GenerateGradingCriterionOutput]: # type: ignore + model = model.get_model() # type: ignore[attr-defined] + + changed_files_from_template_to_solution = get_diff( + src_repo=input_data.template_repo, + dst_repo=input_data.solution_repo, + file_path=None, + name_only=True + ).split("\n") + + all_changed_files = load_files_from_repo( + input_data.solution_repo + ) + changed_files = {} + changed_files_content = "" + for file in changed_files_from_template_to_solution: + if not file.endswith('.pbxproj'): + changed_files[file] = get_diff( + src_repo=input_data.template_repo, + dst_repo=input_data.solution_repo, + src_prefix="template", + dst_prefix="solution", + file_path=file, + ) + changed_files_content += "\n" + file + ":" + changed_files[file] + + prompt = get_chat_prompt_with_formatting_instructions( + model=model, + system_message=self.system_message, + human_message=self.human_message, + pydantic_object=GenerateGradingCriterionOutput + ) + + prompt_input = { + "problem_statement": input_data.problem_statement or "No 
problem statement.", + "grading_instructions": input_data.grading_instructions, + "max_points": input_data.max_points, + "bonus_points": input_data.bonus_points, + "template_to_solution_diff": json.dumps(changed_files) + } + + # Return None if the prompt is too long + if num_tokens_from_prompt(prompt, prompt_input) > self.max_input_tokens: + return None + + generate_grading_criterion = await predict_and_parse( + model=model, + chat_prompt=prompt, + prompt_input=prompt_input, + pydantic_object=GenerateGradingCriterionOutput, + tags=[ + f"exercise-{input_data.exercise_id}", + "generate_grading_criterion" + ] + ) + + if debug: + emit_meta("generate_grading_criterion", { + "prompt": prompt.format(**prompt_input), + "result": generate_grading_criterion.dict() if generate_grading_criterion is not None else None + }) + + if generate_grading_criterion is None or not generate_grading_criterion.structured_grading_criterion: + return None + + return generate_grading_criterion \ No newline at end of file diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_grading_criterion/generate_grading_criterion_input.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_grading_criterion/generate_grading_criterion_input.py new file mode 100644 index 000000000..1c24280a3 --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_grading_criterion/generate_grading_criterion_input.py @@ -0,0 +1,32 @@ +from typing import Optional + +from git import Repo + + +class GenerateGradingCriterionInput: + """ + DTO class for top level feedback generation job + """ + template_repo: Repo + solution_repo: Repo + exercise_id: int + problem_statement: Optional[str] + grading_instructions: Optional[str] + max_points: float + bonus_points: float + + def __init__(self, + template_repo: Repo, + solution_repo: Repo, + exercise_id: int, + max_points: float, + bonus_points: float, + 
problem_statement: Optional[str] = None, + grading_instructions: Optional[str] = None): + self.template_repo = template_repo + self.solution_repo = solution_repo + self.exercise_id = exercise_id + self.problem_statement = problem_statement + self.grading_instructions = grading_instructions + self.max_points = max_points + self.bonus_points = bonus_points diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_grading_criterion/generate_grading_criterion_output.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_grading_criterion/generate_grading_criterion_output.py new file mode 100644 index 000000000..a4facfd56 --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_grading_criterion/generate_grading_criterion_output.py @@ -0,0 +1,8 @@ +from pydantic import BaseModel, Field + +from athena import StructuredGradingCriterion + + +class GenerateGradingCriterionOutput(BaseModel): + """Collection of structured grading criterion for a problem""" + structured_grading_criterion: StructuredGradingCriterion = Field(description="Structured grading criterion") diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_grading_criterion/prompt.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_grading_criterion/prompt.py new file mode 100644 index 000000000..762c0a64e --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_grading_criterion/prompt.py @@ -0,0 +1,27 @@ +system_message = """\ +You are an AI tutor for programming assessment at a prestigious university. + +# Task +Create structured grading criterion for a programming exercise for everything that is required by the problem statement. +Include criteria for syntactically correct programs. 
+ +# Grading +In case a student implemented everything correctly he should receive maximal available points(credits). +If a student made a mistake, he has a chance to compensate with bonus points, if they are available. +Your criterion must cover these cases. + +# Style +1. Constructive, 2. Specific, 3. Balanced, 4. Clear and Concise, 5. Actionable, 6. Educational, 7. Contextual +""" + +human_message = """\ +# Problem statement +{problem_statement} + +# Grading instructions +Markdown grading instructions, if available: {grading_instructions} +Max points: {max_points}, bonus points: {bonus_points} + +# Diff between template (deletions) and sample solution(additions): +{template_to_solution_diff} +""" diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_non_graded_suggestions_by_file.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_non_graded_suggestions_by_file.py deleted file mode 100644 index 9b52fefcf..000000000 --- a/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_non_graded_suggestions_by_file.py +++ /dev/null @@ -1,45 +0,0 @@ -system_message = """\ -You are an AI tutor for programming assessment at a prestigious university. - -# Problem statement -{problem_statement} - -# Task -Create non graded improvement suggestions for a student\'s programming submission that a human tutor would recommend. \ -Assume the tutor is not familiar with the solution. -The feedback must contain only the feedback the student can learn from. -Important: the answer you generate must not contain any solution suggestions or contain corrected errors. -Rather concentrate on incorrectly applied principles or inconsistencies. -Students can move some functionality to other files. -Students can deviate to some degree from the problem statement or book unless they complete all tasks. -Very important, the feedback must be balanced. - -# Style -1. Constructive, 2. Specific, 3. 
Balanced, 4. Clear and Concise, 5. Actionable, 6. Educational, 7. Contextual - -It is strictly prohibited to include feedback that contradicts to the problem statement. -No need to mention anything that is not explicitly in the template->submission diff, as it is out of student's control(e.g. exercise package name). - -In git diff, lines marked with '-' were removed and with '+' were added by the student. - -# The student will be reading your response, use you instead of them -""" - -human_message = """\ -Path: {file_path} - -File(with line numbers : ): -\"\"\" -{submission_file} -\"\"\"\ - -Summary of other files in the solution: -\"\"\" -{summary} -\"\"\" - -The template->submission diff(only as reference): -\"\"\" -{template_to_submission_diff} -\"\"\" -""" diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_suggestions_by_file/__init__.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_suggestions_by_file/__init__.py new file mode 100644 index 000000000..84add8962 --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_suggestions_by_file/__init__.py @@ -0,0 +1,5 @@ +from .generate_suggestions_by_file import GenerateSuggestionsByFile +from .generate_suggestions_by_file_input import GenerateSuggestionsByFileInput +from .generate_suggestions_by_file_output import GenerateSuggestionsByFileOutput + +__all__ = ['GenerateSuggestionsByFile', 'GenerateSuggestionsByFileOutput', 'GenerateSuggestionsByFileInput'] \ No newline at end of file diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_suggestions_by_file/generate_suggestions_by_file.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_suggestions_by_file/generate_suggestions_by_file.py new file mode 100644 index 000000000..429dfbda4 --- /dev/null +++ 
b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_suggestions_by_file/generate_suggestions_by_file.py @@ -0,0 +1,251 @@ +import asyncio +import os +from typing import Optional, List + +from athena import emit_meta +from module_programming_llm.prompts.pipeline_step import PipelineStep +from .generate_suggestions_by_file_input import GenerateSuggestionsByFileInput +from .generate_suggestions_by_file_output import GenerateSuggestionsByFileOutput, FeedbackModel +from .prompt import system_message as prompt_system_message, human_message as prompt_human_message +from pydantic import Field +from llm_core.utils.llm_utils import ( + get_chat_prompt_with_formatting_instructions, + num_tokens_from_string, + check_prompt_length_and_omit_features_if_necessary, +) +from llm_core.utils.predict_and_parse import predict_and_parse +from module_programming_llm.helpers.utils import ( + get_diff, + load_files_from_repo, + add_line_numbers, get_programming_language_file_extension, format_grading_instructions +) +from llm_core.models import ModelConfigType + + +class GenerateSuggestionsByFile(PipelineStep[GenerateSuggestionsByFileInput, List[Optional[GenerateSuggestionsByFileOutput]]]): + """Generates concise feedback for submitted files, facilitating a quicker review and understanding of the content""" + + system_message: str = Field(prompt_system_message, + description="Message for priming AI behavior and instructing it what to do.") + human_message: str = Field(prompt_human_message, + description="Message from a human. The input on which the AI is supposed to act.") + max_number_of_files: int = Field(default=25, + description="Maximum number of files. 
If exceeded, it will prioritize the most important ones.") + tokens_before_split: int = Field(default=2000, + description="Split the grading instructions into file-based ones after this number of tokens.") + + # pylint: disable=too-many-locals + async def process(self, input_data: GenerateSuggestionsByFileInput, debug: bool, model: ModelConfigType) -> List[Optional[GenerateSuggestionsByFileOutput]]: # type: ignore + model = model.get_model() # type: ignore[attr-defined] + + prompt = get_chat_prompt_with_formatting_instructions( + model=model, + system_message=self.system_message, + human_message=self.human_message, + pydantic_object=GenerateSuggestionsByFileOutput, + ) + + problem_statement_tokens = num_tokens_from_string(input_data.problem_statement or "") + is_short_problem_statement = ( + problem_statement_tokens + <= self.tokens_before_split + ) + file_problem_statements = ( + { + item.file_path: item.problem_statement + for item in input_data.problem_statement_by_file.items + } + if input_data.problem_statement_by_file is not None + else {} + ) + + is_short_grading_instructions = ( + num_tokens_from_string(input_data.grading_instructions) + <= self.tokens_before_split + if input_data.grading_instructions is not None + else True + ) + file_grading_instructions = ( + { + item.file_path: item.grading_instructions + for item in input_data.grading_instructions_by_file.items + } + if input_data.grading_instructions_by_file is not None + else {} + ) + + prompt_inputs: List[dict] = [] + + solution_repo = input_data.solution_repo + template_repo = input_data.template_repo + submission_repo = input_data.submission_repo + + changed_files_from_template_to_submission = get_diff( + src_repo=template_repo, dst_repo=submission_repo, file_path=None, name_only=True + ).split("\n") + changed_files_from_template_to_submission = [ + os.path.join(str(submission_repo.working_tree_dir or ""), file_path) + for file_path in changed_files_from_template_to_submission + ] + + # Changed 
text files + changed_files = load_files_from_repo( + submission_repo, + file_filter=lambda file_path: file_path in changed_files_from_template_to_submission, + ) + + # Gather prompt inputs for each changed file (independently) + for file_path, file_content in changed_files.items(): + problem_statement = ( + input_data.problem_statement or "" + if is_short_problem_statement + else file_problem_statements.get( + file_path, "No relevant problem statement section found." + ) + ) + problem_statement = ( + problem_statement + if problem_statement.strip() + else "No problem statement found." + ) + + grading_instructions = ( + format_grading_instructions(input_data.grading_instructions, input_data.grading_criteria) or "" + if is_short_grading_instructions + else file_grading_instructions.get( + file_path, "No relevant grading instructions found." + ) + ) + grading_instructions = ( + grading_instructions + if grading_instructions.strip() + else "No grading instructions found." + ) + + file_content = add_line_numbers(file_content) + solution_to_submission_diff = get_diff( + src_repo=solution_repo, + dst_repo=submission_repo, + src_prefix="solution", + dst_prefix="submission", + file_path=file_path, + ) + template_to_submission_diff = get_diff( + src_repo=template_repo, + dst_repo=submission_repo, + src_prefix="template", + dst_prefix="submission", + file_path=file_path, + ) + template_to_solution_diff = get_diff( + src_repo=template_repo, + dst_repo=solution_repo, + src_prefix="template", + dst_prefix="solution", + file_path=file_path, + ) + + prompt_inputs.append( + { + "file_path": file_path, # Not really relevant for the prompt + "priority": len( + template_to_solution_diff + ), # Not really relevant for the prompt + "submission_file": file_content, + "max_points": input_data.max_points, + "bonus_points": input_data.bonus_points, + "template_to_submission_diff": template_to_submission_diff, + "template_to_solution_diff": template_to_solution_diff, + 
"grading_instructions": grading_instructions, + "problem_statement": problem_statement, + "solution_summary": input_data.solution_summary, + "rag_data": input_data.rag_data + } + ) + + # Filter long prompts (omitting features if necessary) + # Lowest priority features are at the top of the list (i.e. they are omitted first if necessary) + # "submission_file" is not omittable, because it is the main input containing the line numbers + # In the future we might be able to include the line numbers in the diff, but for now we need to keep it + omittable_features = [ + "template_to_solution_diff", + # If it is even included in the prompt (has the lowest priority since it is indirectly included in other diffs) + "problem_statement", + "grading_instructions", + "solution_to_submission_diff", + "template_to_submission_diff", + "solution_summary" + # In the future we might indicate the changed lines in the submission_file additionally + ] + + prompt_inputs = [ + omitted_prompt_input + for omitted_prompt_input, should_run in [ + check_prompt_length_and_omit_features_if_necessary( + prompt=prompt, + prompt_input=prompt_input, + max_input_tokens=self.max_input_tokens, + omittable_features=omittable_features, + debug=debug, + ) + for prompt_input in prompt_inputs + ] + if should_run + ] + + # If we have many files we need to filter and prioritize them + if len(prompt_inputs) > self.max_number_of_files: + programming_language_extension = get_programming_language_file_extension( + programming_language=input_data.programming_language + ) + + # Prioritize files that have a diff between solution and submission + prompt_inputs = sorted(prompt_inputs, key=lambda x: x["priority"], reverse=True) + + filtered_prompt_inputs = [] + if programming_language_extension is not None: + filtered_prompt_inputs = [ + prompt_input + for prompt_input in prompt_inputs + if prompt_input["file_path"].endswith(programming_language_extension) + ] + + while ( + len(filtered_prompt_inputs) < 
self.max_number_of_files and prompt_inputs + ): + filtered_prompt_inputs.append(prompt_inputs.pop(0)) + prompt_inputs = filtered_prompt_inputs + + # noinspection PyTypeChecker + results: List[Optional[GenerateSuggestionsByFileOutput]] = await asyncio.gather( + *[ + predict_and_parse( + model=model, + chat_prompt=prompt, + prompt_input=prompt_input, + pydantic_object=GenerateSuggestionsByFileOutput, + tags=[ + f"exercise-{input_data.exercise_id}", + f"submission-{input_data.submission_id}", + f"file-{prompt_input['file_path']}", + "generate-suggestions-by-file", + ], + ) + for prompt_input in prompt_inputs + ] + ) + + if debug: + emit_meta( + "generate_suggestions", + [ + { + "file_path": prompt_input["file_path"], + "prompt": prompt.format(**prompt_input), + "result": result.dict() if result is not None else None, + } + for prompt_input, result in zip(prompt_inputs, results) + ], + ) + + return results \ No newline at end of file diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_suggestions_by_file/generate_suggestions_by_file_input.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_suggestions_by_file/generate_suggestions_by_file_input.py new file mode 100644 index 000000000..7a20c5507 --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_suggestions_by_file/generate_suggestions_by_file_input.py @@ -0,0 +1,60 @@ +from typing import Optional, List + +from git import Repo + +from athena import GradingCriterion +from module_programming_llm.prompts.split_grading_instructions_by_file import SplitGradingInstructionsByFileOutput +from module_programming_llm.prompts.split_problem_statement_by_file import SplitProblemStatementByFileOutput + + +class GenerateSuggestionsByFileInput: + """ + DTO class for top level feedback generation job + """ + template_repo: Repo + submission_repo: Repo + solution_repo: Repo + exercise_id: int + submission_id: 
int + grading_instructions_by_file: Optional[SplitGradingInstructionsByFileOutput] + grading_criteria: Optional[List[GradingCriterion]] + problem_statement_by_file: Optional[SplitProblemStatementByFileOutput] + problem_statement: Optional[str] + grading_instructions: Optional[str] + max_points: float + bonus_points: float + programming_language: str + solution_summary: str + rag_data: List[str] + + def __init__(self, + template_repo: Repo, + submission_repo: Repo, + solution_repo: Repo, + exercise_id: int, + submission_id: int, + max_points: float, + bonus_points: float, + programming_language: str, + solution_summary: str, + rag_data: List[str], + grading_instructions_by_file: Optional[SplitGradingInstructionsByFileOutput] = None, + problem_statement_by_file: Optional[SplitProblemStatementByFileOutput] = None, + grading_criteria: Optional[List[GradingCriterion]] = None, + problem_statement: Optional[str] = None, + grading_instructions: Optional[str] = None): + self.template_repo = template_repo + self.submission_repo = submission_repo + self.solution_repo = solution_repo + self.exercise_id = exercise_id + self.submission_id = submission_id + self.grading_instructions_by_file = grading_instructions_by_file + self.grading_criteria = grading_criteria + self.problem_statement_by_file = problem_statement_by_file + self.problem_statement = problem_statement + self.grading_instructions = grading_instructions + self.max_points = max_points + self.bonus_points = bonus_points + self.programming_language = programming_language + self.solution_summary = solution_summary + self.rag_data = rag_data diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_suggestions_by_file/generate_suggestions_by_file_output.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_suggestions_by_file/generate_suggestions_by_file_output.py new file mode 100644 index 000000000..be693d961 --- /dev/null +++ 
b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_suggestions_by_file/generate_suggestions_by_file_output.py @@ -0,0 +1,29 @@ +from typing import Optional, Sequence + +from pydantic import BaseModel, Field + + +class FeedbackModel(BaseModel): + title: str = Field( + description="Very short title, i.e. feedback category", example="Logic Error" + ) + description: str = Field(description="Feedback description") + line_start: Optional[int] = Field( + description="Referenced line number start, or empty if unreferenced" + ) + line_end: Optional[int] = Field( + description="Referenced line number end, or empty if unreferenced" + ) + credits: float = Field(0.0, description="Number of points received/deducted") + grading_instruction_id: Optional[int] = Field( + description="ID of the grading instruction that was used to generate this feedback, or empty if no grading instruction was used" + ) + + class Config: + title = "Feedback" + + +class GenerateSuggestionsByFileOutput(BaseModel): + """Collection of feedbacks making up an assessment for a file""" + feedbacks: Sequence[FeedbackModel] = Field(description="Assessment feedbacks for a file", default=[]) + file_path: str = Field(description="The path of the file, as specified in the input prompt") diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_graded_suggestions_by_file.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_suggestions_by_file/prompt.py similarity index 52% rename from modules/programming/module_programming_llm/module_programming_llm/prompts/generate_graded_suggestions_by_file.py rename to modules/programming/module_programming_llm/module_programming_llm/prompts/generate_suggestions_by_file/prompt.py index 8b1f00832..7a02b6b9e 100644 --- a/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_graded_suggestions_by_file.py +++ 
b/modules/programming/module_programming_llm/module_programming_llm/prompts/generate_suggestions_by_file/prompt.py @@ -3,7 +3,8 @@ # Task Create graded feedback suggestions for a student\'s programming submission that a human tutor would accept. \ -Meaning, the feedback you provide should be appliable to the submission with little to no modification. +Meaning, the feedback you provide should be applicable to the submission with little to no modification. +Give points for correct answers. Subtract points for wrong answers. Give 0 points for neutral answers. # Style 1. Constructive, 2. Specific, 3. Balanced, 4. Clear and Concise, 5. Actionable, 6. Educational, 7. Contextual @@ -11,15 +12,18 @@ # Problem statement {problem_statement} -# Grading instructions +# Grading instructions, follow them unless absolutely necessary to deviate {grading_instructions} Max points: {max_points}, bonus points: {bonus_points} (whole assessment, not just this file) -# Diff between solution (deletions) and student\'s submission (additions): -{solution_to_submission_diff} +# Diff between template (deletions) and solution (additions): +{template_to_solution_diff} -# Diff between template (deletions) and student\'s submission (additions): -{template_to_submission_diff} +# Summary of other solution files +{solution_summary} + +# RAG data +{rag_data} """ human_message = """\ @@ -27,4 +31,11 @@ \"\"\" {submission_file} \"\"\"\ + +Diff between template (deletions) and student\'s submission (additions): +{template_to_submission_diff} + +Do not give points for code that was not written by students! 
+ +Path: {file_path} """ diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/pipeline_step.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/pipeline_step.py new file mode 100644 index 000000000..0f9dc2485 --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/pipeline_step.py @@ -0,0 +1,17 @@ +from typing import Generic, TypeVar, Optional +from abc import abstractmethod +from pydantic import BaseModel, Field + +from llm_core.models import ModelConfigType + +# Generic types for input and output +TInput = TypeVar('TInput') +TOutput = TypeVar('TOutput') + + +class PipelineStep(BaseModel, Generic[TInput, TOutput]): + max_input_tokens: int = Field(default=10000, description="Maximum number of tokens in the input prompt.") + + @abstractmethod + async def process(self, input_data: TInput, debug: bool, model: ModelConfigType) -> Optional[TOutput]: # type: ignore + raise NotImplementedError('This is an abstract method') diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/rag/__init__.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/rag/__init__.py new file mode 100644 index 000000000..eea7b8c9d --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/rag/__init__.py @@ -0,0 +1,5 @@ +from .rag import RAG +from .rag_input import RAGInput +from .rag_output import RAGOutput + +__all__ = ['RAG', 'RAGInput', 'RAGOutput'] \ No newline at end of file diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/rag/prompt.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/rag/prompt.py new file mode 100644 index 000000000..b73fbc5d2 --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/rag/prompt.py @@ -0,0 +1,23 @@ +system_message = """\ +You are an AI tutor for programming assessment at 
a prestigious university. + +# Task +Identify, if you understand the problem and surrounding information completely. +In case you do not understand something, formulate up to 2 specific questions that will help you understand the problem statement better. + +# Style +1. Constructive, 2. Specific, 3. Balanced, 4. Clear and Concise, 5. Contextual + +For testing purposes, assume you do not know anything about sorting +""" + +human_message = """\ +# Problem statement +{problem_statement} + +Changed files from template to sample solution: +{changed_files_from_template_to_solution} + +# Diff between template (deletions) and sample solution(additions): +{template_to_solution_diff} +""" diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/rag/rag.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/rag/rag.py new file mode 100644 index 000000000..6225428d1 --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/rag/rag.py @@ -0,0 +1,80 @@ +import json +from typing import Optional + +from athena import emit_meta +from module_programming_llm.prompts.pipeline_step import PipelineStep +from .rag_input import RAGInput +from .rag_output import RAGOutput +from .prompt import system_message as prompt_system_message, human_message as prompt_human_message +from pydantic import Field +from llm_core.utils.llm_utils import ( + get_chat_prompt_with_formatting_instructions, + num_tokens_from_prompt, +) +from llm_core.utils.predict_and_parse import predict_and_parse +from module_programming_llm.helpers.utils import ( + get_diff, + load_files_from_repo, +) +from llm_core.models import ModelConfigType + + +class RAG(PipelineStep[RAGInput, Optional[RAGOutput]]): + """Generates RAG queries.""" + + system_message: str = Field(prompt_system_message, + description="Message for priming AI behavior and instructing it what to do.") + human_message: str = Field(prompt_human_message, + 
description="Message from a human. The input on which the AI is supposed to act.") + + async def process(self, input_data: RAGInput, debug: bool, model: ModelConfigType) -> Optional[ + RAGOutput]: # type: ignore + model = model.get_model() # type: ignore[attr-defined] + + changed_files_from_template_to_solution = get_diff( + src_repo=input_data.template_repo, + dst_repo=input_data.solution_repo, + file_path=None, + name_only=True + ).split("\n") + + changed_files = load_files_from_repo( + input_data.solution_repo, + file_filter=lambda file_path: file_path in changed_files_from_template_to_solution, + ) + + prompt = get_chat_prompt_with_formatting_instructions( + model=model, + system_message=self.system_message, + human_message=self.human_message, + pydantic_object=RAGOutput + ) + + prompt_input = { + "problem_statement": input_data.problem_statement or "No problem statement.", + "changed_files_from_template_to_solution": ", ".join(changed_files_from_template_to_solution), + "template_to_solution_diff": json.dumps(changed_files) + } + + # Return None if the prompt is too long + if num_tokens_from_prompt(prompt, prompt_input) > self.max_input_tokens: + return None + + generate_rag_queries = await predict_and_parse( + model=model, + chat_prompt=prompt, + prompt_input=prompt_input, + pydantic_object=RAGOutput, + tags=[ + f"exercise-{input_data.exercise_id}", + "generate_rag_requests" + ] + ) + + if debug: + emit_meta("generate_rag_requests", { + "prompt": prompt.format(**prompt_input), + "result": generate_rag_queries.dict() if generate_rag_queries is not None else None + }) + + return generate_rag_queries diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/rag/rag_input.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/rag/rag_input.py new file mode 100644 index 000000000..89708b2e0 --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/rag/rag_input.py @@ -0,0 
+1,23 @@ +from typing import Optional + +from git import Repo + + +class RAGInput: + """ + DTO class for the RAG query generation step + """ + template_repo: Repo + solution_repo: Repo + exercise_id: int + problem_statement: Optional[str] + + def __init__(self, + template_repo: Repo, + solution_repo: Repo, + exercise_id: int, + problem_statement: Optional[str] = None): + self.template_repo = template_repo + self.solution_repo = solution_repo + self.exercise_id = exercise_id + self.problem_statement = problem_statement diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/rag/rag_output.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/rag/rag_output.py new file mode 100644 index 000000000..19d5dd5c0 --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/rag/rag_output.py @@ -0,0 +1,8 @@ +from typing import Sequence + +from pydantic import BaseModel, Field + + +class RAGOutput(BaseModel): + """Collection of search queries for a RAG web-search provider""" + rag_queries: Sequence[str] = Field(description="A sequence of search queries for a RAG web-search provider", default=[]) diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/split_grading_instructions_by_file/__init__.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/split_grading_instructions_by_file/__init__.py new file mode 100644 index 000000000..e6dfe6c97 --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/split_grading_instructions_by_file/__init__.py @@ -0,0 +1,9 @@ +from .split_grading_instructions_by_file_input import SplitGradingInstructionsByFileInput +from .split_grading_instructions_by_file_output import SplitGradingInstructionsByFileOutput +from .split_grading_instructions_by_file import SplitGradingInstructionsByFile + +__all__ = [ + "SplitGradingInstructionsByFileInput", + 
"SplitGradingInstructionsByFileOutput", + "SplitGradingInstructionsByFile", +] \ No newline at end of file diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/split_grading_instructions_by_file.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/split_grading_instructions_by_file/prompt.py similarity index 100% rename from modules/programming/module_programming_llm/module_programming_llm/prompts/split_grading_instructions_by_file.py rename to modules/programming/module_programming_llm/module_programming_llm/prompts/split_grading_instructions_by_file/prompt.py diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/split_grading_instructions_by_file/split_grading_instructions_by_file.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/split_grading_instructions_by_file/split_grading_instructions_by_file.py new file mode 100644 index 000000000..ed270130a --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/split_grading_instructions_by_file/split_grading_instructions_by_file.py @@ -0,0 +1,127 @@ +from collections import defaultdict + +from module_programming_llm.helpers.utils import format_grading_instructions + +from typing import Optional + +from athena import emit_meta +from module_programming_llm.prompts.pipeline_step import PipelineStep +from .split_grading_instructions_by_file_input import SplitGradingInstructionsByFileInput +from .prompt import system_message as prompt_system_message, human_message as prompt_human_message +from pydantic import Field +from llm_core.utils.llm_utils import ( + get_chat_prompt_with_formatting_instructions, + num_tokens_from_string, + num_tokens_from_prompt, +) +from llm_core.utils.predict_and_parse import predict_and_parse +from module_programming_llm.helpers.utils import ( + get_diff, +) +from .split_grading_instructions_by_file_output import FileGradingInstruction, 
SplitGradingInstructionsByFileOutput +from llm_core.models import ModelConfigType + + +class SplitGradingInstructionsByFile( + PipelineStep[SplitGradingInstructionsByFileInput, Optional[SplitGradingInstructionsByFileOutput]]): + """Splits grading instructions of a programming exercise to match with solution files""" + + system_message: str = Field(prompt_system_message, + description="Message for priming AI behavior and instructing it what to do.") + human_message: str = Field(prompt_human_message, + description="Message from a human. The input on which the AI is supposed to act.") + tokens_before_split: int = Field(default=2000, + description="Split the grading instructions into file-based ones after this number of tokens.") + + async def process(self, input_data: SplitGradingInstructionsByFileInput, debug: bool, model: ModelConfigType) -> Optional[SplitGradingInstructionsByFileOutput]: # type: ignore + """Split the general grading instructions by file + + Args: + input_data (SplitGradingInstructionsByFileInput): Input data containing template and submission repositories. 
+ + Returns: + Optional[SplitGradingInstructionsByFileOutput]: Split grading instructions, None if it is too short or too long + """ + + grading_instructions = format_grading_instructions(input_data.grading_instructions, input_data.grading_criteria) + + # Return None if the grading instructions are too short + if (grading_instructions is None + or num_tokens_from_string( + grading_instructions) <= self.tokens_before_split): + return None + + changed_files_from_template_to_solution = get_diff( + src_repo=input_data.template_repo, dst_repo=input_data.solution_repo, file_path=None, name_only=True + ).split("\n") + + changed_files_from_template_to_submission = get_diff( + src_repo=input_data.template_repo, dst_repo=input_data.submission_repo, file_path=None, name_only=True + ).split("\n") + + prompt = get_chat_prompt_with_formatting_instructions( + model=model.get_model(), # type: ignore[attr-defined] + system_message=self.system_message, + human_message=self.human_message, + pydantic_object=SplitGradingInstructionsByFileOutput, + ) + + prompt_input = { + "grading_instructions": grading_instructions, + "changed_files_from_template_to_solution": ", ".join( + changed_files_from_template_to_solution + ), + "changed_files_from_template_to_submission": ", ".join( + changed_files_from_template_to_submission + ), + } + + # Return None if the prompt is too long + if num_tokens_from_prompt(prompt, prompt_input) > self.max_input_tokens: + return None + + split_grading_instructions = await predict_and_parse( + model=model.get_model(), # type: ignore[attr-defined] + chat_prompt=prompt, + prompt_input=prompt_input, + pydantic_object=SplitGradingInstructionsByFileOutput, + tags=[ + f"exercise-{input_data.exercise_id}", + f"submission-{input_data.submission_id}", + "split-grading-instructions-by-file", + ], + ) + + if debug: + emit_meta( + "file_grading_instructions", + { + "prompt": prompt.format(**prompt_input), + "result": split_grading_instructions.dict() + if 
split_grading_instructions is not None + else None, + }, + ) + + if split_grading_instructions is None or not split_grading_instructions.items: + return None + + # Join duplicate file names (some responses contain multiple grading instructions for the same file) + file_grading_instructions_by_file_name = defaultdict(list) + for file_grading_instruction in split_grading_instructions.items: + file_grading_instructions_by_file_name[ + file_grading_instruction.file_name + ].append(file_grading_instruction) + + split_grading_instructions.items = [ + FileGradingInstruction( + file_name=file_name, + grading_instructions="\n".join( + file_grading_instruction.grading_instructions + for file_grading_instruction in file_grading_instructions + ), + ) + for file_name, file_grading_instructions in file_grading_instructions_by_file_name.items() + ] + + return split_grading_instructions diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/split_grading_instructions_by_file/split_grading_instructions_by_file_input.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/split_grading_instructions_by_file/split_grading_instructions_by_file_input.py new file mode 100644 index 000000000..acc4912c3 --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/split_grading_instructions_by_file/split_grading_instructions_by_file_input.py @@ -0,0 +1,29 @@ +from typing import Optional, List + +from git import Repo + +from athena import GradingCriterion + + +class SplitGradingInstructionsByFileInput: + """ + A DTO file that contains information about grading instructions and submission + """ + grading_instructions: Optional[str] + grading_criteria: Optional[List[GradingCriterion]] + template_repo: Repo + submission_repo: Repo + solution_repo: Repo + exercise_id: int + submission_id: int + + def __init__(self, template_repo: Repo, submission_repo: Repo, solution_repo: Repo, exercise_id: int = 1, + 
submission_id: int = 1, grading_instructions: Optional[str] = None, + grading_criteria: Optional[List[GradingCriterion]] = None): + self.grading_instructions = grading_instructions + self.grading_criteria = grading_criteria + self.template_repo = template_repo + self.submission_repo = submission_repo + self.solution_repo = solution_repo + self.exercise_id = exercise_id + self.submission_id = submission_id \ No newline at end of file diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/split_grading_instructions_by_file/split_grading_instructions_by_file_output.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/split_grading_instructions_by_file/split_grading_instructions_by_file_output.py new file mode 100644 index 000000000..5d212f645 --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/split_grading_instructions_by_file/split_grading_instructions_by_file_output.py @@ -0,0 +1,13 @@ +from typing import Sequence + +from pydantic import BaseModel, Field + + +class FileGradingInstruction(BaseModel): + file_path: str = Field(description="The full path of the file, as specified in the input prompt") + grading_instructions: str = Field(description="Grading instructions relevant for this file") + + +class SplitGradingInstructionsByFileOutput(BaseModel): + """Collection of grading instructions split by file""" + items: Sequence[FileGradingInstruction] = Field(description="File grading instructions") \ No newline at end of file diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/split_problem_non_grading_statement_by_file.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/split_problem_non_grading_statement_by_file.py deleted file mode 100644 index 5dc3024d7..000000000 --- a/modules/programming/module_programming_llm/module_programming_llm/prompts/split_problem_non_grading_statement_by_file.py +++ /dev/null @@ 
-1,20 +0,0 @@ -system_message = """\ -You are an AI tutor for programming assessment at a prestigious university. - -# Task -Restructure the problem statement by student changed files to gather work items for each file. \ -Some parts of the problem statement may be relevant for multiple files. -Comments in the template solution can be relevant for some files, some might be not. -Include only those work items based on comments that make sense. -For the file keys, include the full path. -""" - -human_message = """\ -Problem statement: -{problem_statement} - -Changed files from template to student submission (Pick from this list, very important!): -{changed_files_from_template_to_submission} - -Problem statement by file: -""" diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/split_problem_statement_by_file/__init__.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/split_problem_statement_by_file/__init__.py new file mode 100644 index 000000000..70597e17d --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/split_problem_statement_by_file/__init__.py @@ -0,0 +1,9 @@ +from .split_problem_statement_by_file import SplitProblemStatementByFile +from .split_problem_statement_by_file_input import SplitProblemStatementByFileInput +from .split_problem_statement_by_file_output import SplitProblemStatementByFileOutput + +__all__ = [ + "SplitProblemStatementByFile", + "SplitProblemStatementByFileInput", + "SplitProblemStatementByFileOutput", +] \ No newline at end of file diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/split_problem_grading_statement_by_file.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/split_problem_statement_by_file/prompt.py similarity index 100% rename from modules/programming/module_programming_llm/module_programming_llm/prompts/split_problem_grading_statement_by_file.py rename to 
modules/programming/module_programming_llm/module_programming_llm/prompts/split_problem_statement_by_file/prompt.py diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/split_problem_statement_by_file/split_problem_statement_by_file.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/split_problem_statement_by_file/split_problem_statement_by_file.py new file mode 100644 index 000000000..7b005b871 --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/split_problem_statement_by_file/split_problem_statement_by_file.py @@ -0,0 +1,118 @@ +from collections import defaultdict +from typing import Optional + +from pydantic import Field + +from athena import emit_meta +from .prompt import system_message as prompt_system_message, human_message as prompt_human_message + +from .split_problem_statement_by_file_input import SplitProblemStatementByFileInput +from .split_problem_statement_by_file_output import FileProblemStatement, SplitProblemStatementByFileOutput +from ..pipeline_step import PipelineStep +from llm_core.utils.llm_utils import num_tokens_from_string, get_chat_prompt_with_formatting_instructions, \ + num_tokens_from_prompt +from llm_core.utils.predict_and_parse import predict_and_parse +from llm_core.models import ModelConfigType +from ...helpers.utils import get_diff + + +class SplitProblemStatementByFile(PipelineStep[SplitProblemStatementByFileInput, Optional[SplitProblemStatementByFileOutput]]): + """Splits problem statement of a programming exercise to match with solution files""" + + system_message: str = Field(prompt_system_message, + description="Message for priming AI behavior and instructing it what to do.") + human_message: str = Field(prompt_human_message, + description="Message from a human. 
The input on which the AI is supposed to act.") + tokens_before_split: int = Field(default=2000, + description="Split the problem statement into file-based ones after this number of tokens.") + + async def process(self, input_data: SplitProblemStatementByFileInput, debug: bool, model: ModelConfigType) -> Optional[SplitProblemStatementByFileOutput]: # type: ignore + """Split the general problem statement by file + + Args: + input_data (SplitProblemStatementByFileInput): Input data containing template and submission repositories, programming exercise. + + Returns: + Optional[SplitProblemStatementByFileOutput]: Split problem statement by file + """ + + # Return None if the problem statement is too short + if num_tokens_from_string(input_data.problem_statement or "") <= self.tokens_before_split: + return None + + template_repo = input_data.template_repo + submission_repo = input_data.submission_repo + + changed_files_from_template_to_submission = get_diff( + src_repo=template_repo, + dst_repo=submission_repo, + file_path=None, + name_only=True + ).split("\n") + + prompt = get_chat_prompt_with_formatting_instructions( + model=model.get_model(), # type: ignore[attr-defined] + system_message=self.system_message, + human_message=self.human_message, + pydantic_object=SplitProblemStatementByFileOutput + ) + + prompt_input = { + "problem_statement": input_data.problem_statement or "No problem statement.", + "changed_files_from_template_to_submission": ", ".join(changed_files_from_template_to_submission) + } + + if "changed_files_from_template_to_solution" in prompt.input_variables: + solution_repo = input_data.solution_repo + changed_files_from_template_to_solution = get_diff( + src_repo=template_repo, + dst_repo=solution_repo, + file_path=None, + name_only=True, + ).split("\n") + prompt_input["changed_files_from_template_to_solution"] = ", ".join( + changed_files_from_template_to_solution + ) + + # Return None if the prompt is too long + if 
num_tokens_from_prompt(prompt, prompt_input) > self.max_input_tokens: + return None + + split_problem_statement = await predict_and_parse( + model=model.get_model(), # type: ignore[attr-defined] + chat_prompt=prompt, + prompt_input=prompt_input, + pydantic_object=SplitProblemStatementByFileOutput, + tags=[ + f"exercise-{input_data.exercise_id}", + f"submission-{input_data.submission_id}", + "split-problem-statement-by-file" + ] + ) + + if debug: + emit_meta("file_problem_statements", { + "prompt": prompt.format(**prompt_input), + "result": split_problem_statement.dict() if split_problem_statement is not None else None + }) + + if split_problem_statement is None or not split_problem_statement.items: + return None + + # Join duplicate file names (some responses contain multiple problem statements for the same file) + file_problem_statements_by_file_name = defaultdict(list) + for file_problem_statement in split_problem_statement.items: + file_problem_statements_by_file_name[file_problem_statement.file_path].append(file_problem_statement) + + split_problem_statement.items = [ + FileProblemStatement( + file_path=file_name, + problem_statement="\n".join( + file_problem_statement.problem_statement + for file_problem_statement in file_problem_statements + ) + ) + for file_name, file_problem_statements in file_problem_statements_by_file_name.items() + ] + + return split_problem_statement diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/split_problem_statement_by_file/split_problem_statement_by_file_input.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/split_problem_statement_by_file/split_problem_statement_by_file_input.py new file mode 100644 index 000000000..169bf377b --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/split_problem_statement_by_file/split_problem_statement_by_file_input.py @@ -0,0 +1,26 @@ +from typing import Optional + +from git import Repo 
+from pydantic import Field + + +class SplitProblemStatementByFileInput: + """ + A DTO file that contains information about a programming exercise + """ + problem_statement: Optional[str] + template_repo: Repo + submission_repo: Repo + solution_repo: Repo + exercise_id: int + submission_id: int + + + def __init__(self, template_repo: Repo, submission_repo: Repo, solution_repo: Repo, + problem_statement: Optional[str] = None, exercise_id: int = 1, submission_id: int = 1): + self.problem_statement = problem_statement + self.template_repo = template_repo + self.submission_repo = submission_repo + self.solution_repo = solution_repo + self.exercise_id = exercise_id + self.submission_id = submission_id diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/split_problem_statement_by_file/split_problem_statement_by_file_output.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/split_problem_statement_by_file/split_problem_statement_by_file_output.py new file mode 100644 index 000000000..df6967c88 --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/split_problem_statement_by_file/split_problem_statement_by_file_output.py @@ -0,0 +1,14 @@ +from typing import Sequence + +from pydantic import BaseModel, Field + + +class FileProblemStatement(BaseModel): + file_path: str = Field(description="The full path of the file, as specified in the input prompt") + problem_statement: str = Field(description="Problem statement relevant for this file") + + +class SplitProblemStatementByFileOutput(BaseModel): + """Collection of problem statements split by file""" + + items: Sequence[FileProblemStatement] = Field(description="File problem statements") \ No newline at end of file diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/validate_suggestions/__init__.py 
b/modules/programming/module_programming_llm/module_programming_llm/prompts/validate_suggestions/__init__.py new file mode 100644 index 000000000..7c3da3ea3 --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/validate_suggestions/__init__.py @@ -0,0 +1,9 @@ +from .validate_suggestions_input import ValidateSuggestionsInput +from .validate_suggestions_output import ValidateSuggestionsOutput +from .validate_suggestions import ValidateSuggestions + +__all__ = [ + "ValidateSuggestions", + "ValidateSuggestionsInput", + "ValidateSuggestionsOutput", +] \ No newline at end of file diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/validate_suggestions/prompt.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/validate_suggestions/prompt.py new file mode 100644 index 000000000..486a1b9d6 --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/validate_suggestions/prompt.py @@ -0,0 +1,41 @@ +system_message = """\ +You are a tutor at a very prestigious university. +# Task +You are given feedback suggestions. +Your task is to filter out feedback suggestions that are wrong or meaningless in the educational context you are given. Do not generate new suggestions. +Please keep only relevant feedback suggestions in your response. +Also, make sure that the number of points is distributed correctly and makes sense. 
+ +# Problem Statement: +{problem_statement} + +# Grading instructions +{grading_instructions} +Max points: {max_points}, bonus points: {bonus_points} (whole assessment, not just this file) + +# Diff between template (deletions) and solution (additions): +{template_to_solution_diff} + +# Summary of other solution files +{solution_summary} + +# RAG data +{rag_data} +""" + +human_message = """\ +Path: {file_path} +Feedback Suggestions: +\"\"\" +{feedback_suggestions} +\"\"\" + +Student\'s submission file to grade (with line numbers : ): +\"\"\" +{submission_file} +\"\"\"\ + +# Diff between template (deletions) and student\'s submission (additions): +{template_to_submission_diff} +Only student\'s changes should be taken into account. +""" diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/validate_suggestions/validate_suggestions.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/validate_suggestions/validate_suggestions.py new file mode 100644 index 000000000..87f4ed7cc --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/validate_suggestions/validate_suggestions.py @@ -0,0 +1,268 @@ +import asyncio +import json +import os +from typing import Optional, List, Sequence, Dict + +from athena import emit_meta +from module_programming_llm.prompts.pipeline_step import PipelineStep +from module_programming_llm.prompts.generate_suggestions_by_file.generate_suggestions_by_file_output import \ + FeedbackModel as SuggestionsFeedbackModel +from .validate_suggestions_input import ValidateSuggestionsInput +from .validate_suggestions_output import ValidateSuggestionsOutput +from .prompt import system_message as prompt_system_message, human_message as prompt_human_message +from pydantic import Field +from llm_core.utils.llm_utils import ( + get_chat_prompt_with_formatting_instructions, + num_tokens_from_string, + check_prompt_length_and_omit_features_if_necessary, +) +from 
llm_core.utils.predict_and_parse import predict_and_parse +from module_programming_llm.helpers.utils import ( + get_diff, + load_files_from_repo, + add_line_numbers, get_programming_language_file_extension +) +from llm_core.models import ModelConfigType + + +class ValidateSuggestions(PipelineStep[ValidateSuggestionsInput, List[Optional[ValidateSuggestionsOutput]]]): + """Generates concise summaries of submission files, facilitating a quicker review and understanding of the content for AI processing.""" + + system_message: str = Field(prompt_system_message, + description="Message for priming AI behavior and instructing it what to do.") + human_message: str = Field(prompt_human_message, + description="Message from a human. The input on which the AI is supposed to act.") + tokens_before_split: int = Field(default=2000, + description="Split the grading instructions into file-based ones after this number of tokens.") + max_number_of_files: int = Field(default=25, + description="Maximum number of files. 
If exceeded, it will prioritize the most important ones.") + + # pylint: disable=too-many-locals + async def process( + self, + input_data: ValidateSuggestionsInput, + debug: bool, + model: ModelConfigType # type: ignore + ) -> List[Optional[ValidateSuggestionsOutput]]: + model = model.get_model() # type: ignore[attr-defined] + + # Prepare the prompt template + prompt = get_chat_prompt_with_formatting_instructions( + model=model, + system_message=self.system_message, + human_message=self.human_message, + pydantic_object=ValidateSuggestionsOutput, + ) + + problem_statement_tokens = num_tokens_from_string(input_data.problem_statement or "") + is_short_problem_statement = ( + problem_statement_tokens + <= self.tokens_before_split + ) + file_problem_statements = ( + { + item.file_path: item.problem_statement + for item in input_data.problem_statement_by_file.items + } + if input_data.problem_statement_by_file is not None + else {} + ) + + is_short_grading_instructions = ( + num_tokens_from_string(input_data.grading_instructions) + <= self.tokens_before_split + if input_data.grading_instructions is not None + else True + ) + file_grading_instructions = ( + { + item.file_path: item.grading_instructions + for item in input_data.grading_instructions_by_file.items + } + if input_data.grading_instructions_by_file is not None + else {} + ) + + prompt_inputs: List[dict] = [] + + solution_repo = input_data.solution_repo + template_repo = input_data.template_repo + submission_repo = input_data.submission_repo + + changed_files_from_template_to_submission = get_diff( + src_repo=template_repo, dst_repo=submission_repo, file_path=None, name_only=True + ).split("\n") + changed_files_from_template_to_submission = [ + os.path.join(str(submission_repo.working_tree_dir or ""), file_path) + for file_path in changed_files_from_template_to_submission + ] + + # Changed text files + changed_files = load_files_from_repo( + submission_repo, + file_filter=lambda file_path: file_path in 
changed_files_from_template_to_submission, + ) + + feedback_suggestions_by_file: Dict[str, Sequence[SuggestionsFeedbackModel]] = {} + for feedback in input_data.feedback_suggestions: + if feedback is not None: + feedback_suggestions_by_file[feedback.file_path] = feedback.feedbacks + + # Gather prompt inputs for each changed file (independently) + for file_path, file_content in changed_files.items(): + problem_statement = ( + input_data.problem_statement or "" + if is_short_problem_statement + else file_problem_statements.get( + file_path, "No relevant problem statement section found." + ) + ) + problem_statement = ( + problem_statement + if problem_statement.strip() + else "No problem statement found." + ) + + grading_instructions = ( + input_data.grading_instructions or "" + if is_short_grading_instructions + else file_grading_instructions.get( + file_path, "No relevant grading instructions found." + ) + ) + grading_instructions = ( + grading_instructions + if grading_instructions.strip() + else "No grading instructions found." 
+ ) + + file_content = add_line_numbers(file_content) + solution_to_submission_diff = get_diff( + src_repo=solution_repo, + dst_repo=submission_repo, + src_prefix="solution", + dst_prefix="submission", + file_path=file_path, + ) + template_to_submission_diff = get_diff( + src_repo=template_repo, + dst_repo=submission_repo, + src_prefix="template", + dst_prefix="submission", + file_path=file_path, + ) + template_to_solution_diff = get_diff( + src_repo=template_repo, + dst_repo=solution_repo, + src_prefix="template", + dst_prefix="solution", + file_path=file_path, + ) + + prompt_inputs.append( + { + "file_path": file_path, # Not really relevant for the prompt + "priority": len( + template_to_solution_diff + ), # Not really relevant for the prompt + "submission_file": file_content, + "max_points": input_data.max_points, + "bonus_points": input_data.bonus_points, + "template_to_submission_diff": template_to_submission_diff, + "template_to_solution_diff": template_to_solution_diff, + "grading_instructions": grading_instructions, + "problem_statement": problem_statement, + "solution_summary": input_data.solution_summary, + "feedback_suggestions": json.dumps( + [ob.__dict__ for ob in feedback_suggestions_by_file.get(file_path) or []]), + "rag_data": input_data.rag_data + } + ) + + # Filter long prompts (omitting features if necessary) + # Lowest priority features are at the top of the list (i.e. 
they are omitted first if necessary) + # "submission_file" is not omittable, because it is the main input containing the line numbers + # In the future we might be able to include the line numbers in the diff, but for now we need to keep it + omittable_features = [ + "template_to_solution_diff", + # If it is even included in the prompt (has the lowest priority since it is indirectly included in other diffs) + "problem_statement", + "grading_instructions", + "solution_to_submission_diff", + "template_to_submission_diff", + "solution_summary" + # In the future we might indicate the changed lines in the submission_file additionally + ] + + prompt_inputs = [ + omitted_prompt_input + for omitted_prompt_input, should_run in [ + check_prompt_length_and_omit_features_if_necessary( + prompt=prompt, + prompt_input=prompt_input, + max_input_tokens=self.max_input_tokens, + omittable_features=omittable_features, + debug=debug, + ) + for prompt_input in prompt_inputs + ] + if should_run + ] + + # If we have many files we need to filter and prioritize them + if len(prompt_inputs) > self.max_number_of_files: + programming_language_extension = get_programming_language_file_extension( + programming_language=input_data.programming_language + ) + + # Prioritize files that have a diff between solution and submission + prompt_inputs = sorted(prompt_inputs, key=lambda x: x["priority"], reverse=True) + + filtered_prompt_inputs = [] + if programming_language_extension is not None: + filtered_prompt_inputs = [ + prompt_input + for prompt_input in prompt_inputs + if prompt_input["file_path"].endswith(programming_language_extension) + ] + + while ( + len(filtered_prompt_inputs) < self.max_number_of_files and prompt_inputs + ): + filtered_prompt_inputs.append(prompt_inputs.pop(0)) + prompt_inputs = filtered_prompt_inputs + + # Send prompts to the LLM + # noinspection PyTypeChecker + results: List[Optional[ValidateSuggestionsOutput]] = await asyncio.gather( + *[ + predict_and_parse( + 
model=model, + chat_prompt=prompt, + prompt_input=prompt_input, + pydantic_object=ValidateSuggestionsOutput, + tags=[ + f"exercise-{input_data.exercise_id}", + f"submission-{input_data.submission_id}", + f"file-{prompt_input['file_path']}", + "validate-suggestions", + ], + ) + for prompt_input in prompt_inputs + ] + ) + + if debug: + emit_meta( + "filter_out_solutions", + [ + { + "file_path": prompt_input["file_path"], + "prompt": prompt.format(**prompt_input), + "result": result.dict() if result is not None else None, + } + for prompt_input, result in zip(prompt_inputs, results) + ], + ) + + return results diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/validate_suggestions/validate_suggestions_input.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/validate_suggestions/validate_suggestions_input.py new file mode 100644 index 000000000..14ff42cc9 --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/validate_suggestions/validate_suggestions_input.py @@ -0,0 +1,66 @@ +from typing import Optional, List + +from git import Repo + +from athena import GradingCriterion +from module_programming_llm.prompts.generate_suggestions_by_file import GenerateSuggestionsByFileOutput +from module_programming_llm.prompts.split_grading_instructions_by_file import SplitGradingInstructionsByFileOutput +from module_programming_llm.prompts.split_problem_statement_by_file import SplitProblemStatementByFileOutput + + +class ValidateSuggestionsInput: + """ + DTO class for filtering out + """ + solution_repo: Repo + template_repo: Repo + submission_repo: Repo + problem_statement_by_file: Optional[SplitProblemStatementByFileOutput] + problem_statement: Optional[str] + exercise_id: int + submission_id: int + feedback_suggestions: List[Optional[GenerateSuggestionsByFileOutput]] + grading_instructions_by_file: Optional[SplitGradingInstructionsByFileOutput] + grading_criteria: 
Optional[List[GradingCriterion]] + grading_instructions: Optional[str] + solution_summary: str + max_points: float + bonus_points: float + programming_language: str + rag_data: List[str] + + def __init__( + self, + solution_repo: Repo, + template_repo: Repo, + submission_repo: Repo, + problem_statement_by_file: Optional[SplitProblemStatementByFileOutput], + problem_statement: Optional[str], + exercise_id: int, + submission_id: int, + feedback_suggestions: List[Optional[GenerateSuggestionsByFileOutput]], + grading_instructions_by_file: Optional[SplitGradingInstructionsByFileOutput], + grading_criteria: Optional[List[GradingCriterion]], + grading_instructions: Optional[str], + solution_summary: str, + max_points: float, + bonus_points: float, + programming_language: str, + rag_data: List[str] + ): + self.solution_repo = solution_repo + self.template_repo = template_repo + self.submission_repo = submission_repo + self.problem_statement_by_file = problem_statement_by_file + self.problem_statement = problem_statement + self.exercise_id = exercise_id + self.submission_id = submission_id + self.feedback_suggestions = feedback_suggestions + self.grading_instructions_by_file = grading_instructions_by_file + self.grading_criteria = grading_criteria + self.grading_instructions = grading_instructions + self.solution_summary = solution_summary + self.max_points = max_points + self.bonus_points = bonus_points + self.programming_language = programming_language + self.rag_data = rag_data diff --git a/modules/programming/module_programming_llm/module_programming_llm/prompts/validate_suggestions/validate_suggestions_output.py b/modules/programming/module_programming_llm/module_programming_llm/prompts/validate_suggestions/validate_suggestions_output.py new file mode 100644 index 000000000..3e0063495 --- /dev/null +++ b/modules/programming/module_programming_llm/module_programming_llm/prompts/validate_suggestions/validate_suggestions_output.py @@ -0,0 +1,33 @@ +from typing import 
Optional, Sequence + +from pydantic import BaseModel, Field + + +class FeedbackModel(BaseModel): + title: str = Field( + description="Very short title, i.e. feedback category", example="Logic Error" + ) + description: str = Field(description="Feedback description") + line_start: Optional[int] = Field( + description="Referenced line number start, or empty if unreferenced" + ) + line_end: Optional[int] = Field( + description="Referenced line number end, or empty if unreferenced" + ) + credits: float = Field(0.0, description="Number of points received/deducted") + grading_instruction_id: Optional[int] = Field( + description="ID of the grading instruction that was used to generate this feedback, or empty if no grading instruction was used" + ) + + class Config: + title = "Feedback" + + +class ValidateSuggestionsOutput(BaseModel): + """Collection of feedbacks making up an assessment for a file""" + + feedbacks: Sequence[FeedbackModel] = Field(description="Assessment feedbacks", default=[]) + file_path: str = Field(description="The full path of the file, as specified in the input prompt") + + class Config: + title = "Assessment" \ No newline at end of file diff --git a/modules/programming/module_programming_llm/module_programming_llm/split_grading_instructions_by_file.py b/modules/programming/module_programming_llm/module_programming_llm/split_grading_instructions_by_file.py deleted file mode 100644 index 08c08924f..000000000 --- a/modules/programming/module_programming_llm/module_programming_llm/split_grading_instructions_by_file.py +++ /dev/null @@ -1,142 +0,0 @@ -from typing import Optional, Sequence -from collections import defaultdict - -from pydantic import BaseModel, Field -from langchain.prompts import ChatPromptTemplate - -from athena import emit_meta -from athena.programming import Exercise, Submission - -from module_programming_llm.config import GradedBasicApproachConfig -from llm_core.utils.llm_utils import ( - get_chat_prompt_with_formatting_instructions, - 
num_tokens_from_string, - num_tokens_from_prompt, -) -from llm_core.utils.predict_and_parse import predict_and_parse - -from module_programming_llm.helpers.utils import format_grading_instructions, get_diff - - -class FileGradingInstruction(BaseModel): - file_name: str = Field(description="File name") - grading_instructions: str = Field(description="Grading instructions relevant for this file") - - -class SplitGradingInstructions(BaseModel): - """Collection of grading instructions split by file""" - items: Sequence[FileGradingInstruction] = Field(description="File grading instructions") - - -# pylint: disable=too-many-locals -async def split_grading_instructions_by_file( - exercise: Exercise, - submission: Submission, - prompt: ChatPromptTemplate, - config: GradedBasicApproachConfig, - debug: bool - ) -> Optional[SplitGradingInstructions]: - """Split the general grading instructions by file - - Args: - exercise (Exercise): Exercise to split the grading instructions for (respecting the changed files) - submission (Submission): Submission to split the grading instructions for (respecting the changed files) - prompt (ChatPromptTemplate): Prompt template to check for grading_instructions - config (GradedBasicApproachConfig): Configuration - - Returns: - Optional[SplitGradingInstructions]: Split grading instructions, None if it is too short or too long - """ - - grading_instructions = format_grading_instructions(exercise.grading_instructions, exercise.grading_criteria) - - # Return None if the grading instructions are too short - if (grading_instructions is None - or num_tokens_from_string( - grading_instructions) <= config.split_grading_instructions_by_file_prompt.tokens_before_split): - return None - - # Return None if the grading instructions are not in the prompt - if "grading_instructions" not in prompt.input_variables: - return None - - model = config.model.get_model() # type: ignore[attr-defined] - - template_repo = exercise.get_template_repository() - 
solution_repo = exercise.get_solution_repository() - submission_repo = submission.get_repository() - - changed_files_from_template_to_solution = get_diff( - src_repo=template_repo, dst_repo=solution_repo, file_path=None, name_only=True - ).split("\n") - - changed_files_from_template_to_submission = get_diff( - src_repo=template_repo, dst_repo=submission_repo, file_path=None, name_only=True - ).split("\n") - - chat_prompt = get_chat_prompt_with_formatting_instructions( - model=model, - system_message=config.split_grading_instructions_by_file_prompt.system_message, - human_message=config.split_grading_instructions_by_file_prompt.human_message, - pydantic_object=SplitGradingInstructions, - ) - - prompt_input = { - "grading_instructions": grading_instructions, - "changed_files_from_template_to_solution": ", ".join( - changed_files_from_template_to_solution - ), - "changed_files_from_template_to_submission": ", ".join( - changed_files_from_template_to_submission - ), - } - - # Return None if the prompt is too long - if num_tokens_from_prompt(chat_prompt, prompt_input) > config.max_input_tokens: - return None - - split_grading_instructions = await predict_and_parse( - model=model, - chat_prompt=chat_prompt, - prompt_input=prompt_input, - pydantic_object=SplitGradingInstructions, - tags=[ - f"exercise-{exercise.id}", - f"submission-{submission.id}", - "split-grading-instructions-by-file", - ], - ) - - if debug: - emit_meta( - "file_grading_instructions", - { - "prompt": chat_prompt.format(**prompt_input), - "result": split_grading_instructions.dict() - if split_grading_instructions is not None - else None, - }, - ) - - if split_grading_instructions is None or not split_grading_instructions.items: - return None - - # Join duplicate file names (some responses contain multiple grading instructions for the same file) - file_grading_instructions_by_file_name = defaultdict(list) - for file_grading_instruction in split_grading_instructions.items: - 
file_grading_instructions_by_file_name[ - file_grading_instruction.file_name - ].append(file_grading_instruction) - - split_grading_instructions.items = [ - FileGradingInstruction( - file_name=file_name, - grading_instructions="\n".join( - file_grading_instruction.grading_instructions - for file_grading_instruction in file_grading_instructions - ), - ) - for file_name, file_grading_instructions in file_grading_instructions_by_file_name.items() - ] - - return split_grading_instructions diff --git a/modules/programming/module_programming_llm/module_programming_llm/split_problem_statement_by_file.py b/modules/programming/module_programming_llm/module_programming_llm/split_problem_statement_by_file.py deleted file mode 100644 index aecf516ac..000000000 --- a/modules/programming/module_programming_llm/module_programming_llm/split_problem_statement_by_file.py +++ /dev/null @@ -1,137 +0,0 @@ -from typing import Optional, Sequence -from collections import defaultdict - -from pydantic import BaseModel, Field -from langchain.prompts import ChatPromptTemplate - -from athena import emit_meta -from athena.programming import Exercise, Submission - -from module_programming_llm.config import GradedBasicApproachConfig, BasicApproachConfig -from llm_core.utils.llm_utils import ( - get_chat_prompt_with_formatting_instructions, - num_tokens_from_string, - num_tokens_from_prompt, -) -from llm_core.utils.predict_and_parse import predict_and_parse - -from module_programming_llm.helpers.utils import get_diff - - -class FileProblemStatement(BaseModel): - file_name: str = Field(description="File name") - problem_statement: str = Field(description="Problem statement relevant for this file") - - -class SplitProblemStatement(BaseModel): - """Collection of problem statements split by file""" - - items: Sequence[FileProblemStatement] = Field(description="File problem statements") - - -# pylint: disable=too-many-locals -async def split_problem_statement_by_file( - exercise: Exercise, - 
submission: Submission, - prompt: ChatPromptTemplate, - config: BasicApproachConfig, - debug: bool - ) -> Optional[SplitProblemStatement]: - """Split the general problem statement by file - - Args: - exercise (Exercise): Exercise to split the problem statement for (respecting the changed files) - submission (Submission): Submission to split the problem statement for (respecting the changed files) - prompt (ChatPromptTemplate): Prompt template to check for problem_statement - config (GradedBasicApproachConfig): Configuration - - Returns: - Optional[SplitProblemStatement]: Split problem statement, None if it is too short or too long - """ - - # Return None if the problem statement is too short - if num_tokens_from_string(exercise.problem_statement or "") <= config.split_problem_statement_by_file_prompt.tokens_before_split: - return None - - # Return None if the problem statement not in the prompt - if "problem_statement" not in prompt.input_variables: - return None - - model = config.model.get_model() # type: ignore[attr-defined] - - template_repo = exercise.get_template_repository() - submission_repo = submission.get_repository() - - changed_files_from_template_to_submission = get_diff( - src_repo=template_repo, - dst_repo=submission_repo, - file_path=None, - name_only=True - ).split("\n") - - chat_prompt = get_chat_prompt_with_formatting_instructions( - model=model, - system_message=config.split_problem_statement_by_file_prompt.system_message, - human_message=config.split_problem_statement_by_file_prompt.human_message, - pydantic_object=SplitProblemStatement - ) - - prompt_input = { - "problem_statement": exercise.problem_statement or "No problem statement.", - "changed_files_from_template_to_submission": ", ".join(changed_files_from_template_to_submission) - } - - if "changed_files_from_template_to_solution" in chat_prompt.input_variables: - solution_repo = exercise.get_solution_repository() - changed_files_from_template_to_solution = get_diff( - 
src_repo=template_repo, - dst_repo=solution_repo, - file_path=None, - name_only=True, - ).split("\n") - prompt_input["changed_files_from_template_to_solution"] = ", ".join( - changed_files_from_template_to_solution - ) - - # Return None if the prompt is too long - if num_tokens_from_prompt(chat_prompt, prompt_input) > config.max_input_tokens: - return None - - split_problem_statement = await predict_and_parse( - model=model, - chat_prompt=chat_prompt, - prompt_input=prompt_input, - pydantic_object=SplitProblemStatement, - tags=[ - f"exercise-{exercise.id}", - f"submission-{submission.id}", - "split-problem-statement-by-file" - ] - ) - - if debug: - emit_meta("file_problem_statements", { - "prompt": chat_prompt.format(**prompt_input), - "result": split_problem_statement.dict() if split_problem_statement is not None else None - }) - - if split_problem_statement is None or not split_problem_statement.items: - return None - - # Join duplicate file names (some responses contain multiple problem statements for the same file) - file_problem_statements_by_file_name = defaultdict(list) - for file_problem_statement in split_problem_statement.items: - file_problem_statements_by_file_name[file_problem_statement.file_name].append(file_problem_statement) - - split_problem_statement.items = [ - FileProblemStatement( - file_name=file_name, - problem_statement="\n".join( - file_problem_statement.problem_statement - for file_problem_statement in file_problem_statements - ) - ) - for file_name, file_problem_statements in file_problem_statements_by_file_name.items() - ] - - return split_problem_statement diff --git a/modules/programming/module_programming_llm/poetry.lock b/modules/programming/module_programming_llm/poetry.lock index 9f91758c4..d65a1b00a 100644 --- a/modules/programming/module_programming_llm/poetry.lock +++ b/modules/programming/module_programming_llm/poetry.lock @@ -136,15 +136,26 @@ files = [ [package.dependencies] frozenlist = ">=1.1.0" +[[package]] +name = 
"annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + [[package]] name = "anyio" -version = "4.6.0" +version = "4.6.2.post1" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.9" files = [ - {file = "anyio-4.6.0-py3-none-any.whl", hash = "sha256:c7d2e9d63e31599eeb636c8c5c03a7e108d73b345f064f1c19fdc87b79036a9a"}, - {file = "anyio-4.6.0.tar.gz", hash = "sha256:137b4559cbb034c477165047febb6ff83f390fc3b20bf181c1fc0a728cb8beeb"}, + {file = "anyio-4.6.2.post1-py3-none-any.whl", hash = "sha256:6d170c36fba3bdd840c73d3868c1e777e33676a69c3a72cf0a0d5d6d8009b61d"}, + {file = "anyio-4.6.2.post1.tar.gz", hash = "sha256:4c8bc31ccdb51c7f7bd251f51c609e038d63e34219b44aa86e47576389880b4c"}, ] [package.dependencies] @@ -153,7 +164,7 @@ sniffio = ">=1.1" [package.extras] doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.21.0b1)"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"] trio = ["trio (>=0.26.1)"] [[package]] @@ -207,6 +218,52 @@ docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphi tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", 
"pytest-xdist[psutil]"] tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] +[[package]] +name = "beautifulsoup4" +version = "4.12.3" +description = "Screen-scraping library" +optional = false +python-versions = ">=3.6.0" +files = [ + {file = "beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed"}, + {file = "beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051"}, +] + +[package.dependencies] +soupsieve = ">1.2" + +[package.extras] +cchardet = ["cchardet"] +chardet = ["chardet"] +charset-normalizer = ["charset-normalizer"] +html5lib = ["html5lib"] +lxml = ["lxml"] + +[[package]] +name = "bs4" +version = "0.0.2" +description = "Dummy package for Beautiful Soup (beautifulsoup4)" +optional = false +python-versions = "*" +files = [ + {file = "bs4-0.0.2-py2.py3-none-any.whl", hash = "sha256:abf8742c0805ef7f662dce4b51cca104cffe52b835238afc169142ab9b3fbccc"}, + {file = "bs4-0.0.2.tar.gz", hash = "sha256:a48685c58f50fe127722417bae83fe6badf500d54b55f7e39ffe43b798653925"}, +] + +[package.dependencies] +beautifulsoup4 = "*" + +[[package]] +name = "cachetools" +version = "5.5.0" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +files = [ + {file = "cachetools-5.5.0-py3-none-any.whl", hash = "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292"}, + {file = "cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a"}, +] + [[package]] name = "certifi" version = "2024.8.30" @@ -460,88 +517,103 @@ flake8 = "*" [[package]] name = "frozenlist" -version = "1.4.1" +version = "1.5.0" description = "A list-like structure which implements collections.abc.MutableSequence" optional = false python-versions = ">=3.8" files = [ - {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = 
"sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"}, - {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"}, - {file = "frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc"}, - {file = "frozenlist-1.4.1-cp310-cp310-win32.whl", hash = "sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1"}, - {file = "frozenlist-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439"}, - {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0"}, - {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49"}, - {file = "frozenlist-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = 
"sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2"}, - {file = "frozenlist-1.4.1-cp311-cp311-win32.whl", hash = "sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17"}, - {file = "frozenlist-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825"}, - {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae"}, - {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb"}, - {file = "frozenlist-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a"}, - {file = 
"frozenlist-1.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8"}, - {file = "frozenlist-1.4.1-cp312-cp312-win32.whl", hash = "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89"}, - {file = "frozenlist-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5"}, - {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d"}, - {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826"}, - {file = "frozenlist-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7"}, - {file = "frozenlist-1.4.1-cp38-cp38-win32.whl", hash = "sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497"}, - {file = "frozenlist-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09"}, - {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e"}, - {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d"}, - {file = 
"frozenlist-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6"}, - {file = "frozenlist-1.4.1-cp39-cp39-win32.whl", hash = "sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932"}, - {file = "frozenlist-1.4.1-cp39-cp39-win_amd64.whl", hash = 
"sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0"}, - {file = "frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7"}, - {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"}, + {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5b6a66c18b5b9dd261ca98dffcb826a525334b2f29e7caa54e182255c5f6a65a"}, + {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d1b3eb7b05ea246510b43a7e53ed1653e55c2121019a97e60cad7efb881a97bb"}, + {file = "frozenlist-1.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:15538c0cbf0e4fa11d1e3a71f823524b0c46299aed6e10ebb4c2089abd8c3bec"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e79225373c317ff1e35f210dd5f1344ff31066ba8067c307ab60254cd3a78ad5"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9272fa73ca71266702c4c3e2d4a28553ea03418e591e377a03b8e3659d94fa76"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:498524025a5b8ba81695761d78c8dd7382ac0b052f34e66939c42df860b8ff17"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92b5278ed9d50fe610185ecd23c55d8b307d75ca18e94c0e7de328089ac5dcba"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f3c8c1dacd037df16e85227bac13cca58c30da836c6f936ba1df0c05d046d8d"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f2ac49a9bedb996086057b75bf93538240538c6d9b38e57c82d51f75a73409d2"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_i686.whl", hash = 
"sha256:e66cc454f97053b79c2ab09c17fbe3c825ea6b4de20baf1be28919460dd7877f"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:5a3ba5f9a0dfed20337d3e966dc359784c9f96503674c2faf015f7fe8e96798c"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6321899477db90bdeb9299ac3627a6a53c7399c8cd58d25da094007402b039ab"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:76e4753701248476e6286f2ef492af900ea67d9706a0155335a40ea21bf3b2f5"}, + {file = "frozenlist-1.5.0-cp310-cp310-win32.whl", hash = "sha256:977701c081c0241d0955c9586ffdd9ce44f7a7795df39b9151cd9a6fd0ce4cfb"}, + {file = "frozenlist-1.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:189f03b53e64144f90990d29a27ec4f7997d91ed3d01b51fa39d2dbe77540fd4"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fd74520371c3c4175142d02a976aee0b4cb4a7cc912a60586ffd8d5929979b30"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2f3f7a0fbc219fb4455264cae4d9f01ad41ae6ee8524500f381de64ffaa077d5"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f47c9c9028f55a04ac254346e92977bf0f166c483c74b4232bee19a6697e4778"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0996c66760924da6e88922756d99b47512a71cfd45215f3570bf1e0b694c206a"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2fe128eb4edeabe11896cb6af88fca5346059f6c8d807e3b910069f39157869"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a8ea951bbb6cacd492e3948b8da8c502a3f814f5d20935aae74b5df2b19cf3d"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de537c11e4aa01d37db0d403b57bd6f0546e71a82347a97c6a9f0dcc532b3a45"}, + {file = 
"frozenlist-1.5.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c2623347b933fcb9095841f1cc5d4ff0b278addd743e0e966cb3d460278840d"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cee6798eaf8b1416ef6909b06f7dc04b60755206bddc599f52232606e18179d3"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f5f9da7f5dbc00a604fe74aa02ae7c98bcede8a3b8b9666f9f86fc13993bc71a"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:90646abbc7a5d5c7c19461d2e3eeb76eb0b204919e6ece342feb6032c9325ae9"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:bdac3c7d9b705d253b2ce370fde941836a5f8b3c5c2b8fd70940a3ea3af7f4f2"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:03d33c2ddbc1816237a67f66336616416e2bbb6beb306e5f890f2eb22b959cdf"}, + {file = "frozenlist-1.5.0-cp311-cp311-win32.whl", hash = "sha256:237f6b23ee0f44066219dae14c70ae38a63f0440ce6750f868ee08775073f942"}, + {file = "frozenlist-1.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:0cc974cc93d32c42e7b0f6cf242a6bd941c57c61b618e78b6c0a96cb72788c1d"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:31115ba75889723431aa9a4e77d5f398f5cf976eea3bdf61749731f62d4a4a21"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7437601c4d89d070eac8323f121fcf25f88674627505334654fd027b091db09d"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7948140d9f8ece1745be806f2bfdf390127cf1a763b925c4a805c603df5e697e"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feeb64bc9bcc6b45c6311c9e9b99406660a9c05ca8a5b30d14a78555088b0b3a"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:683173d371daad49cffb8309779e886e59c2f369430ad28fe715f66d08d4ab1a"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7d57d8f702221405a9d9b40f9da8ac2e4a1a8b5285aac6100f3393675f0a85ee"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30c72000fbcc35b129cb09956836c7d7abf78ab5416595e4857d1cae8d6251a6"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:000a77d6034fbad9b6bb880f7ec073027908f1b40254b5d6f26210d2dab1240e"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5d7f5a50342475962eb18b740f3beecc685a15b52c91f7d975257e13e029eca9"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:87f724d055eb4785d9be84e9ebf0f24e392ddfad00b3fe036e43f489fafc9039"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:6e9080bb2fb195a046e5177f10d9d82b8a204c0736a97a153c2466127de87784"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9b93d7aaa36c966fa42efcaf716e6b3900438632a626fb09c049f6a2f09fc631"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:52ef692a4bc60a6dd57f507429636c2af8b6046db8b31b18dac02cbc8f507f7f"}, + {file = "frozenlist-1.5.0-cp312-cp312-win32.whl", hash = "sha256:29d94c256679247b33a3dc96cce0f93cbc69c23bf75ff715919332fdbb6a32b8"}, + {file = "frozenlist-1.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:8969190d709e7c48ea386db202d708eb94bdb29207a1f269bab1196ce0dcca1f"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7a1a048f9215c90973402e26c01d1cff8a209e1f1b53f72b95c13db61b00f953"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dd47a5181ce5fcb463b5d9e17ecfdb02b678cca31280639255ce9d0e5aa67af0"}, + {file = 
"frozenlist-1.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1431d60b36d15cda188ea222033eec8e0eab488f39a272461f2e6d9e1a8e63c2"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6482a5851f5d72767fbd0e507e80737f9c8646ae7fd303def99bfe813f76cf7f"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44c49271a937625619e862baacbd037a7ef86dd1ee215afc298a417ff3270608"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:12f78f98c2f1c2429d42e6a485f433722b0061d5c0b0139efa64f396efb5886b"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce3aa154c452d2467487765e3adc730a8c153af77ad84096bc19ce19a2400840"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b7dc0c4338e6b8b091e8faf0db3168a37101943e687f373dce00959583f7439"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45e0896250900b5aa25180f9aec243e84e92ac84bd4a74d9ad4138ef3f5c97de"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:561eb1c9579d495fddb6da8959fd2a1fca2c6d060d4113f5844b433fc02f2641"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:df6e2f325bfee1f49f81aaac97d2aa757c7646534a06f8f577ce184afe2f0a9e"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:140228863501b44b809fb39ec56b5d4071f4d0aa6d216c19cbb08b8c5a7eadb9"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7707a25d6a77f5d27ea7dc7d1fc608aa0a478193823f88511ef5e6b8a48f9d03"}, + {file = "frozenlist-1.5.0-cp313-cp313-win32.whl", hash = "sha256:31a9ac2b38ab9b5a8933b693db4939764ad3f299fcaa931a3e605bc3460e693c"}, + {file = 
"frozenlist-1.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:11aabdd62b8b9c4b84081a3c246506d1cddd2dd93ff0ad53ede5defec7886b28"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:dd94994fc91a6177bfaafd7d9fd951bc8689b0a98168aa26b5f543868548d3ca"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2d0da8bbec082bf6bf18345b180958775363588678f64998c2b7609e34719b10"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:73f2e31ea8dd7df61a359b731716018c2be196e5bb3b74ddba107f694fbd7604"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:828afae9f17e6de596825cf4228ff28fbdf6065974e5ac1410cecc22f699d2b3"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1577515d35ed5649d52ab4319db757bb881ce3b2b796d7283e6634d99ace307"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2150cc6305a2c2ab33299453e2968611dacb970d2283a14955923062c8d00b10"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a72b7a6e3cd2725eff67cd64c8f13335ee18fc3c7befc05aed043d24c7b9ccb9"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c16d2fa63e0800723139137d667e1056bee1a1cf7965153d2d104b62855e9b99"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:17dcc32fc7bda7ce5875435003220a457bcfa34ab7924a49a1c19f55b6ee185c"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:97160e245ea33d8609cd2b8fd997c850b56db147a304a262abc2b3be021a9171"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f1e6540b7fa044eee0bb5111ada694cf3dc15f2b0347ca125ee9ca984d5e9e6e"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_s390x.whl", 
hash = "sha256:91d6c171862df0a6c61479d9724f22efb6109111017c87567cfeb7b5d1449fdf"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c1fac3e2ace2eb1052e9f7c7db480818371134410e1f5c55d65e8f3ac6d1407e"}, + {file = "frozenlist-1.5.0-cp38-cp38-win32.whl", hash = "sha256:b97f7b575ab4a8af9b7bc1d2ef7f29d3afee2226bd03ca3875c16451ad5a7723"}, + {file = "frozenlist-1.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:374ca2dabdccad8e2a76d40b1d037f5bd16824933bf7bcea3e59c891fd4a0923"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9bbcdfaf4af7ce002694a4e10a0159d5a8d20056a12b05b45cea944a4953f972"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1893f948bf6681733aaccf36c5232c231e3b5166d607c5fa77773611df6dc336"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2b5e23253bb709ef57a8e95e6ae48daa9ac5f265637529e4ce6b003a37b2621f"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f253985bb515ecd89629db13cb58d702035ecd8cfbca7d7a7e29a0e6d39af5f"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04a5c6babd5e8fb7d3c871dc8b321166b80e41b637c31a995ed844a6139942b6"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9fe0f1c29ba24ba6ff6abf688cb0b7cf1efab6b6aa6adc55441773c252f7411"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:226d72559fa19babe2ccd920273e767c96a49b9d3d38badd7c91a0fdeda8ea08"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15b731db116ab3aedec558573c1a5eec78822b32292fe4f2f0345b7f697745c2"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:366d8f93e3edfe5a918c874702f78faac300209a4d5bf38352b2c1bdc07a766d"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1b96af8c582b94d381a1c1f51ffaedeb77c821c690ea5f01da3d70a487dd0a9b"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:c03eff4a41bd4e38415cbed054bbaff4a075b093e2394b6915dca34a40d1e38b"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:50cf5e7ee9b98f22bdecbabf3800ae78ddcc26e4a435515fc72d97903e8488e0"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1e76bfbc72353269c44e0bc2cfe171900fbf7f722ad74c9a7b638052afe6a00c"}, + {file = "frozenlist-1.5.0-cp39-cp39-win32.whl", hash = "sha256:666534d15ba8f0fda3f53969117383d5dc021266b3c1a42c9ec4855e4b58b9d3"}, + {file = "frozenlist-1.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:5c28f4b5dbef8a0d8aad0d4de24d1e9e981728628afaf4ea0792f5d0939372f0"}, + {file = "frozenlist-1.5.0-py3-none-any.whl", hash = "sha256:d994863bba198a4a518b467bb971c56e1db3f180a25c6cf7bb1949c267f748c3"}, + {file = "frozenlist-1.5.0.tar.gz", hash = "sha256:81d5af29e61b9c8348e876d442253723928dce6433e0e76cd925cd83f1b4b817"}, ] [[package]] @@ -575,6 +647,104 @@ gitdb = ">=4.0.1,<5" [package.extras] test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", "pytest-instafail", "pytest-mock", "pytest-sugar", "sumtypes"] +[[package]] +name = "google-api-core" +version = "2.11.1" +description = "Google API client core library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-api-core-2.11.1.tar.gz", hash = "sha256:25d29e05a0058ed5f19c61c0a78b1b53adea4d9364b464d014fbda941f6d1c9a"}, + {file = "google_api_core-2.11.1-py3-none-any.whl", hash = "sha256:d92a5a92dc36dd4f4b9ee4e55528a90e432b059f93aee6ad857f9de8cc7ae94a"}, +] + +[package.dependencies] +google-auth = ">=2.14.1,<3.0.dev0" +googleapis-common-protos = ">=1.56.2,<2.0.dev0" +protobuf = 
">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0" +requests = ">=2.18.0,<3.0.0.dev0" + +[package.extras] +grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"] +grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] +grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] + +[[package]] +name = "google-api-python-client" +version = "2.95.0" +description = "Google API Client Library for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-api-python-client-2.95.0.tar.gz", hash = "sha256:d2731ede12f79e53fbe11fdb913dfe986440b44c0a28431c78a8ec275f4c1541"}, + {file = "google_api_python_client-2.95.0-py2.py3-none-any.whl", hash = "sha256:a8aab2da678f42a01f2f52108f787fef4310f23f9dd917c4e64664c3f0c885ba"}, +] + +[package.dependencies] +google-api-core = ">=1.31.5,<2.0.dev0 || >2.3.0,<3.0.0.dev0" +google-auth = ">=1.19.0,<3.0.0.dev0" +google-auth-httplib2 = ">=0.1.0" +httplib2 = ">=0.15.0,<1.dev0" +uritemplate = ">=3.0.1,<5" + +[[package]] +name = "google-auth" +version = "2.22.0" +description = "Google Authentication Library" +optional = false +python-versions = ">=3.6" +files = [ + {file = "google-auth-2.22.0.tar.gz", hash = "sha256:164cba9af4e6e4e40c3a4f90a1a6c12ee56f14c0b4868d1ca91b32826ab334ce"}, + {file = "google_auth-2.22.0-py2.py3-none-any.whl", hash = "sha256:d61d1b40897407b574da67da1a833bdc10d5a11642566e506565d1b1a46ba873"}, +] + +[package.dependencies] +cachetools = ">=2.0.0,<6.0" +pyasn1-modules = ">=0.2.1" +rsa = ">=3.1.4,<5" +six = ">=1.9.0" +urllib3 = "<2.0" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] +enterprise-cert = ["cryptography (==36.0.2)", "pyopenssl (==22.0.0)"] +pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] +reauth = ["pyu2f (>=0.1.5)"] +requests = 
["requests (>=2.20.0,<3.0.0.dev0)"] + +[[package]] +name = "google-auth-httplib2" +version = "0.1.0" +description = "Google Authentication Library: httplib2 transport" +optional = false +python-versions = "*" +files = [ + {file = "google-auth-httplib2-0.1.0.tar.gz", hash = "sha256:a07c39fd632becacd3f07718dfd6021bf396978f03ad3ce4321d060015cc30ac"}, + {file = "google_auth_httplib2-0.1.0-py2.py3-none-any.whl", hash = "sha256:31e49c36c6b5643b57e82617cb3e021e3e1d2df9da63af67252c02fa9c1f4a10"}, +] + +[package.dependencies] +google-auth = "*" +httplib2 = ">=0.15.0" +six = "*" + +[[package]] +name = "googleapis-common-protos" +version = "1.59.1" +description = "Common protobufs used in Google APIs" +optional = false +python-versions = ">=3.7" +files = [ + {file = "googleapis-common-protos-1.59.1.tar.gz", hash = "sha256:b35d530fe825fb4227857bc47ad84c33c809ac96f312e13182bdeaa2abe1178a"}, + {file = "googleapis_common_protos-1.59.1-py2.py3-none-any.whl", hash = "sha256:0cbedb6fb68f1c07e18eb4c48256320777707e7d0c55063ae56c15db3224a61e"}, +] + +[package.dependencies] +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0" + +[package.extras] +grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] + [[package]] name = "greenlet" version = "3.1.1" @@ -693,6 +863,20 @@ sniffio = "==1.*" http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] +[[package]] +name = "httplib2" +version = "0.22.0" +description = "A comprehensive HTTP client library." 
+optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "httplib2-0.22.0-py3-none-any.whl", hash = "sha256:14ae0a53c1ba8f3d37e9e27cf37eabb0fb9980f435ba405d546948b009dd64dc"}, + {file = "httplib2-0.22.0.tar.gz", hash = "sha256:d7a10bc5ef5ab08322488bde8c726eeee5c8618723fdb399597ec58f3d82df81"}, +] + +[package.dependencies] +pyparsing = {version = ">=2.4.2,<3.0.0 || >3.0.0,<3.0.1 || >3.0.1,<3.0.2 || >3.0.2,<3.0.3 || >3.0.3,<4", markers = "python_version > \"3.0\""} + [[package]] name = "httpx" version = "0.24.1" @@ -960,13 +1144,13 @@ langchain-core = ">=0.2.38,<0.3.0" [[package]] name = "langsmith" -version = "0.1.134" +version = "0.1.137" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.134-py3-none-any.whl", hash = "sha256:ada98ad80ef38807725f32441a472da3dd28394010877751f48f458d3289da04"}, - {file = "langsmith-0.1.134.tar.gz", hash = "sha256:23abee3b508875a0e63c602afafffc02442a19cfd88f9daae05b3e9054fd6b61"}, + {file = "langsmith-0.1.137-py3-none-any.whl", hash = "sha256:4256d5c61133749890f7b5c88321dbb133ce0f440c621ea28e76513285859b81"}, + {file = "langsmith-0.1.137.tar.gz", hash = "sha256:56cdfcc6c74cb20a3f437d5bd144feb5bf93f54c5a2918d1e568cbd084a372d4"}, ] [package.dependencies] @@ -1003,22 +1187,22 @@ url = "../../../llm_core" [[package]] name = "marshmallow" -version = "3.22.0" +version = "3.23.0" description = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "marshmallow-3.22.0-py3-none-any.whl", hash = "sha256:71a2dce49ef901c3f97ed296ae5051135fd3febd2bf43afe0ae9a82143a494d9"}, - {file = "marshmallow-3.22.0.tar.gz", hash = "sha256:4972f529104a220bb8637d595aa4c9762afbe7f7a77d82dc58c1615d70c5823e"}, + {file = "marshmallow-3.23.0-py3-none-any.whl", hash = "sha256:82f20a2397834fe6d9611b241f2f7e7b680ed89c49f84728a1ad937be6b4bdf4"}, + {file = "marshmallow-3.23.0.tar.gz", hash = "sha256:98d8827a9f10c03d44ead298d2e99c6aea8197df18ccfad360dae7f89a50da2e"}, ] [package.dependencies] packaging = ">=17.0" [package.extras] -dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] -docs = ["alabaster (==1.0.0)", "autodocsumm (==0.2.13)", "sphinx (==8.0.2)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"] -tests = ["pytest", "pytz", "simplejson"] +dev = ["marshmallow[tests]", "pre-commit (>=3.5,<5.0)", "tox"] +docs = ["alabaster (==1.0.0)", "autodocsumm (==0.2.13)", "sphinx (==8.1.3)", "sphinx-issues (==5.0.0)", "sphinx-version-warning (==1.1.2)"] +tests = ["pytest", "simplejson"] [[package]] name = "mccabe" @@ -1134,38 +1318,43 @@ files = [ [[package]] name = "mypy" -version = "1.11.2" +version = "1.13.0" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.11.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d42a6dd818ffce7be66cce644f1dff482f1d97c53ca70908dff0b9ddc120b77a"}, - {file = "mypy-1.11.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:801780c56d1cdb896eacd5619a83e427ce436d86a3bdf9112527f24a66618fef"}, - {file = "mypy-1.11.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41ea707d036a5307ac674ea172875f40c9d55c5394f888b168033177fce47383"}, - {file = "mypy-1.11.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e658bd2d20565ea86da7d91331b0eed6d2eee22dc031579e6297f3e12c758c8"}, - {file = 
"mypy-1.11.2-cp310-cp310-win_amd64.whl", hash = "sha256:478db5f5036817fe45adb7332d927daa62417159d49783041338921dcf646fc7"}, - {file = "mypy-1.11.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75746e06d5fa1e91bfd5432448d00d34593b52e7e91a187d981d08d1f33d4385"}, - {file = "mypy-1.11.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a976775ab2256aadc6add633d44f100a2517d2388906ec4f13231fafbb0eccca"}, - {file = "mypy-1.11.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cd953f221ac1379050a8a646585a29574488974f79d8082cedef62744f0a0104"}, - {file = "mypy-1.11.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:57555a7715c0a34421013144a33d280e73c08df70f3a18a552938587ce9274f4"}, - {file = "mypy-1.11.2-cp311-cp311-win_amd64.whl", hash = "sha256:36383a4fcbad95f2657642a07ba22ff797de26277158f1cc7bd234821468b1b6"}, - {file = "mypy-1.11.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e8960dbbbf36906c5c0b7f4fbf2f0c7ffb20f4898e6a879fcf56a41a08b0d318"}, - {file = "mypy-1.11.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:06d26c277962f3fb50e13044674aa10553981ae514288cb7d0a738f495550b36"}, - {file = "mypy-1.11.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6e7184632d89d677973a14d00ae4d03214c8bc301ceefcdaf5c474866814c987"}, - {file = "mypy-1.11.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3a66169b92452f72117e2da3a576087025449018afc2d8e9bfe5ffab865709ca"}, - {file = "mypy-1.11.2-cp312-cp312-win_amd64.whl", hash = "sha256:969ea3ef09617aff826885a22ece0ddef69d95852cdad2f60c8bb06bf1f71f70"}, - {file = "mypy-1.11.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:37c7fa6121c1cdfcaac97ce3d3b5588e847aa79b580c1e922bb5d5d2902df19b"}, - {file = "mypy-1.11.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4a8a53bc3ffbd161b5b2a4fff2f0f1e23a33b0168f1c0778ec70e1a3d66deb86"}, - {file = 
"mypy-1.11.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ff93107f01968ed834f4256bc1fc4475e2fecf6c661260066a985b52741ddce"}, - {file = "mypy-1.11.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:edb91dded4df17eae4537668b23f0ff6baf3707683734b6a818d5b9d0c0c31a1"}, - {file = "mypy-1.11.2-cp38-cp38-win_amd64.whl", hash = "sha256:ee23de8530d99b6db0573c4ef4bd8f39a2a6f9b60655bf7a1357e585a3486f2b"}, - {file = "mypy-1.11.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:801ca29f43d5acce85f8e999b1e431fb479cb02d0e11deb7d2abb56bdaf24fd6"}, - {file = "mypy-1.11.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af8d155170fcf87a2afb55b35dc1a0ac21df4431e7d96717621962e4b9192e70"}, - {file = "mypy-1.11.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f7821776e5c4286b6a13138cc935e2e9b6fde05e081bdebf5cdb2bb97c9df81d"}, - {file = "mypy-1.11.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:539c570477a96a4e6fb718b8d5c3e0c0eba1f485df13f86d2970c91f0673148d"}, - {file = "mypy-1.11.2-cp39-cp39-win_amd64.whl", hash = "sha256:3f14cd3d386ac4d05c5a39a51b84387403dadbd936e17cb35882134d4f8f0d24"}, - {file = "mypy-1.11.2-py3-none-any.whl", hash = "sha256:b499bc07dbdcd3de92b0a8b29fdf592c111276f6a12fe29c30f6c417dd546d12"}, - {file = "mypy-1.11.2.tar.gz", hash = "sha256:7f9993ad3e0ffdc95c2a14b66dee63729f021968bff8ad911867579c65d13a79"}, + {file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"}, + {file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"}, + {file = "mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"}, + {file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"}, + {file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"}, + {file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"}, + {file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"}, + {file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"}, + {file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"}, + {file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"}, + {file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"}, + {file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"}, + {file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"}, + {file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"}, + {file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"}, + {file = "mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"}, + {file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"}, + {file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"}, + {file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"}, + {file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"}, + {file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"}, + {file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"}, + {file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"}, + {file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"}, + {file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"}, + {file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"}, + {file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"}, + {file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"}, + {file = "mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"}, + {file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"}, + {file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"}, + {file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"}, ] [package.dependencies] @@ -1174,6 +1363,7 @@ typing-extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] +faster-cache = ["orjson"] install-types = ["pip"] mypyc = ["setuptools (>=50)"] reports = ["lxml"] @@ -1285,68 +1475,69 @@ datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] [[package]] name = "orjson" -version = "3.10.7" +version = "3.10.10" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.8" files = [ - {file = "orjson-3.10.7-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:74f4544f5a6405b90da8ea724d15ac9c36da4d72a738c64685003337401f5c12"}, - {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34a566f22c28222b08875b18b0dfbf8a947e69df21a9ed5c51a6bf91cfb944ac"}, - {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bf6ba8ebc8ef5792e2337fb0419f8009729335bb400ece005606336b7fd7bab7"}, - {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac7cf6222b29fbda9e3a472b41e6a5538b48f2c8f99261eecd60aafbdb60690c"}, - {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de817e2f5fc75a9e7dd350c4b0f54617b280e26d1631811a43e7e968fa71e3e9"}, - {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:348bdd16b32556cf8d7257b17cf2bdb7ab7976af4af41ebe79f9796c218f7e91"}, - {file = "orjson-3.10.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:479fd0844ddc3ca77e0fd99644c7fe2de8e8be1efcd57705b5c92e5186e8a250"}, - {file = "orjson-3.10.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fdf5197a21dd660cf19dfd2a3ce79574588f8f5e2dbf21bda9ee2d2b46924d84"}, - {file = "orjson-3.10.7-cp310-none-win32.whl", hash = "sha256:d374d36726746c81a49f3ff8daa2898dccab6596864ebe43d50733275c629175"}, - {file = "orjson-3.10.7-cp310-none-win_amd64.whl", hash = "sha256:cb61938aec8b0ffb6eef484d480188a1777e67b05d58e41b435c74b9d84e0b9c"}, - {file = "orjson-3.10.7-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:7db8539039698ddfb9a524b4dd19508256107568cdad24f3682d5773e60504a2"}, - {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:480f455222cb7a1dea35c57a67578848537d2602b46c464472c995297117fa09"}, - {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8a9c9b168b3a19e37fe2778c0003359f07822c90fdff8f98d9d2a91b3144d8e0"}, - {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8de062de550f63185e4c1c54151bdddfc5625e37daf0aa1e75d2a1293e3b7d9a"}, - {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6b0dd04483499d1de9c8f6203f8975caf17a6000b9c0c54630cef02e44ee624e"}, - {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b58d3795dafa334fc8fd46f7c5dc013e6ad06fd5b9a4cc98cb1456e7d3558bd6"}, - {file = "orjson-3.10.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:33cfb96c24034a878d83d1a9415799a73dc77480e6c40417e5dda0710d559ee6"}, - {file = "orjson-3.10.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e724cebe1fadc2b23c6f7415bad5ee6239e00a69f30ee423f319c6af70e2a5c0"}, - {file = "orjson-3.10.7-cp311-none-win32.whl", hash = "sha256:82763b46053727a7168d29c772ed5c870fdae2f61aa8a25994c7984a19b1021f"}, - {file = 
"orjson-3.10.7-cp311-none-win_amd64.whl", hash = "sha256:eb8d384a24778abf29afb8e41d68fdd9a156cf6e5390c04cc07bbc24b89e98b5"}, - {file = "orjson-3.10.7-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:44a96f2d4c3af51bfac6bc4ef7b182aa33f2f054fd7f34cc0ee9a320d051d41f"}, - {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76ac14cd57df0572453543f8f2575e2d01ae9e790c21f57627803f5e79b0d3c3"}, - {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bdbb61dcc365dd9be94e8f7df91975edc9364d6a78c8f7adb69c1cdff318ec93"}, - {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b48b3db6bb6e0a08fa8c83b47bc169623f801e5cc4f24442ab2b6617da3b5313"}, - {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23820a1563a1d386414fef15c249040042b8e5d07b40ab3fe3efbfbbcbcb8864"}, - {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0c6a008e91d10a2564edbb6ee5069a9e66df3fbe11c9a005cb411f441fd2c09"}, - {file = "orjson-3.10.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d352ee8ac1926d6193f602cbe36b1643bbd1bbcb25e3c1a657a4390f3000c9a5"}, - {file = "orjson-3.10.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d2d9f990623f15c0ae7ac608103c33dfe1486d2ed974ac3f40b693bad1a22a7b"}, - {file = "orjson-3.10.7-cp312-none-win32.whl", hash = "sha256:7c4c17f8157bd520cdb7195f75ddbd31671997cbe10aee559c2d613592e7d7eb"}, - {file = "orjson-3.10.7-cp312-none-win_amd64.whl", hash = "sha256:1d9c0e733e02ada3ed6098a10a8ee0052dd55774de3d9110d29868d24b17faa1"}, - {file = "orjson-3.10.7-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:77d325ed866876c0fa6492598ec01fe30e803272a6e8b10e992288b009cbe149"}, - {file = "orjson-3.10.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash 
= "sha256:9ea2c232deedcb605e853ae1db2cc94f7390ac776743b699b50b071b02bea6fe"}, - {file = "orjson-3.10.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3dcfbede6737fdbef3ce9c37af3fb6142e8e1ebc10336daa05872bfb1d87839c"}, - {file = "orjson-3.10.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:11748c135f281203f4ee695b7f80bb1358a82a63905f9f0b794769483ea854ad"}, - {file = "orjson-3.10.7-cp313-none-win32.whl", hash = "sha256:a7e19150d215c7a13f39eb787d84db274298d3f83d85463e61d277bbd7f401d2"}, - {file = "orjson-3.10.7-cp313-none-win_amd64.whl", hash = "sha256:eef44224729e9525d5261cc8d28d6b11cafc90e6bd0be2157bde69a52ec83024"}, - {file = "orjson-3.10.7-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6ea2b2258eff652c82652d5e0f02bd5e0463a6a52abb78e49ac288827aaa1469"}, - {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:430ee4d85841e1483d487e7b81401785a5dfd69db5de01314538f31f8fbf7ee1"}, - {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4b6146e439af4c2472c56f8540d799a67a81226e11992008cb47e1267a9b3225"}, - {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:084e537806b458911137f76097e53ce7bf5806dda33ddf6aaa66a028f8d43a23"}, - {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4829cf2195838e3f93b70fd3b4292156fc5e097aac3739859ac0dcc722b27ac0"}, - {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1193b2416cbad1a769f868b1749535d5da47626ac29445803dae7cc64b3f5c98"}, - {file = "orjson-3.10.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:4e6c3da13e5a57e4b3dca2de059f243ebec705857522f188f0180ae88badd354"}, - {file = "orjson-3.10.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c31008598424dfbe52ce8c5b47e0752dca918a4fdc4a2a32004efd9fab41d866"}, - {file = "orjson-3.10.7-cp38-none-win32.whl", 
hash = "sha256:7122a99831f9e7fe977dc45784d3b2edc821c172d545e6420c375e5a935f5a1c"}, - {file = "orjson-3.10.7-cp38-none-win_amd64.whl", hash = "sha256:a763bc0e58504cc803739e7df040685816145a6f3c8a589787084b54ebc9f16e"}, - {file = "orjson-3.10.7-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e76be12658a6fa376fcd331b1ea4e58f5a06fd0220653450f0d415b8fd0fbe20"}, - {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed350d6978d28b92939bfeb1a0570c523f6170efc3f0a0ef1f1df287cd4f4960"}, - {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:144888c76f8520e39bfa121b31fd637e18d4cc2f115727865fdf9fa325b10412"}, - {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09b2d92fd95ad2402188cf51573acde57eb269eddabaa60f69ea0d733e789fe9"}, - {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5b24a579123fa884f3a3caadaed7b75eb5715ee2b17ab5c66ac97d29b18fe57f"}, - {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e72591bcfe7512353bd609875ab38050efe3d55e18934e2f18950c108334b4ff"}, - {file = "orjson-3.10.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f4db56635b58cd1a200b0a23744ff44206ee6aa428185e2b6c4a65b3197abdcd"}, - {file = "orjson-3.10.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0fa5886854673222618638c6df7718ea7fe2f3f2384c452c9ccedc70b4a510a5"}, - {file = "orjson-3.10.7-cp39-none-win32.whl", hash = "sha256:8272527d08450ab16eb405f47e0f4ef0e5ff5981c3d82afe0efd25dcbef2bcd2"}, - {file = "orjson-3.10.7-cp39-none-win_amd64.whl", hash = "sha256:974683d4618c0c7dbf4f69c95a979734bf183d0658611760017f6e70a145af58"}, - {file = "orjson-3.10.7.tar.gz", hash = "sha256:75ef0640403f945f3a1f9f6400686560dbfb0fb5b16589ad62cd477043c4eee3"}, + {file = 
"orjson-3.10.10-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b788a579b113acf1c57e0a68e558be71d5d09aa67f62ca1f68e01117e550a998"}, + {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:804b18e2b88022c8905bb79bd2cbe59c0cd014b9328f43da8d3b28441995cda4"}, + {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9972572a1d042ec9ee421b6da69f7cc823da5962237563fa548ab17f152f0b9b"}, + {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc6993ab1c2ae7dd0711161e303f1db69062955ac2668181bfdf2dd410e65258"}, + {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d78e4cacced5781b01d9bc0f0cd8b70b906a0e109825cb41c1b03f9c41e4ce86"}, + {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6eb2598df518281ba0cbc30d24c5b06124ccf7e19169e883c14e0831217a0bc"}, + {file = "orjson-3.10.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:23776265c5215ec532de6238a52707048401a568f0fa0d938008e92a147fe2c7"}, + {file = "orjson-3.10.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8cc2a654c08755cef90b468ff17c102e2def0edd62898b2486767204a7f5cc9c"}, + {file = "orjson-3.10.10-cp310-none-win32.whl", hash = "sha256:081b3fc6a86d72efeb67c13d0ea7c030017bd95f9868b1e329a376edc456153b"}, + {file = "orjson-3.10.10-cp310-none-win_amd64.whl", hash = "sha256:ff38c5fb749347768a603be1fb8a31856458af839f31f064c5aa74aca5be9efe"}, + {file = "orjson-3.10.10-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:879e99486c0fbb256266c7c6a67ff84f46035e4f8749ac6317cc83dacd7f993a"}, + {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:019481fa9ea5ff13b5d5d95e6fd5ab25ded0810c80b150c2c7b1cc8660b662a7"}, + {file = 
"orjson-3.10.10-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0dd57eff09894938b4c86d4b871a479260f9e156fa7f12f8cad4b39ea8028bb5"}, + {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dbde6d70cd95ab4d11ea8ac5e738e30764e510fc54d777336eec09bb93b8576c"}, + {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2625cb37b8fb42e2147404e5ff7ef08712099197a9cd38895006d7053e69d6"}, + {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbf3c20c6a7db69df58672a0d5815647ecf78c8e62a4d9bd284e8621c1fe5ccb"}, + {file = "orjson-3.10.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:75c38f5647e02d423807d252ce4528bf6a95bd776af999cb1fb48867ed01d1f6"}, + {file = "orjson-3.10.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:23458d31fa50ec18e0ec4b0b4343730928296b11111df5f547c75913714116b2"}, + {file = "orjson-3.10.10-cp311-none-win32.whl", hash = "sha256:2787cd9dedc591c989f3facd7e3e86508eafdc9536a26ec277699c0aa63c685b"}, + {file = "orjson-3.10.10-cp311-none-win_amd64.whl", hash = "sha256:6514449d2c202a75183f807bc755167713297c69f1db57a89a1ef4a0170ee269"}, + {file = "orjson-3.10.10-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8564f48f3620861f5ef1e080ce7cd122ee89d7d6dacf25fcae675ff63b4d6e05"}, + {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5bf161a32b479034098c5b81f2608f09167ad2fa1c06abd4e527ea6bf4837a9"}, + {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:68b65c93617bcafa7f04b74ae8bc2cc214bd5cb45168a953256ff83015c6747d"}, + {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8e28406f97fc2ea0c6150f4c1b6e8261453318930b334abc419214c82314f85"}, + {file = 
"orjson-3.10.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4d0d9fe174cc7a5bdce2e6c378bcdb4c49b2bf522a8f996aa586020e1b96cee"}, + {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3be81c42f1242cbed03cbb3973501fcaa2675a0af638f8be494eaf37143d999"}, + {file = "orjson-3.10.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:65f9886d3bae65be026219c0a5f32dbbe91a9e6272f56d092ab22561ad0ea33b"}, + {file = "orjson-3.10.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:730ed5350147db7beb23ddaf072f490329e90a1d059711d364b49fe352ec987b"}, + {file = "orjson-3.10.10-cp312-none-win32.whl", hash = "sha256:a8f4bf5f1c85bea2170800020d53a8877812892697f9c2de73d576c9307a8a5f"}, + {file = "orjson-3.10.10-cp312-none-win_amd64.whl", hash = "sha256:384cd13579a1b4cd689d218e329f459eb9ddc504fa48c5a83ef4889db7fd7a4f"}, + {file = "orjson-3.10.10-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:44bffae68c291f94ff5a9b4149fe9d1bdd4cd0ff0fb575bcea8351d48db629a1"}, + {file = "orjson-3.10.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e27b4c6437315df3024f0835887127dac2a0a3ff643500ec27088d2588fa5ae1"}, + {file = "orjson-3.10.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca84df16d6b49325a4084fd8b2fe2229cb415e15c46c529f868c3387bb1339d"}, + {file = "orjson-3.10.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c14ce70e8f39bd71f9f80423801b5d10bf93d1dceffdecd04df0f64d2c69bc01"}, + {file = "orjson-3.10.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:24ac62336da9bda1bd93c0491eff0613003b48d3cb5d01470842e7b52a40d5b4"}, + {file = "orjson-3.10.10-cp313-none-win32.whl", hash = "sha256:eb0a42831372ec2b05acc9ee45af77bcaccbd91257345f93780a8e654efc75db"}, + {file = "orjson-3.10.10-cp313-none-win_amd64.whl", hash = "sha256:f0c4f37f8bf3f1075c6cc8dd8a9f843689a4b618628f8812d0a71e6968b95ffd"}, + {file = 
"orjson-3.10.10-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:829700cc18503efc0cf502d630f612884258020d98a317679cd2054af0259568"}, + {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0ceb5e0e8c4f010ac787d29ae6299846935044686509e2f0f06ed441c1ca949"}, + {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0c25908eb86968613216f3db4d3003f1c45d78eb9046b71056ca327ff92bdbd4"}, + {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:218cb0bc03340144b6328a9ff78f0932e642199ac184dd74b01ad691f42f93ff"}, + {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e2277ec2cea3775640dc81ab5195bb5b2ada2fe0ea6eee4677474edc75ea6785"}, + {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:848ea3b55ab5ccc9d7bbd420d69432628b691fba3ca8ae3148c35156cbd282aa"}, + {file = "orjson-3.10.10-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e3e67b537ac0c835b25b5f7d40d83816abd2d3f4c0b0866ee981a045287a54f3"}, + {file = "orjson-3.10.10-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:7948cfb909353fce2135dcdbe4521a5e7e1159484e0bb024c1722f272488f2b8"}, + {file = "orjson-3.10.10-cp38-none-win32.whl", hash = "sha256:78bee66a988f1a333dc0b6257503d63553b1957889c17b2c4ed72385cd1b96ae"}, + {file = "orjson-3.10.10-cp38-none-win_amd64.whl", hash = "sha256:f1d647ca8d62afeb774340a343c7fc023efacfd3a39f70c798991063f0c681dd"}, + {file = "orjson-3.10.10-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:5a059afddbaa6dd733b5a2d76a90dbc8af790b993b1b5cb97a1176ca713b5df8"}, + {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f9b5c59f7e2a1a410f971c5ebc68f1995822837cd10905ee255f96074537ee6"}, + {file = 
"orjson-3.10.10-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d5ef198bafdef4aa9d49a4165ba53ffdc0a9e1c7b6f76178572ab33118afea25"}, + {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aaf29ce0bb5d3320824ec3d1508652421000ba466abd63bdd52c64bcce9eb1fa"}, + {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dddd5516bcc93e723d029c1633ae79c4417477b4f57dad9bfeeb6bc0315e654a"}, + {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a12f2003695b10817f0fa8b8fca982ed7f5761dcb0d93cff4f2f9f6709903fd7"}, + {file = "orjson-3.10.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:672f9874a8a8fb9bb1b771331d31ba27f57702c8106cdbadad8bda5d10bc1019"}, + {file = "orjson-3.10.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1dcbb0ca5fafb2b378b2c74419480ab2486326974826bbf6588f4dc62137570a"}, + {file = "orjson-3.10.10-cp39-none-win32.whl", hash = "sha256:d9bbd3a4b92256875cb058c3381b782649b9a3c68a4aa9a2fff020c2f9cfc1be"}, + {file = "orjson-3.10.10-cp39-none-win_amd64.whl", hash = "sha256:766f21487a53aee8524b97ca9582d5c6541b03ab6210fbaf10142ae2f3ced2aa"}, + {file = "orjson-3.10.10.tar.gz", hash = "sha256:37949383c4df7b4337ce82ee35b6d7471e55195efa7dcb45ab8226ceadb0fe3b"}, ] [[package]] @@ -1392,18 +1583,17 @@ type = ["mypy (>=1.11.2)"] [[package]] name = "promptlayer" -version = "0.1.99" +version = "0.1.96" description = "PromptLayer is a package to keep track of your GPT models training" optional = false -python-versions = ">=3.8.1,<4.0" +python-versions = "*" files = [ - {file = "promptlayer-0.1.99-py3-none-any.whl", hash = "sha256:8cfdb67adfa9ad2508e35a67dd0d1f42aa80afe6e43fbf74e0b5c9ab3f9e7b32"}, - {file = "promptlayer-0.1.99.tar.gz", hash = "sha256:7bf839c50a921d2c7244ebf48c88ccb761df3d05c0dd18978d1be3c792287688"}, + {file = "promptlayer-0.1.96-py3-none-any.whl", hash = 
"sha256:ad2f214c3814ec86950e9f6dc76e34903e4839c25e844a9ea27c33bc18cb5af3"}, + {file = "promptlayer-0.1.96.tar.gz", hash = "sha256:ccd8d839955e97ae1dc25e8c7d29dd647622772313962539f200d245c32b8037"}, ] [package.dependencies] -pydantic = ">=1,<2" -requests = ">=2.31.0,<3.0.0" +requests = "*" [[package]] name = "propcache" @@ -1550,28 +1740,69 @@ with-pyright = ["pyright (>=1.1.3)"] with-pyroma = ["pyroma (>=2.4)"] with-vulture = ["vulture (>=1.5)"] +[[package]] +name = "protobuf" +version = "4.25.5" +description = "" +optional = false +python-versions = ">=3.8" +files = [ + {file = "protobuf-4.25.5-cp310-abi3-win32.whl", hash = "sha256:5e61fd921603f58d2f5acb2806a929b4675f8874ff5f330b7d6f7e2e784bbcd8"}, + {file = "protobuf-4.25.5-cp310-abi3-win_amd64.whl", hash = "sha256:4be0571adcbe712b282a330c6e89eae24281344429ae95c6d85e79e84780f5ea"}, + {file = "protobuf-4.25.5-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:b2fde3d805354df675ea4c7c6338c1aecd254dfc9925e88c6d31a2bcb97eb173"}, + {file = "protobuf-4.25.5-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:919ad92d9b0310070f8356c24b855c98df2b8bd207ebc1c0c6fcc9ab1e007f3d"}, + {file = "protobuf-4.25.5-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:fe14e16c22be926d3abfcb500e60cab068baf10b542b8c858fa27e098123e331"}, + {file = "protobuf-4.25.5-cp38-cp38-win32.whl", hash = "sha256:98d8d8aa50de6a2747efd9cceba361c9034050ecce3e09136f90de37ddba66e1"}, + {file = "protobuf-4.25.5-cp38-cp38-win_amd64.whl", hash = "sha256:b0234dd5a03049e4ddd94b93400b67803c823cfc405689688f59b34e0742381a"}, + {file = "protobuf-4.25.5-cp39-cp39-win32.whl", hash = "sha256:abe32aad8561aa7cc94fc7ba4fdef646e576983edb94a73381b03c53728a626f"}, + {file = "protobuf-4.25.5-cp39-cp39-win_amd64.whl", hash = "sha256:7a183f592dc80aa7c8da7ad9e55091c4ffc9497b3054452d629bb85fa27c2a45"}, + {file = "protobuf-4.25.5-py3-none-any.whl", hash = "sha256:0aebecb809cae990f8129ada5ca273d9d670b76d9bfc9b1809f0a9c02b7dbf41"}, + {file = "protobuf-4.25.5.tar.gz", hash = 
"sha256:7f8249476b4a9473645db7f8ab42b02fe1488cbe5fb72fddd445e0665afd8584"}, +] + [[package]] name = "psycopg2" -version = "2.9.9" +version = "2.9.10" description = "psycopg2 - Python-PostgreSQL Database Adapter" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" +files = [ + {file = "psycopg2-2.9.10-cp310-cp310-win32.whl", hash = "sha256:5df2b672140f95adb453af93a7d669d7a7bf0a56bcd26f1502329166f4a61716"}, + {file = "psycopg2-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:c6f7b8561225f9e711a9c47087388a97fdc948211c10a4bccbf0ba68ab7b3b5a"}, + {file = "psycopg2-2.9.10-cp311-cp311-win32.whl", hash = "sha256:47c4f9875125344f4c2b870e41b6aad585901318068acd01de93f3677a6522c2"}, + {file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"}, + {file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"}, + {file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"}, + {file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"}, + {file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"}, + {file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"}, +] + +[[package]] +name = "pyasn1" +version = "0.6.1" +description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" +optional = false +python-versions = ">=3.8" files = [ - {file = "psycopg2-2.9.9-cp310-cp310-win32.whl", hash = "sha256:38a8dcc6856f569068b47de286b472b7c473ac7977243593a288ebce0dc89516"}, - {file = "psycopg2-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:426f9f29bde126913a20a96ff8ce7d73fd8a216cfb323b1f04da402d452853c3"}, - {file = 
"psycopg2-2.9.9-cp311-cp311-win32.whl", hash = "sha256:ade01303ccf7ae12c356a5e10911c9e1c51136003a9a1d92f7aa9d010fb98372"}, - {file = "psycopg2-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:121081ea2e76729acfb0673ff33755e8703d45e926e416cb59bae3a86c6a4981"}, - {file = "psycopg2-2.9.9-cp312-cp312-win32.whl", hash = "sha256:d735786acc7dd25815e89cc4ad529a43af779db2e25aa7c626de864127e5a024"}, - {file = "psycopg2-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:a7653d00b732afb6fc597e29c50ad28087dcb4fbfb28e86092277a559ae4e693"}, - {file = "psycopg2-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:5e0d98cade4f0e0304d7d6f25bbfbc5bd186e07b38eac65379309c4ca3193efa"}, - {file = "psycopg2-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:7e2dacf8b009a1c1e843b5213a87f7c544b2b042476ed7755be813eaf4e8347a"}, - {file = "psycopg2-2.9.9-cp38-cp38-win32.whl", hash = "sha256:ff432630e510709564c01dafdbe996cb552e0b9f3f065eb89bdce5bd31fabf4c"}, - {file = "psycopg2-2.9.9-cp38-cp38-win_amd64.whl", hash = "sha256:bac58c024c9922c23550af2a581998624d6e02350f4ae9c5f0bc642c633a2d5e"}, - {file = "psycopg2-2.9.9-cp39-cp39-win32.whl", hash = "sha256:c92811b2d4c9b6ea0285942b2e7cac98a59e166d59c588fe5cfe1eda58e72d59"}, - {file = "psycopg2-2.9.9-cp39-cp39-win_amd64.whl", hash = "sha256:de80739447af31525feddeb8effd640782cf5998e1a4e9192ebdf829717e3913"}, - {file = "psycopg2-2.9.9.tar.gz", hash = "sha256:d1454bde93fb1e224166811694d600e746430c006fbb031ea06ecc2ea41bf156"}, + {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, + {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, ] +[[package]] +name = "pyasn1-modules" +version = "0.4.1" +description = "A collection of ASN.1-based protocols modules" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = 
"sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, + {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, +] + +[package.dependencies] +pyasn1 = ">=0.4.6,<0.7.0" + [[package]] name = "pycodestyle" version = "2.12.1" @@ -1585,62 +1816,113 @@ files = [ [[package]] name = "pydantic" -version = "1.10.17" -description = "Data validation and settings management using python type hints" +version = "2.7.4" +description = "Data validation using Python type hints" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pydantic-1.10.17-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0fa51175313cc30097660b10eec8ca55ed08bfa07acbfe02f7a42f6c242e9a4b"}, - {file = "pydantic-1.10.17-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c7e8988bb16988890c985bd2093df9dd731bfb9d5e0860db054c23034fab8f7a"}, - {file = "pydantic-1.10.17-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:371dcf1831f87c9e217e2b6a0c66842879a14873114ebb9d0861ab22e3b5bb1e"}, - {file = "pydantic-1.10.17-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4866a1579c0c3ca2c40575398a24d805d4db6cb353ee74df75ddeee3c657f9a7"}, - {file = "pydantic-1.10.17-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:543da3c6914795b37785703ffc74ba4d660418620cc273490d42c53949eeeca6"}, - {file = "pydantic-1.10.17-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7623b59876f49e61c2e283551cc3647616d2fbdc0b4d36d3d638aae8547ea681"}, - {file = "pydantic-1.10.17-cp310-cp310-win_amd64.whl", hash = "sha256:409b2b36d7d7d19cd8310b97a4ce6b1755ef8bd45b9a2ec5ec2b124db0a0d8f3"}, - {file = "pydantic-1.10.17-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fa43f362b46741df8f201bf3e7dff3569fa92069bcc7b4a740dea3602e27ab7a"}, - {file = "pydantic-1.10.17-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:2a72d2a5ff86a3075ed81ca031eac86923d44bc5d42e719d585a8eb547bf0c9b"}, - {file = "pydantic-1.10.17-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4ad32aed3bf5eea5ca5decc3d1bbc3d0ec5d4fbcd72a03cdad849458decbc63"}, - {file = "pydantic-1.10.17-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aeb4e741782e236ee7dc1fb11ad94dc56aabaf02d21df0e79e0c21fe07c95741"}, - {file = "pydantic-1.10.17-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d2f89a719411cb234105735a520b7c077158a81e0fe1cb05a79c01fc5eb59d3c"}, - {file = "pydantic-1.10.17-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db3b48d9283d80a314f7a682f7acae8422386de659fffaba454b77a083c3937d"}, - {file = "pydantic-1.10.17-cp311-cp311-win_amd64.whl", hash = "sha256:9c803a5113cfab7bbb912f75faa4fc1e4acff43e452c82560349fff64f852e1b"}, - {file = "pydantic-1.10.17-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:820ae12a390c9cbb26bb44913c87fa2ff431a029a785642c1ff11fed0a095fcb"}, - {file = "pydantic-1.10.17-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c1e51d1af306641b7d1574d6d3307eaa10a4991542ca324f0feb134fee259815"}, - {file = "pydantic-1.10.17-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e53fb834aae96e7b0dadd6e92c66e7dd9cdf08965340ed04c16813102a47fab"}, - {file = "pydantic-1.10.17-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e2495309b1266e81d259a570dd199916ff34f7f51f1b549a0d37a6d9b17b4dc"}, - {file = "pydantic-1.10.17-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:098ad8de840c92ea586bf8efd9e2e90c6339d33ab5c1cfbb85be66e4ecf8213f"}, - {file = "pydantic-1.10.17-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:525bbef620dac93c430d5d6bdbc91bdb5521698d434adf4434a7ef6ffd5c4b7f"}, - {file = "pydantic-1.10.17-cp312-cp312-win_amd64.whl", hash = "sha256:6654028d1144df451e1da69a670083c27117d493f16cf83da81e1e50edce72ad"}, - 
{file = "pydantic-1.10.17-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c87cedb4680d1614f1d59d13fea353faf3afd41ba5c906a266f3f2e8c245d655"}, - {file = "pydantic-1.10.17-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11289fa895bcbc8f18704efa1d8020bb9a86314da435348f59745473eb042e6b"}, - {file = "pydantic-1.10.17-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:94833612d6fd18b57c359a127cbfd932d9150c1b72fea7c86ab58c2a77edd7c7"}, - {file = "pydantic-1.10.17-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:d4ecb515fa7cb0e46e163ecd9d52f9147ba57bc3633dca0e586cdb7a232db9e3"}, - {file = "pydantic-1.10.17-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7017971ffa7fd7808146880aa41b266e06c1e6e12261768a28b8b41ba55c8076"}, - {file = "pydantic-1.10.17-cp37-cp37m-win_amd64.whl", hash = "sha256:e840e6b2026920fc3f250ea8ebfdedf6ea7a25b77bf04c6576178e681942ae0f"}, - {file = "pydantic-1.10.17-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bfbb18b616abc4df70591b8c1ff1b3eabd234ddcddb86b7cac82657ab9017e33"}, - {file = "pydantic-1.10.17-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ebb249096d873593e014535ab07145498957091aa6ae92759a32d40cb9998e2e"}, - {file = "pydantic-1.10.17-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8c209af63ccd7b22fba94b9024e8b7fd07feffee0001efae50dd99316b27768"}, - {file = "pydantic-1.10.17-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4b40c9e13a0b61583e5599e7950490c700297b4a375b55b2b592774332798b7"}, - {file = "pydantic-1.10.17-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c31d281c7485223caf6474fc2b7cf21456289dbaa31401844069b77160cab9c7"}, - {file = "pydantic-1.10.17-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ae5184e99a060a5c80010a2d53c99aee76a3b0ad683d493e5f0620b5d86eeb75"}, - {file = "pydantic-1.10.17-cp38-cp38-win_amd64.whl", hash = 
"sha256:ad1e33dc6b9787a6f0f3fd132859aa75626528b49cc1f9e429cdacb2608ad5f0"}, - {file = "pydantic-1.10.17-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e17c0ee7192e54a10943f245dc79e36d9fe282418ea05b886e1c666063a7b54"}, - {file = "pydantic-1.10.17-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cafb9c938f61d1b182dfc7d44a7021326547b7b9cf695db5b68ec7b590214773"}, - {file = "pydantic-1.10.17-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95ef534e3c22e5abbdbdd6f66b6ea9dac3ca3e34c5c632894f8625d13d084cbe"}, - {file = "pydantic-1.10.17-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62d96b8799ae3d782df7ec9615cb59fc32c32e1ed6afa1b231b0595f6516e8ab"}, - {file = "pydantic-1.10.17-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ab2f976336808fd5d539fdc26eb51f9aafc1f4b638e212ef6b6f05e753c8011d"}, - {file = "pydantic-1.10.17-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b8ad363330557beac73159acfbeed220d5f1bfcd6b930302a987a375e02f74fd"}, - {file = "pydantic-1.10.17-cp39-cp39-win_amd64.whl", hash = "sha256:48db882e48575ce4b39659558b2f9f37c25b8d348e37a2b4e32971dd5a7d6227"}, - {file = "pydantic-1.10.17-py3-none-any.whl", hash = "sha256:e41b5b973e5c64f674b3b4720286ded184dcc26a691dd55f34391c62c6934688"}, - {file = "pydantic-1.10.17.tar.gz", hash = "sha256:f434160fb14b353caf634149baaf847206406471ba70e64657c1e8330277a991"}, + {file = "pydantic-2.7.4-py3-none-any.whl", hash = "sha256:ee8538d41ccb9c0a9ad3e0e5f07bf15ed8015b481ced539a1759d8cc89ae90d0"}, + {file = "pydantic-2.7.4.tar.gz", hash = "sha256:0c84efd9548d545f63ac0060c1e4d39bb9b14db8b3c0652338aecc07b5adec52"}, ] [package.dependencies] -typing-extensions = ">=4.2.0" +annotated-types = ">=0.4.0" +pydantic-core = "2.18.4" +typing-extensions = ">=4.6.1" [package.extras] -dotenv = ["python-dotenv (>=0.10.4)"] -email = ["email-validator (>=1.0.3)"] +email = ["email-validator (>=2.0.0)"] + +[[package]] +name = "pydantic-core" +version = "2.18.4" 
+description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_core-2.18.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:f76d0ad001edd426b92233d45c746fd08f467d56100fd8f30e9ace4b005266e4"}, + {file = "pydantic_core-2.18.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:59ff3e89f4eaf14050c8022011862df275b552caef8082e37b542b066ce1ff26"}, + {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a55b5b16c839df1070bc113c1f7f94a0af4433fcfa1b41799ce7606e5c79ce0a"}, + {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4d0dcc59664fcb8974b356fe0a18a672d6d7cf9f54746c05f43275fc48636851"}, + {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8951eee36c57cd128f779e641e21eb40bc5073eb28b2d23f33eb0ef14ffb3f5d"}, + {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4701b19f7e3a06ea655513f7938de6f108123bf7c86bbebb1196eb9bd35cf724"}, + {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e00a3f196329e08e43d99b79b286d60ce46bed10f2280d25a1718399457e06be"}, + {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:97736815b9cc893b2b7f663628e63f436018b75f44854c8027040e05230eeddb"}, + {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6891a2ae0e8692679c07728819b6e2b822fb30ca7445f67bbf6509b25a96332c"}, + {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bc4ff9805858bd54d1a20efff925ccd89c9d2e7cf4986144b30802bf78091c3e"}, + {file = "pydantic_core-2.18.4-cp310-none-win32.whl", hash = "sha256:1b4de2e51bbcb61fdebd0ab86ef28062704f62c82bbf4addc4e37fa4b00b7cbc"}, + {file = "pydantic_core-2.18.4-cp310-none-win_amd64.whl", hash 
= "sha256:6a750aec7bf431517a9fd78cb93c97b9b0c496090fee84a47a0d23668976b4b0"}, + {file = "pydantic_core-2.18.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:942ba11e7dfb66dc70f9ae66b33452f51ac7bb90676da39a7345e99ffb55402d"}, + {file = "pydantic_core-2.18.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b2ebef0e0b4454320274f5e83a41844c63438fdc874ea40a8b5b4ecb7693f1c4"}, + {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a642295cd0c8df1b86fc3dced1d067874c353a188dc8e0f744626d49e9aa51c4"}, + {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f09baa656c904807e832cf9cce799c6460c450c4ad80803517032da0cd062e2"}, + {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98906207f29bc2c459ff64fa007afd10a8c8ac080f7e4d5beff4c97086a3dabd"}, + {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19894b95aacfa98e7cb093cd7881a0c76f55731efad31073db4521e2b6ff5b7d"}, + {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fbbdc827fe5e42e4d196c746b890b3d72876bdbf160b0eafe9f0334525119c8"}, + {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f85d05aa0918283cf29a30b547b4df2fbb56b45b135f9e35b6807cb28bc47951"}, + {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e85637bc8fe81ddb73fda9e56bab24560bdddfa98aa64f87aaa4e4b6730c23d2"}, + {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2f5966897e5461f818e136b8451d0551a2e77259eb0f73a837027b47dc95dab9"}, + {file = "pydantic_core-2.18.4-cp311-none-win32.whl", hash = "sha256:44c7486a4228413c317952e9d89598bcdfb06399735e49e0f8df643e1ccd0558"}, + {file = "pydantic_core-2.18.4-cp311-none-win_amd64.whl", hash = 
"sha256:8a7164fe2005d03c64fd3b85649891cd4953a8de53107940bf272500ba8a788b"}, + {file = "pydantic_core-2.18.4-cp311-none-win_arm64.whl", hash = "sha256:4e99bc050fe65c450344421017f98298a97cefc18c53bb2f7b3531eb39bc7805"}, + {file = "pydantic_core-2.18.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6f5c4d41b2771c730ea1c34e458e781b18cc668d194958e0112455fff4e402b2"}, + {file = "pydantic_core-2.18.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2fdf2156aa3d017fddf8aea5adfba9f777db1d6022d392b682d2a8329e087cef"}, + {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4748321b5078216070b151d5271ef3e7cc905ab170bbfd27d5c83ee3ec436695"}, + {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:847a35c4d58721c5dc3dba599878ebbdfd96784f3fb8bb2c356e123bdcd73f34"}, + {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c40d4eaad41f78e3bbda31b89edc46a3f3dc6e171bf0ecf097ff7a0ffff7cb1"}, + {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:21a5e440dbe315ab9825fcd459b8814bb92b27c974cbc23c3e8baa2b76890077"}, + {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01dd777215e2aa86dfd664daed5957704b769e726626393438f9c87690ce78c3"}, + {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4b06beb3b3f1479d32befd1f3079cc47b34fa2da62457cdf6c963393340b56e9"}, + {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:564d7922e4b13a16b98772441879fcdcbe82ff50daa622d681dd682175ea918c"}, + {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0eb2a4f660fcd8e2b1c90ad566db2b98d7f3f4717c64fe0a83e0adb39766d5b8"}, + {file = "pydantic_core-2.18.4-cp312-none-win32.whl", hash = 
"sha256:8b8bab4c97248095ae0c4455b5a1cd1cdd96e4e4769306ab19dda135ea4cdb07"}, + {file = "pydantic_core-2.18.4-cp312-none-win_amd64.whl", hash = "sha256:14601cdb733d741b8958224030e2bfe21a4a881fb3dd6fbb21f071cabd48fa0a"}, + {file = "pydantic_core-2.18.4-cp312-none-win_arm64.whl", hash = "sha256:c1322d7dd74713dcc157a2b7898a564ab091ca6c58302d5c7b4c07296e3fd00f"}, + {file = "pydantic_core-2.18.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:823be1deb01793da05ecb0484d6c9e20baebb39bd42b5d72636ae9cf8350dbd2"}, + {file = "pydantic_core-2.18.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ebef0dd9bf9b812bf75bda96743f2a6c5734a02092ae7f721c048d156d5fabae"}, + {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae1d6df168efb88d7d522664693607b80b4080be6750c913eefb77e34c12c71a"}, + {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f9899c94762343f2cc2fc64c13e7cae4c3cc65cdfc87dd810a31654c9b7358cc"}, + {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99457f184ad90235cfe8461c4d70ab7dd2680e28821c29eca00252ba90308c78"}, + {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18f469a3d2a2fdafe99296a87e8a4c37748b5080a26b806a707f25a902c040a8"}, + {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cdf28938ac6b8b49ae5e92f2735056a7ba99c9b110a474473fd71185c1af5d"}, + {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:938cb21650855054dc54dfd9120a851c974f95450f00683399006aa6e8abb057"}, + {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:44cd83ab6a51da80fb5adbd9560e26018e2ac7826f9626bc06ca3dc074cd198b"}, + {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:972658f4a72d02b8abfa2581d92d59f59897d2e9f7e708fdabe922f9087773af"}, + {file = "pydantic_core-2.18.4-cp38-none-win32.whl", hash = "sha256:1d886dc848e60cb7666f771e406acae54ab279b9f1e4143babc9c2258213daa2"}, + {file = "pydantic_core-2.18.4-cp38-none-win_amd64.whl", hash = "sha256:bb4462bd43c2460774914b8525f79b00f8f407c945d50881568f294c1d9b4443"}, + {file = "pydantic_core-2.18.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:44a688331d4a4e2129140a8118479443bd6f1905231138971372fcde37e43528"}, + {file = "pydantic_core-2.18.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a2fdd81edd64342c85ac7cf2753ccae0b79bf2dfa063785503cb85a7d3593223"}, + {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86110d7e1907ab36691f80b33eb2da87d780f4739ae773e5fc83fb272f88825f"}, + {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:46387e38bd641b3ee5ce247563b60c5ca098da9c56c75c157a05eaa0933ed154"}, + {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:123c3cec203e3f5ac7b000bd82235f1a3eced8665b63d18be751f115588fea30"}, + {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dc1803ac5c32ec324c5261c7209e8f8ce88e83254c4e1aebdc8b0a39f9ddb443"}, + {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53db086f9f6ab2b4061958d9c276d1dbe3690e8dd727d6abf2321d6cce37fa94"}, + {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:abc267fa9837245cc28ea6929f19fa335f3dc330a35d2e45509b6566dc18be23"}, + {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a0d829524aaefdebccb869eed855e2d04c21d2d7479b6cada7ace5448416597b"}, + {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:509daade3b8649f80d4e5ff21aa5673e4ebe58590b25fe42fac5f0f52c6f034a"}, + 
{file = "pydantic_core-2.18.4-cp39-none-win32.whl", hash = "sha256:ca26a1e73c48cfc54c4a76ff78df3727b9d9f4ccc8dbee4ae3f73306a591676d"}, + {file = "pydantic_core-2.18.4-cp39-none-win_amd64.whl", hash = "sha256:c67598100338d5d985db1b3d21f3619ef392e185e71b8d52bceacc4a7771ea7e"}, + {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:574d92eac874f7f4db0ca653514d823a0d22e2354359d0759e3f6a406db5d55d"}, + {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1f4d26ceb5eb9eed4af91bebeae4b06c3fb28966ca3a8fb765208cf6b51102ab"}, + {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77450e6d20016ec41f43ca4a6c63e9fdde03f0ae3fe90e7c27bdbeaece8b1ed4"}, + {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d323a01da91851a4f17bf592faf46149c9169d68430b3146dcba2bb5e5719abc"}, + {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43d447dd2ae072a0065389092a231283f62d960030ecd27565672bd40746c507"}, + {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:578e24f761f3b425834f297b9935e1ce2e30f51400964ce4801002435a1b41ef"}, + {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:81b5efb2f126454586d0f40c4d834010979cb80785173d1586df845a632e4e6d"}, + {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ab86ce7c8f9bea87b9d12c7f0af71102acbf5ecbc66c17796cff45dae54ef9a5"}, + {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:90afc12421df2b1b4dcc975f814e21bc1754640d502a2fbcc6d41e77af5ec312"}, + {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:51991a89639a912c17bef4b45c87bd83593aee0437d8102556af4885811d59f5"}, + {file = 
"pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:293afe532740370aba8c060882f7d26cfd00c94cae32fd2e212a3a6e3b7bc15e"}, + {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b48ece5bde2e768197a2d0f6e925f9d7e3e826f0ad2271120f8144a9db18d5c8"}, + {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eae237477a873ab46e8dd748e515c72c0c804fb380fbe6c85533c7de51f23a8f"}, + {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:834b5230b5dfc0c1ec37b2fda433b271cbbc0e507560b5d1588e2cc1148cf1ce"}, + {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e858ac0a25074ba4bce653f9b5d0a85b7456eaddadc0ce82d3878c22489fa4ee"}, + {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2fd41f6eff4c20778d717af1cc50eca52f5afe7805ee530a4fbd0bae284f16e9"}, + {file = "pydantic_core-2.18.4.tar.gz", hash = "sha256:ec3beeada09ff865c344ff3bc2f427f5e6c26401cc6113d77e372c3fdac73864"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" [[package]] name = "pydocstyle" @@ -1754,6 +2036,20 @@ files = [ [package.dependencies] pylint = ">=1.7" +[[package]] +name = "pyparsing" +version = "3.2.0" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pyparsing-3.2.0-py3-none-any.whl", hash = "sha256:93d9577b88da0bbea8cc8334ee8b918ed014968fd2ec383e868fb8afb1ccef84"}, + {file = "pyparsing-3.2.0.tar.gz", hash = "sha256:cbf74e27246d595d9a74b186b810f6fbb86726dbf3b9532efb343f6d7294fe9c"}, +] + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] + [[package]] name = "python-dotenv" version = "1.0.0" @@ -1933,6 +2229,25 @@ files = [ {file = "regex-2024.9.11.tar.gz", hash = 
"sha256:6c188c307e8433bcb63dc1915022deb553b4203a70722fc542c363bf120a01fd"}, ] +[[package]] +name = "replicate" +version = "0.11.0" +description = "Python client for Replicate" +optional = false +python-versions = ">=3.8" +files = [ + {file = "replicate-0.11.0-py3-none-any.whl", hash = "sha256:fbb8815068864dc822cd4fa7b6103d6f4089d6ef122abd6c3441ca0f0f110c46"}, + {file = "replicate-0.11.0.tar.gz", hash = "sha256:4d54b5838c1552a6f76cc37c3af8d9a7998105382082d672acad31636ad443b5"}, +] + +[package.dependencies] +packaging = "*" +pydantic = ">1" +requests = ">2" + +[package.extras] +dev = ["black", "mypy", "pytest", "responses", "ruff"] + [[package]] name = "requests" version = "2.32.3" @@ -1985,6 +2300,20 @@ packaging = ">=21.3" semver = ">=3.0.0,<4.0.0" toml = ">=0.10.2,<0.11.0" +[[package]] +name = "rsa" +version = "4.9" +description = "Pure-Python RSA implementation" +optional = false +python-versions = ">=3.6,<4" +files = [ + {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, + {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, +] + +[package.dependencies] +pyasn1 = ">=0.1.3" + [[package]] name = "semver" version = "3.0.2" @@ -2010,6 +2339,17 @@ files = [ [package.extras] yaml = ["pyyaml"] +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + [[package]] name = "smmap" version = "5.0.1" @@ -2043,62 +2383,81 @@ files = [ {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"}, ] +[[package]] +name = 
"soupsieve" +version = "2.6" +description = "A modern CSS selector implementation for Beautiful Soup." +optional = false +python-versions = ">=3.8" +files = [ + {file = "soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9"}, + {file = "soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb"}, +] + [[package]] name = "sqlalchemy" -version = "2.0.35" +version = "2.0.36" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:67219632be22f14750f0d1c70e62f204ba69d28f62fd6432ba05ab295853de9b"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4668bd8faf7e5b71c0319407b608f278f279668f358857dbfd10ef1954ac9f90"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb8bea573863762bbf45d1e13f87c2d2fd32cee2dbd50d050f83f87429c9e1ea"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f552023710d4b93d8fb29a91fadf97de89c5926c6bd758897875435f2a939f33"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:016b2e665f778f13d3c438651dd4de244214b527a275e0acf1d44c05bc6026a9"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7befc148de64b6060937231cbff8d01ccf0bfd75aa26383ffdf8d82b12ec04ff"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-win32.whl", hash = "sha256:22b83aed390e3099584b839b93f80a0f4a95ee7f48270c97c90acd40ee646f0b"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-win_amd64.whl", hash = "sha256:a29762cd3d116585278ffb2e5b8cc311fb095ea278b96feef28d0b423154858e"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e21f66748ab725ade40fa7af8ec8b5019c68ab00b929f6643e1b1af461eddb60"}, - {file = 
"SQLAlchemy-2.0.35-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8a6219108a15fc6d24de499d0d515c7235c617b2540d97116b663dade1a54d62"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:042622a5306c23b972192283f4e22372da3b8ddf5f7aac1cc5d9c9b222ab3ff6"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:627dee0c280eea91aed87b20a1f849e9ae2fe719d52cbf847c0e0ea34464b3f7"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4fdcd72a789c1c31ed242fd8c1bcd9ea186a98ee8e5408a50e610edfef980d71"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:89b64cd8898a3a6f642db4eb7b26d1b28a497d4022eccd7717ca066823e9fb01"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-win32.whl", hash = "sha256:6a93c5a0dfe8d34951e8a6f499a9479ffb9258123551fa007fc708ae2ac2bc5e"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-win_amd64.whl", hash = "sha256:c68fe3fcde03920c46697585620135b4ecfdfc1ed23e75cc2c2ae9f8502c10b8"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:eb60b026d8ad0c97917cb81d3662d0b39b8ff1335e3fabb24984c6acd0c900a2"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6921ee01caf375363be5e9ae70d08ce7ca9d7e0e8983183080211a062d299468"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8cdf1a0dbe5ced887a9b127da4ffd7354e9c1a3b9bb330dce84df6b70ccb3a8d"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93a71c8601e823236ac0e5d087e4f397874a421017b3318fd92c0b14acf2b6db"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e04b622bb8a88f10e439084486f2f6349bf4d50605ac3e445869c7ea5cf0fa8c"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:1b56961e2d31389aaadf4906d453859f35302b4eb818d34a26fab72596076bb8"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-win32.whl", hash = "sha256:0f9f3f9a3763b9c4deb8c5d09c4cc52ffe49f9876af41cc1b2ad0138878453cf"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-win_amd64.whl", hash = "sha256:25b0f63e7fcc2a6290cb5f7f5b4fc4047843504983a28856ce9b35d8f7de03cc"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f021d334f2ca692523aaf7bbf7592ceff70c8594fad853416a81d66b35e3abf9"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05c3f58cf91683102f2f0265c0db3bd3892e9eedabe059720492dbaa4f922da1"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:032d979ce77a6c2432653322ba4cbeabf5a6837f704d16fa38b5a05d8e21fa00"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:2e795c2f7d7249b75bb5f479b432a51b59041580d20599d4e112b5f2046437a3"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:cc32b2990fc34380ec2f6195f33a76b6cdaa9eecf09f0c9404b74fc120aef36f"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-win32.whl", hash = "sha256:9509c4123491d0e63fb5e16199e09f8e262066e58903e84615c301dde8fa2e87"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-win_amd64.whl", hash = "sha256:3655af10ebcc0f1e4e06c5900bb33e080d6a1fa4228f502121f28a3b1753cde5"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4c31943b61ed8fdd63dfd12ccc919f2bf95eefca133767db6fbbd15da62078ec"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a62dd5d7cc8626a3634208df458c5fe4f21200d96a74d122c83bc2015b333bc1"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0630774b0977804fba4b6bbea6852ab56c14965a2b0c7fc7282c5f7d90a1ae72"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8d625eddf7efeba2abfd9c014a22c0f6b3796e0ffb48f5d5ab106568ef01ff5a"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ada603db10bb865bbe591939de854faf2c60f43c9b763e90f653224138f910d9"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c41411e192f8d3ea39ea70e0fae48762cd11a2244e03751a98bd3c0ca9a4e936"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-win32.whl", hash = "sha256:d299797d75cd747e7797b1b41817111406b8b10a4f88b6e8fe5b5e59598b43b0"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-win_amd64.whl", hash = "sha256:0375a141e1c0878103eb3d719eb6d5aa444b490c96f3fedab8471c7f6ffe70ee"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ccae5de2a0140d8be6838c331604f91d6fafd0735dbdcee1ac78fc8fbaba76b4"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2a275a806f73e849e1c309ac11108ea1a14cd7058577aba962cd7190e27c9e3c"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:732e026240cdd1c1b2e3ac515c7a23820430ed94292ce33806a95869c46bd139"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:890da8cd1941fa3dab28c5bac3b9da8502e7e366f895b3b8e500896f12f94d11"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0d8326269dbf944b9201911b0d9f3dc524d64779a07518199a58384c3d37a44"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b76d63495b0508ab9fc23f8152bac63205d2a704cd009a2b0722f4c8e0cba8e0"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-win32.whl", hash = "sha256:69683e02e8a9de37f17985905a5eca18ad651bf592314b4d3d799029797d0eb3"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-win_amd64.whl", hash = "sha256:aee110e4ef3c528f3abbc3c2018c121e708938adeeff9006428dd7c8555e9b3f"}, - {file = "SQLAlchemy-2.0.35-py3-none-any.whl", hash = "sha256:2ab3f0336c0387662ce6221ad30ab3a5e6499aab01b9790879b6578fd9b8faa1"}, - {file = 
"sqlalchemy-2.0.35.tar.gz", hash = "sha256:e11d7ea4d24f0a262bccf9a7cd6284c976c5369dac21db237cff59586045ab9f"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:59b8f3adb3971929a3e660337f5dacc5942c2cdb760afcabb2614ffbda9f9f72"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37350015056a553e442ff672c2d20e6f4b6d0b2495691fa239d8aa18bb3bc908"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8318f4776c85abc3f40ab185e388bee7a6ea99e7fa3a30686580b209eaa35c08"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c245b1fbade9c35e5bd3b64270ab49ce990369018289ecfde3f9c318411aaa07"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:69f93723edbca7342624d09f6704e7126b152eaed3cdbb634cb657a54332a3c5"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f9511d8dd4a6e9271d07d150fb2f81874a3c8c95e11ff9af3a2dfc35fe42ee44"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-win32.whl", hash = "sha256:c3f3631693003d8e585d4200730616b78fafd5a01ef8b698f6967da5c605b3fa"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-win_amd64.whl", hash = "sha256:a86bfab2ef46d63300c0f06936bd6e6c0105faa11d509083ba8f2f9d237fb5b5"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fd3a55deef00f689ce931d4d1b23fa9f04c880a48ee97af488fd215cf24e2a6c"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4f5e9cd989b45b73bd359f693b935364f7e1f79486e29015813c338450aa5a71"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ddd9db6e59c44875211bc4c7953a9f6638b937b0a88ae6d09eb46cced54eff"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2519f3a5d0517fc159afab1015e54bb81b4406c278749779be57a569d8d1bb0d"}, + {file = 
"SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59b1ee96617135f6e1d6f275bbe988f419c5178016f3d41d3c0abb0c819f75bb"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:39769a115f730d683b0eb7b694db9789267bcd027326cccc3125e862eb03bfd8"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-win32.whl", hash = "sha256:66bffbad8d6271bb1cc2f9a4ea4f86f80fe5e2e3e501a5ae2a3dc6a76e604e6f"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-win_amd64.whl", hash = "sha256:23623166bfefe1487d81b698c423f8678e80df8b54614c2bf4b4cfcd7c711959"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7b64e6ec3f02c35647be6b4851008b26cff592a95ecb13b6788a54ef80bbdd4"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:46331b00096a6db1fdc052d55b101dbbfc99155a548e20a0e4a8e5e4d1362855"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdf3386a801ea5aba17c6410dd1dc8d39cf454ca2565541b5ac42a84e1e28f53"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac9dfa18ff2a67b09b372d5db8743c27966abf0e5344c555d86cc7199f7ad83a"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:90812a8933df713fdf748b355527e3af257a11e415b613dd794512461eb8a686"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1bc330d9d29c7f06f003ab10e1eaced295e87940405afe1b110f2eb93a233588"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-win32.whl", hash = "sha256:79d2e78abc26d871875b419e1fd3c0bca31a1cb0043277d0d850014599626c2e"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-win_amd64.whl", hash = "sha256:b544ad1935a8541d177cb402948b94e871067656b3a0b9e91dbec136b06a2ff5"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b5cc79df7f4bc3d11e4b542596c03826063092611e481fcf1c9dfee3c94355ef"}, + {file = 
"SQLAlchemy-2.0.36-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3c01117dd36800f2ecaa238c65365b7b16497adc1522bf84906e5710ee9ba0e8"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bc633f4ee4b4c46e7adcb3a9b5ec083bf1d9a97c1d3854b92749d935de40b9b"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e46ed38affdfc95d2c958de328d037d87801cfcbea6d421000859e9789e61c2"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b2985c0b06e989c043f1dc09d4fe89e1616aadd35392aea2844f0458a989eacf"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a121d62ebe7d26fec9155f83f8be5189ef1405f5973ea4874a26fab9f1e262c"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-win32.whl", hash = "sha256:0572f4bd6f94752167adfd7c1bed84f4b240ee6203a95e05d1e208d488d0d436"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-win_amd64.whl", hash = "sha256:8c78ac40bde930c60e0f78b3cd184c580f89456dd87fc08f9e3ee3ce8765ce88"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:be9812b766cad94a25bc63bec11f88c4ad3629a0cec1cd5d4ba48dc23860486b"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50aae840ebbd6cdd41af1c14590e5741665e5272d2fee999306673a1bb1fdb4d"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4557e1f11c5f653ebfdd924f3f9d5ebfc718283b0b9beebaa5dd6b77ec290971"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:07b441f7d03b9a66299ce7ccf3ef2900abc81c0db434f42a5694a37bd73870f2"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:28120ef39c92c2dd60f2721af9328479516844c6b550b077ca450c7d7dc68575"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-win32.whl", hash = "sha256:b81ee3d84803fd42d0b154cb6892ae57ea6b7c55d8359a02379965706c7efe6c"}, + 
{file = "SQLAlchemy-2.0.36-cp37-cp37m-win_amd64.whl", hash = "sha256:f942a799516184c855e1a32fbc7b29d7e571b52612647866d4ec1c3242578fcb"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3d6718667da04294d7df1670d70eeddd414f313738d20a6f1d1f379e3139a545"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:72c28b84b174ce8af8504ca28ae9347d317f9dba3999e5981a3cd441f3712e24"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b11d0cfdd2b095e7b0686cf5fabeb9c67fae5b06d265d8180715b8cfa86522e3"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e32092c47011d113dc01ab3e1d3ce9f006a47223b18422c5c0d150af13a00687"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6a440293d802d3011028e14e4226da1434b373cbaf4a4bbb63f845761a708346"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c54a1e53a0c308a8e8a7dffb59097bff7facda27c70c286f005327f21b2bd6b1"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-win32.whl", hash = "sha256:1e0d612a17581b6616ff03c8e3d5eff7452f34655c901f75d62bd86449d9750e"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-win_amd64.whl", hash = "sha256:8958b10490125124463095bbdadda5aa22ec799f91958e410438ad6c97a7b793"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dc022184d3e5cacc9579e41805a681187650e170eb2fd70e28b86192a479dcaa"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b817d41d692bf286abc181f8af476c4fbef3fd05e798777492618378448ee689"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4e46a888b54be23d03a89be510f24a7652fe6ff660787b96cd0e57a4ebcb46d"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4ae3005ed83f5967f961fd091f2f8c5329161f69ce8480aa8168b2d7fe37f06"}, + {file = 
"SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:03e08af7a5f9386a43919eda9de33ffda16b44eb11f3b313e6822243770e9763"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3dbb986bad3ed5ceaf090200eba750b5245150bd97d3e67343a3cfed06feecf7"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-win32.whl", hash = "sha256:9fe53b404f24789b5ea9003fc25b9a3988feddebd7e7b369c8fac27ad6f52f28"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-win_amd64.whl", hash = "sha256:af148a33ff0349f53512a049c6406923e4e02bf2f26c5fb285f143faf4f0e46a"}, + {file = "SQLAlchemy-2.0.36-py3-none-any.whl", hash = "sha256:fddbe92b4760c6f5d48162aef14824add991aeda8ddadb3c31d56eb15ca69f8e"}, + {file = "sqlalchemy-2.0.36.tar.gz", hash = "sha256:7f2767680b6d2398aea7082e45a774b2b0767b5c8d8ffb9c8b683088ea9b29c5"}, ] [package.dependencies] @@ -2112,7 +2471,7 @@ aioodbc = ["aioodbc", "greenlet (!=0.4.17)"] aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"] asyncio = ["greenlet (!=0.4.17)"] asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] -mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5,!=1.1.10)"] mssql = ["pyodbc"] mssql-pymssql = ["pymssql"] mssql-pyodbc = ["pyodbc"] @@ -2239,13 +2598,13 @@ files = [ [[package]] name = "tqdm" -version = "4.66.5" +version = "4.66.6" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" files = [ - {file = "tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd"}, - {file = "tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad"}, + {file = "tqdm-4.66.6-py3-none-any.whl", hash = "sha256:223e8b5359c2efc4b30555531f09e9f2f3589bcd7fdd389271191031b49b7a63"}, + {file = "tqdm-4.66.6.tar.gz", hash = "sha256:4bdd694238bef1485ce839d67967ab50af8f9272aab687c0d7702a01da0be090"}, ] [package.dependencies] 
@@ -2257,20 +2616,6 @@ notebook = ["ipywidgets (>=6)"] slack = ["slack-sdk"] telegram = ["requests"] -[[package]] -name = "types-requests" -version = "2.32.0.20240914" -description = "Typing stubs for requests" -optional = false -python-versions = ">=3.8" -files = [ - {file = "types-requests-2.32.0.20240914.tar.gz", hash = "sha256:2850e178db3919d9bf809e434eef65ba49d0e7e33ac92d588f4a5e295fffd405"}, - {file = "types_requests-2.32.0.20240914-py3-none-any.whl", hash = "sha256:59c2f673eb55f32a99b2894faf6020e1a9f4a402ad0f192bfee0b64469054310"}, -] - -[package.dependencies] -urllib3 = ">=2" - [[package]] name = "typing-extensions" version = "4.12.2" @@ -2297,22 +2642,32 @@ files = [ mypy-extensions = ">=0.3.0" typing-extensions = ">=3.7.4" +[[package]] +name = "uritemplate" +version = "4.1.1" +description = "Implementation of RFC 6570 URI Templates" +optional = false +python-versions = ">=3.6" +files = [ + {file = "uritemplate-4.1.1-py2.py3-none-any.whl", hash = "sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e"}, + {file = "uritemplate-4.1.1.tar.gz", hash = "sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0"}, +] + [[package]] name = "urllib3" -version = "2.2.3" +version = "1.26.20" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false -python-versions = ">=3.8" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ - {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, - {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, + {file = "urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e"}, + {file = "urllib3-1.26.20.tar.gz", hash = "sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32"}, ] [package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] -h2 = ["h2 (>=4,<5)"] -socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] -zstd = ["zstandard (>=0.18.0)"] +brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] +socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [[package]] name = "uvicorn" @@ -2334,103 +2689,93 @@ standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", [[package]] name = "yarl" -version = "1.14.0" +version = "1.17.0" description = "Yet another URL library" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "yarl-1.14.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1bfc25aa6a7c99cf86564210f79a0b7d4484159c67e01232b116e445b3036547"}, - {file = "yarl-1.14.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0cf21f46a15d445417de8fc89f2568852cf57fe8ca1ab3d19ddb24d45c0383ae"}, - {file = "yarl-1.14.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1dda53508df0de87b6e6b0a52d6718ff6c62a5aca8f5552748404963df639269"}, - {file = "yarl-1.14.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:587c3cc59bc148a9b1c07a019346eda2549bc9f468acd2f9824d185749acf0a6"}, - {file = 
"yarl-1.14.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3007a5b75cb50140708420fe688c393e71139324df599434633019314ceb8b59"}, - {file = "yarl-1.14.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:06ff23462398333c78b6f4f8d3d70410d657a471c2c5bbe6086133be43fc8f1a"}, - {file = "yarl-1.14.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:689a99a42ee4583fcb0d3a67a0204664aa1539684aed72bdafcbd505197a91c4"}, - {file = "yarl-1.14.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0547ab1e9345dc468cac8368d88ea4c5bd473ebc1d8d755347d7401982b5dd8"}, - {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:742aef0a99844faaac200564ea6f5e08facb285d37ea18bd1a5acf2771f3255a"}, - {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:176110bff341b6730f64a1eb3a7070e12b373cf1c910a9337e7c3240497db76f"}, - {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:46a9772a1efa93f9cd170ad33101c1817c77e0e9914d4fe33e2da299d7cf0f9b"}, - {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:ee2c68e4f2dd1b1c15b849ba1c96fac105fca6ffdb7c1e8be51da6fabbdeafb9"}, - {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:047b258e00b99091b6f90355521f026238c63bd76dcf996d93527bb13320eefd"}, - {file = "yarl-1.14.0-cp310-cp310-win32.whl", hash = "sha256:0aa92e3e30a04f9462a25077db689c4ac5ea9ab6cc68a2e563881b987d42f16d"}, - {file = "yarl-1.14.0-cp310-cp310-win_amd64.whl", hash = "sha256:d9baec588f015d0ee564057aa7574313c53a530662ffad930b7886becc85abdf"}, - {file = "yarl-1.14.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:07f9eaf57719d6721ab15805d85f4b01a5b509a0868d7320134371bcb652152d"}, - {file = "yarl-1.14.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c14b504a74e58e2deb0378b3eca10f3d076635c100f45b113c18c770b4a47a50"}, - {file = 
"yarl-1.14.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:16a682a127930f3fc4e42583becca6049e1d7214bcad23520c590edd741d2114"}, - {file = "yarl-1.14.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73bedd2be05f48af19f0f2e9e1353921ce0c83f4a1c9e8556ecdcf1f1eae4892"}, - {file = "yarl-1.14.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f3ab950f8814f3b7b5e3eebc117986f817ec933676f68f0a6c5b2137dd7c9c69"}, - {file = "yarl-1.14.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b693c63e7e64b524f54aa4888403c680342d1ad0d97be1707c531584d6aeeb4f"}, - {file = "yarl-1.14.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85cb3e40eaa98489f1e2e8b29f5ad02ee1ee40d6ce6b88d50cf0f205de1d9d2c"}, - {file = "yarl-1.14.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f24f08b6c9b9818fd80612c97857d28f9779f0d1211653ece9844fc7b414df2"}, - {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:29a84a46ec3ebae7a1c024c055612b11e9363a8a23238b3e905552d77a2bc51b"}, - {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5cd5dad8366e0168e0fd23d10705a603790484a6dbb9eb272b33673b8f2cce72"}, - {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a152751af7ef7b5d5fa6d215756e508dd05eb07d0cf2ba51f3e740076aa74373"}, - {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:3d569f877ed9a708e4c71a2d13d2940cb0791da309f70bd970ac1a5c088a0a92"}, - {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6a615cad11ec3428020fb3c5a88d85ce1b5c69fd66e9fcb91a7daa5e855325dd"}, - {file = "yarl-1.14.0-cp311-cp311-win32.whl", hash = "sha256:bab03192091681d54e8225c53f270b0517637915d9297028409a2a5114ff4634"}, - {file = "yarl-1.14.0-cp311-cp311-win_amd64.whl", hash = "sha256:985623575e5c4ea763056ffe0e2d63836f771a8c294b3de06d09480538316b13"}, - {file = 
"yarl-1.14.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:fc2c80bc87fba076e6cbb926216c27fba274dae7100a7b9a0983b53132dd99f2"}, - {file = "yarl-1.14.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:55c144d363ad4626ca744556c049c94e2b95096041ac87098bb363dcc8635e8d"}, - {file = "yarl-1.14.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b03384eed107dbeb5f625a99dc3a7de8be04fc8480c9ad42fccbc73434170b20"}, - {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f72a0d746d38cb299b79ce3d4d60ba0892c84bbc905d0d49c13df5bace1b65f8"}, - {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8648180b34faaea4aa5b5ca7e871d9eb1277033fa439693855cf0ea9195f85f1"}, - {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9557c9322aaa33174d285b0c1961fb32499d65ad1866155b7845edc876c3c835"}, - {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f50eb3837012a937a2b649ec872b66ba9541ad9d6f103ddcafb8231cfcafd22"}, - {file = "yarl-1.14.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8892fa575ac9b1b25fae7b221bc4792a273877b9b56a99ee2d8d03eeb3dbb1d2"}, - {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e6a2c5c5bb2556dfbfffffc2bcfb9c235fd2b566d5006dfb2a37afc7e3278a07"}, - {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ab3abc0b78a5dfaa4795a6afbe7b282b6aa88d81cf8c1bb5e394993d7cae3457"}, - {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:47eede5d11d669ab3759b63afb70d28d5328c14744b8edba3323e27dc52d298d"}, - {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:fe4d2536c827f508348d7b40c08767e8c7071614250927233bf0c92170451c0a"}, - {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:0fd7b941dd1b00b5f0acb97455fea2c4b7aac2dd31ea43fb9d155e9bc7b78664"}, - {file = "yarl-1.14.0-cp312-cp312-win32.whl", hash = "sha256:99ff3744f5fe48288be6bc402533b38e89749623a43208e1d57091fc96b783b9"}, - {file = "yarl-1.14.0-cp312-cp312-win_amd64.whl", hash = "sha256:1ca3894e9e9f72da93544f64988d9c052254a338a9f855165f37f51edb6591de"}, - {file = "yarl-1.14.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:5d02d700705d67e09e1f57681f758f0b9d4412eeb70b2eb8d96ca6200b486db3"}, - {file = "yarl-1.14.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:30600ba5db60f7c0820ef38a2568bb7379e1418ecc947a0f76fd8b2ff4257a97"}, - {file = "yarl-1.14.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e85d86527baebb41a214cc3b45c17177177d900a2ad5783dbe6f291642d4906f"}, - {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37001e5d4621cef710c8dc1429ca04e189e572f128ab12312eab4e04cf007132"}, - {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f4f4547944d4f5cfcdc03f3f097d6f05bbbc915eaaf80a2ee120d0e756de377d"}, - {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75ff4c819757f9bdb35de049a509814d6ce851fe26f06eb95a392a5640052482"}, - {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68ac1a09392ed6e3fd14be880d39b951d7b981fd135416db7d18a6208c536561"}, - {file = "yarl-1.14.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96952f642ac69075e44c7d0284528938fdff39422a1d90d3e45ce40b72e5e2d9"}, - {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a56fbe3d7f3bce1d060ea18d2413a2ca9ca814eea7cedc4d247b5f338d54844e"}, - {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7e2637d75e92763d1322cb5041573279ec43a80c0f7fbbd2d64f5aee98447b17"}, - {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash 
= "sha256:9abe80ae2c9d37c17599557b712e6515f4100a80efb2cda15f5f070306477cd2"}, - {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:217a782020b875538eebf3948fac3a7f9bbbd0fd9bf8538f7c2ad7489e80f4e8"}, - {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b9cfef3f14f75bf6aba73a76caf61f9d00865912a04a4393c468a7ce0981b519"}, - {file = "yarl-1.14.0-cp313-cp313-win32.whl", hash = "sha256:d8361c7d04e6a264481f0b802e395f647cd3f8bbe27acfa7c12049efea675bd1"}, - {file = "yarl-1.14.0-cp313-cp313-win_amd64.whl", hash = "sha256:bc24f968b82455f336b79bf37dbb243b7d76cd40897489888d663d4e028f5069"}, - {file = "yarl-1.14.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:91d875f75fabf76b3018c5f196bf3d308ed2b49ddcb46c1576d6b075754a1393"}, - {file = "yarl-1.14.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4009def9be3a7e5175db20aa2d7307ecd00bbf50f7f0f989300710eee1d0b0b9"}, - {file = "yarl-1.14.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:582cedde49603f139be572252a318b30dc41039bc0b8165f070f279e5d12187f"}, - {file = "yarl-1.14.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbd9ff43a04f8ffe8a959a944c2dca10d22f5f99fc6a459f49c3ebfb409309d9"}, - {file = "yarl-1.14.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9f805e37ed16cc212fdc538a608422d7517e7faf539bedea4fe69425bc55d76"}, - {file = "yarl-1.14.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:95e16e9eaa2d7f5d87421b8fe694dd71606aa61d74b824c8d17fc85cc51983d1"}, - {file = "yarl-1.14.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:816d24f584edefcc5ca63428f0b38fee00b39fe64e3c5e558f895a18983efe96"}, - {file = "yarl-1.14.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd2660c01367eb3ef081b8fa0a5da7fe767f9427aa82023a961a5f28f0d4af6c"}, - {file = "yarl-1.14.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = 
"sha256:94b2bb9bcfd5be9d27004ea4398fb640373dd0c1a9e219084f42c08f77a720ab"}, - {file = "yarl-1.14.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:c2089a9afef887664115f7fa6d3c0edd6454adaca5488dba836ca91f60401075"}, - {file = "yarl-1.14.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:2192f718db4a8509f63dd6d950f143279211fa7e6a2c612edc17d85bf043d36e"}, - {file = "yarl-1.14.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:8385ab36bf812e9d37cf7613999a87715f27ef67a53f0687d28c44b819df7cb0"}, - {file = "yarl-1.14.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:b4c1ecba93e7826dc71ddba75fb7740cdb52e7bd0be9f03136b83f54e6a1f511"}, - {file = "yarl-1.14.0-cp38-cp38-win32.whl", hash = "sha256:e749af6c912a7bb441d105c50c1a3da720474e8acb91c89350080dd600228f0e"}, - {file = "yarl-1.14.0-cp38-cp38-win_amd64.whl", hash = "sha256:147e36331f6f63e08a14640acf12369e041e0751bb70d9362df68c2d9dcf0c87"}, - {file = "yarl-1.14.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a9f917966d27f7ce30039fe8d900f913c5304134096554fd9bea0774bcda6d1"}, - {file = "yarl-1.14.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8a2f8fb7f944bcdfecd4e8d855f84c703804a594da5123dd206f75036e536d4d"}, - {file = "yarl-1.14.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f4e475f29a9122f908d0f1f706e1f2fc3656536ffd21014ff8a6f2e1b14d1d8"}, - {file = "yarl-1.14.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8089d4634d8fa2b1806ce44fefa4979b1ab2c12c0bc7ef3dfa45c8a374811348"}, - {file = "yarl-1.14.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b16f6c75cffc2dc0616ea295abb0e1967601bd1fb1e0af6a1de1c6c887f3439"}, - {file = "yarl-1.14.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:498b3c55087b9d762636bca9b45f60d37e51d24341786dc01b81253f9552a607"}, - {file = "yarl-1.14.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3f8bfc1db82589ef965ed234b87de30d140db8b6dc50ada9e33951ccd8ec07a"}, - 
{file = "yarl-1.14.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:625f207b1799e95e7c823f42f473c1e9dbfb6192bd56bba8695656d92be4535f"}, - {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:781e2495e408a81e4eaeedeb41ba32b63b1980dddf8b60dbbeff6036bcd35049"}, - {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:659603d26d40dd4463200df9bfbc339fbfaed3fe32e5c432fe1dc2b5d4aa94b4"}, - {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:4e0d45ebf975634468682c8bec021618b3ad52c37619e5c938f8f831fa1ac5c0"}, - {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:a2e4725a08cb2b4794db09e350c86dee18202bb8286527210e13a1514dc9a59a"}, - {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:19268b4fec1d7760134f2de46ef2608c2920134fb1fa61e451f679e41356dc55"}, - {file = "yarl-1.14.0-cp39-cp39-win32.whl", hash = "sha256:337912bcdcf193ade64b9aae5a4017a0a1950caf8ca140362e361543c6773f21"}, - {file = "yarl-1.14.0-cp39-cp39-win_amd64.whl", hash = "sha256:b6d0147574ce2e7b812c989e50fa72bbc5338045411a836bd066ce5fc8ac0bce"}, - {file = "yarl-1.14.0-py3-none-any.whl", hash = "sha256:c8ed4034f0765f8861620c1f2f2364d2e58520ea288497084dae880424fc0d9f"}, - {file = "yarl-1.14.0.tar.gz", hash = "sha256:88c7d9d58aab0724b979ab5617330acb1c7030b79379c8138c1c8c94e121d1b3"}, + {file = "yarl-1.17.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2d8715edfe12eee6f27f32a3655f38d6c7410deb482158c0b7d4b7fad5d07628"}, + {file = "yarl-1.17.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1803bf2a7a782e02db746d8bd18f2384801bc1d108723840b25e065b116ad726"}, + {file = "yarl-1.17.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e66589110e20c2951221a938fa200c7aa134a8bdf4e4dc97e6b21539ff026d4"}, + {file = "yarl-1.17.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:7069d411cfccf868e812497e0ec4acb7c7bf8d684e93caa6c872f1e6f5d1664d"}, + {file = "yarl-1.17.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cbf70ba16118db3e4b0da69dcde9d4d4095d383c32a15530564c283fa38a7c52"}, + {file = "yarl-1.17.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0bc53cc349675b32ead83339a8de79eaf13b88f2669c09d4962322bb0f064cbc"}, + {file = "yarl-1.17.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d6aa18a402d1c80193ce97c8729871f17fd3e822037fbd7d9b719864018df746"}, + {file = "yarl-1.17.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d89c5bc701861cfab357aa0cd039bc905fe919997b8c312b4b0c358619c38d4d"}, + {file = "yarl-1.17.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b728bdf38ca58f2da1d583e4af4ba7d4cd1a58b31a363a3137a8159395e7ecc7"}, + {file = "yarl-1.17.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:5542e57dc15d5473da5a39fbde14684b0cc4301412ee53cbab677925e8497c11"}, + {file = "yarl-1.17.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e564b57e5009fb150cb513804d7e9e9912fee2e48835638f4f47977f88b4a39c"}, + {file = "yarl-1.17.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:eb3c4cff524b4c1c1dba3a6da905edb1dfd2baf6f55f18a58914bbb2d26b59e1"}, + {file = "yarl-1.17.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:05e13f389038842da930d439fbed63bdce3f7644902714cb68cf527c971af804"}, + {file = "yarl-1.17.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:153c38ee2b4abba136385af4467459c62d50f2a3f4bde38c7b99d43a20c143ef"}, + {file = "yarl-1.17.0-cp310-cp310-win32.whl", hash = "sha256:4065b4259d1ae6f70fd9708ffd61e1c9c27516f5b4fae273c41028afcbe3a094"}, + {file = "yarl-1.17.0-cp310-cp310-win_amd64.whl", hash = "sha256:abf366391a02a8335c5c26163b5fe6f514cc1d79e74d8bf3ffab13572282368e"}, + {file = "yarl-1.17.0-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:19a4fe0279626c6295c5b0c8c2bb7228319d2e985883621a6e87b344062d8135"}, + {file = "yarl-1.17.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cadd0113f4db3c6b56868d6a19ca6286f5ccfa7bc08c27982cf92e5ed31b489a"}, + {file = "yarl-1.17.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:60d6693eef43215b1ccfb1df3f6eae8db30a9ff1e7989fb6b2a6f0b468930ee8"}, + {file = "yarl-1.17.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bb8bf3843e1fa8cf3fe77813c512818e57368afab7ebe9ef02446fe1a10b492"}, + {file = "yarl-1.17.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d2a5b35fd1d8d90443e061d0c8669ac7600eec5c14c4a51f619e9e105b136715"}, + {file = "yarl-1.17.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c5bf17b32f392df20ab5c3a69d37b26d10efaa018b4f4e5643c7520d8eee7ac7"}, + {file = "yarl-1.17.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48f51b529b958cd06e78158ff297a8bf57b4021243c179ee03695b5dbf9cb6e1"}, + {file = "yarl-1.17.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5fcaa06bf788e19f913d315d9c99a69e196a40277dc2c23741a1d08c93f4d430"}, + {file = "yarl-1.17.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:32f3ee19ff0f18a7a522d44e869e1ebc8218ad3ae4ebb7020445f59b4bbe5897"}, + {file = "yarl-1.17.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:a4fb69a81ae2ec2b609574ae35420cf5647d227e4d0475c16aa861dd24e840b0"}, + {file = "yarl-1.17.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7bacc8b77670322132a1b2522c50a1f62991e2f95591977455fd9a398b4e678d"}, + {file = "yarl-1.17.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:437bf6eb47a2d20baaf7f6739895cb049e56896a5ffdea61a4b25da781966e8b"}, + {file = "yarl-1.17.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:30534a03c87484092080e3b6e789140bd277e40f453358900ad1f0f2e61fc8ec"}, + {file = 
"yarl-1.17.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b30df4ff98703649915144be6f0df3b16fd4870ac38a09c56d5d9e54ff2d5f96"}, + {file = "yarl-1.17.0-cp311-cp311-win32.whl", hash = "sha256:263b487246858e874ab53e148e2a9a0de8465341b607678106829a81d81418c6"}, + {file = "yarl-1.17.0-cp311-cp311-win_amd64.whl", hash = "sha256:07055a9e8b647a362e7d4810fe99d8f98421575e7d2eede32e008c89a65a17bd"}, + {file = "yarl-1.17.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:84095ab25ba69a8fa3fb4936e14df631b8a71193fe18bd38be7ecbe34d0f5512"}, + {file = "yarl-1.17.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:02608fb3f6df87039212fc746017455ccc2a5fc96555ee247c45d1e9f21f1d7b"}, + {file = "yarl-1.17.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:13468d291fe8c12162b7cf2cdb406fe85881c53c9e03053ecb8c5d3523822cd9"}, + {file = "yarl-1.17.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8da3f8f368fb7e2f052fded06d5672260c50b5472c956a5f1bd7bf474ae504ab"}, + {file = "yarl-1.17.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ec0507ab6523980bed050137007c76883d941b519aca0e26d4c1ec1f297dd646"}, + {file = "yarl-1.17.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08fc76df7fd8360e9ff30e6ccc3ee85b8dbd6ed5d3a295e6ec62bcae7601b932"}, + {file = "yarl-1.17.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d522f390686acb6bab2b917dd9ca06740c5080cd2eaa5aef8827b97e967319d"}, + {file = "yarl-1.17.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:147c527a80bb45b3dcd6e63401af8ac574125d8d120e6afe9901049286ff64ef"}, + {file = "yarl-1.17.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:24cf43bcd17a0a1f72284e47774f9c60e0bf0d2484d5851f4ddf24ded49f33c6"}, + {file = "yarl-1.17.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:c28a44b9e0fba49c3857360e7ad1473fc18bc7f6659ca08ed4f4f2b9a52c75fa"}, + {file 
= "yarl-1.17.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:350cacb2d589bc07d230eb995d88fcc646caad50a71ed2d86df533a465a4e6e1"}, + {file = "yarl-1.17.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:fd1ab1373274dea1c6448aee420d7b38af163b5c4732057cd7ee9f5454efc8b1"}, + {file = "yarl-1.17.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:4934e0f96dadc567edc76d9c08181633c89c908ab5a3b8f698560124167d9488"}, + {file = "yarl-1.17.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8d0a278170d75c88e435a1ce76557af6758bfebc338435b2eba959df2552163e"}, + {file = "yarl-1.17.0-cp312-cp312-win32.whl", hash = "sha256:61584f33196575a08785bb56db6b453682c88f009cd9c6f338a10f6737ce419f"}, + {file = "yarl-1.17.0-cp312-cp312-win_amd64.whl", hash = "sha256:9987a439ad33a7712bd5bbd073f09ad10d38640425fa498ecc99d8aa064f8fc4"}, + {file = "yarl-1.17.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8deda7b8eb15a52db94c2014acdc7bdd14cb59ec4b82ac65d2ad16dc234a109e"}, + {file = "yarl-1.17.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:56294218b348dcbd3d7fce0ffd79dd0b6c356cb2a813a1181af730b7c40de9e7"}, + {file = "yarl-1.17.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1fab91292f51c884b290ebec0b309a64a5318860ccda0c4940e740425a67b6b7"}, + {file = "yarl-1.17.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cf93fa61ff4d9c7d40482ce1a2c9916ca435e34a1b8451e17f295781ccc034f"}, + {file = "yarl-1.17.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:261be774a0d71908c8830c33bacc89eef15c198433a8cc73767c10eeeb35a7d0"}, + {file = "yarl-1.17.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:deec9693b67f6af856a733b8a3e465553ef09e5e8ead792f52c25b699b8f9e6e"}, + {file = "yarl-1.17.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c804b07622ba50a765ca7fb8145512836ab65956de01307541def869e4a456c9"}, + {file = 
"yarl-1.17.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d013a7c9574e98c14831a8f22d27277688ec3b2741d0188ac01a910b009987a"}, + {file = "yarl-1.17.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e2cfcba719bd494c7413dcf0caafb51772dec168c7c946e094f710d6aa70494e"}, + {file = "yarl-1.17.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:c068aba9fc5b94dfae8ea1cedcbf3041cd4c64644021362ffb750f79837e881f"}, + {file = "yarl-1.17.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:3616df510ffac0df3c9fa851a40b76087c6c89cbcea2de33a835fc80f9faac24"}, + {file = "yarl-1.17.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:755d6176b442fba9928a4df787591a6a3d62d4969f05c406cad83d296c5d4e05"}, + {file = "yarl-1.17.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:c18f6e708d1cf9ff5b1af026e697ac73bea9cb70ee26a2b045b112548579bed2"}, + {file = "yarl-1.17.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5b937c216b6dee8b858c6afea958de03c5ff28406257d22b55c24962a2baf6fd"}, + {file = "yarl-1.17.0-cp313-cp313-win32.whl", hash = "sha256:d0131b14cb545c1a7bd98f4565a3e9bdf25a1bd65c83fc156ee5d8a8499ec4a3"}, + {file = "yarl-1.17.0-cp313-cp313-win_amd64.whl", hash = "sha256:01c96efa4313c01329e88b7e9e9e1b2fc671580270ddefdd41129fa8d0db7696"}, + {file = "yarl-1.17.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0d44f67e193f0a7acdf552ecb4d1956a3a276c68e7952471add9f93093d1c30d"}, + {file = "yarl-1.17.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:16ea0aa5f890cdcb7ae700dffa0397ed6c280840f637cd07bffcbe4b8d68b985"}, + {file = "yarl-1.17.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cf5469dc7dcfa65edf5cc3a6add9f84c5529c6b556729b098e81a09a92e60e51"}, + {file = "yarl-1.17.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e662bf2f6e90b73cf2095f844e2bc1fda39826472a2aa1959258c3f2a8500a2f"}, + {file = "yarl-1.17.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", 
hash = "sha256:8260e88f1446904ba20b558fa8ce5d0ab9102747238e82343e46d056d7304d7e"}, + {file = "yarl-1.17.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5dc16477a4a2c71e64c5d3d15d7ae3d3a6bb1e8b955288a9f73c60d2a391282f"}, + {file = "yarl-1.17.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46027e326cecd55e5950184ec9d86c803f4f6fe4ba6af9944a0e537d643cdbe0"}, + {file = "yarl-1.17.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc95e46c92a2b6f22e70afe07e34dbc03a4acd07d820204a6938798b16f4014f"}, + {file = "yarl-1.17.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:16ca76c7ac9515320cd09d6cc083d8d13d1803f6ebe212b06ea2505fd66ecff8"}, + {file = "yarl-1.17.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:eb1a5b97388f2613f9305d78a3473cdf8d80c7034e554d8199d96dcf80c62ac4"}, + {file = "yarl-1.17.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:41fd5498975418cdc34944060b8fbeec0d48b2741068077222564bea68daf5a6"}, + {file = "yarl-1.17.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:146ca582ed04a5664ad04b0e0603934281eaab5c0115a5a46cce0b3c061a56a1"}, + {file = "yarl-1.17.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:6abb8c06107dbec97481b2392dafc41aac091a5d162edf6ed7d624fe7da0587a"}, + {file = "yarl-1.17.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4d14be4613dd4f96c25feb4bd8c0d8ce0f529ab0ae555a17df5789e69d8ec0c5"}, + {file = "yarl-1.17.0-cp39-cp39-win32.whl", hash = "sha256:174d6a6cad1068f7850702aad0c7b1bca03bcac199ca6026f84531335dfc2646"}, + {file = "yarl-1.17.0-cp39-cp39-win_amd64.whl", hash = "sha256:6af417ca2c7349b101d3fd557ad96b4cd439fdb6ab0d288e3f64a068eea394d0"}, + {file = "yarl-1.17.0-py3-none-any.whl", hash = "sha256:62dd42bb0e49423f4dd58836a04fcf09c80237836796025211bbe913f1524993"}, + {file = "yarl-1.17.0.tar.gz", hash = "sha256:d3f13583f378930377e02002b4085a3d025b00402d5a80911726d43a67911cd9"}, ] [package.dependencies] @@ -2441,4 
+2786,4 @@ propcache = ">=0.2.0" [metadata] lock-version = "2.0" python-versions = "3.11.*" -content-hash = "cbadc308177fe76c3676a9ce88f1e7118f5c64d0c963e4c2c2e779ed8a5b82d4" +content-hash = "edf46f687b0e4c950a96bce90ab995a9d2c9254beea314e8ac4c74da938bdfc2" diff --git a/modules/programming/module_programming_llm/pyproject.toml b/modules/programming/module_programming_llm/pyproject.toml index 855300fc1..c6f872c56 100644 --- a/modules/programming/module_programming_llm/pyproject.toml +++ b/modules/programming/module_programming_llm/pyproject.toml @@ -8,19 +8,23 @@ license = "MIT" [tool.poetry.dependencies] python = "3.11.*" # if you have local changes in the common Athena module, use the line below. Otherwise, please use a VCS stable version. Also, a version with tag = "" is possible. -# athena = { path = "../athena", develop = true } athena = { path = "../../../athena", develop = true } #athena = { git = "https://github.com/ls1intum/Athena.git", rev = "2da2d33", subdirectory = "athena"} llm_core = { path = "../../../llm_core", develop = true } gitpython = "^3.1.41" -tiktoken = "0.7.0" promptlayer = "^0.1.85" python-dotenv = "^1.0.0" nltk = "^3.8.1" +replicate = "^0.11.0" +google-api-core = "2.11.1" +google-api-python-client = "2.95.0" +google-auth = "2.22.0" +google-auth-httplib2 = "0.1.0" +googleapis-common-protos = "1.59.1" +bs4 = "0.0.2" [tool.poetry.group.dev.dependencies] -types-requests = "^2.31.0.8" -pydantic = "1.10.17" +pydantic = "2.7.4" prospector = "^1.10.2" [tool.poetry.scripts]