From 2ce23990672a871c05e2da45791cd2f60f9682c5 Mon Sep 17 00:00:00 2001 From: Akshay Goel Date: Tue, 22 Jul 2025 01:39:11 -0400 Subject: [PATCH 01/20] docs(pypi): Improve README display and badge reliability - Switch from badge.fury.io to shields.io for working PyPI badge - Convert relative paths to absolute GitHub URLs for PyPI compatibility - Bump version to 0.1.3 --- README.md | 16 ++++++++-------- pyproject.toml | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index b49d4b6a..efe64a72 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,12 @@

-<img src="..." alt="LangExtract Logo">
+<img src="https://raw.githubusercontent.com/google/langextract/main/..." alt="LangExtract Logo">

# LangExtract -[![PyPI version](https://badge.fury.io/py/langextract.svg)](https://badge.fury.io/py/langextract) +[![PyPI version](https://img.shields.io/pypi/v/langextract.svg)](https://pypi.org/project/langextract/) [![GitHub stars](https://img.shields.io/github/stars/google/langextract.svg?style=social&label=Star)](https://github.com/google/langextract) ![Tests](https://github.com/google/langextract/actions/workflows/ci.yaml/badge.svg) @@ -121,7 +121,7 @@ with open("visualization.html", "w") as f: This creates an animated and interactive HTML file: -![Romeo and Juliet Basic Visualization ](docs/_static/romeo_juliet_basic.gif) +![Romeo and Juliet Basic Visualization ](https://raw.githubusercontent.com/google/langextract/main/docs/_static/romeo_juliet_basic.gif) > **Note on LLM Knowledge Utilization:** This example demonstrates extractions that stay close to the text evidence - extracting "longing" for Lady Juliet's emotional state and identifying "yearning" from "gazed longingly at the stars." The task could be modified to generate attributes that draw more heavily from the LLM's world knowledge (e.g., adding `"identity": "Capulet family daughter"` or `"literary_context": "tragic heroine"`). The balance between text-evidence and knowledge-inference is controlled by your prompt instructions and example attributes. @@ -142,7 +142,7 @@ result = lx.extract( ) ``` -This approach can extract hundreds of entities from full novels while maintaining high accuracy. The interactive visualization seamlessly handles large result sets, making it easy to explore hundreds of entities from the output JSONL file. **[See the full *Romeo and Juliet* extraction example →](docs/examples/longer_text_example.md)** for detailed results and performance insights. +This approach can extract hundreds of entities from full novels while maintaining high accuracy. The interactive visualization seamlessly handles large result sets, making it easy to explore hundreds of entities from the output JSONL file. **[See the full *Romeo and Juliet* extraction example →](https://github.com/google/langextract/blob/main/docs/examples/longer_text_example.md)** for detailed results and performance insights. ## Installation @@ -252,7 +252,7 @@ Additional examples of LangExtract in action: LangExtract can process complete documents directly from URLs. This example demonstrates extraction from the full text of *Romeo and Juliet* from Project Gutenberg (147,843 characters), showing parallel processing, sequential extraction passes, and performance optimization for long document processing. -**[View *Romeo and Juliet* Full Text Example →](docs/examples/longer_text_example.md)** +**[View *Romeo and Juliet* Full Text Example →](https://github.com/google/langextract/blob/main/docs/examples/longer_text_example.md)** ### Medication Extraction @@ -260,7 +260,7 @@ LangExtract can process complete documents directly from URLs. This example demo LangExtract excels at extracting structured medical information from clinical text. These examples demonstrate both basic entity recognition (medication names, dosages, routes) and relationship extraction (connecting medications to their attributes), showing LangExtract's effectiveness for healthcare applications. 
-**[View Medication Examples →](docs/examples/medication_examples.md)** +**[View Medication Examples →](https://github.com/google/langextract/blob/main/docs/examples/medication_examples.md)** ### Radiology Report Structuring: RadExtract @@ -270,7 +270,7 @@ Explore RadExtract, a live interactive demo on HuggingFace Spaces that shows how ## Contributing -Contributions are welcome! See [CONTRIBUTING.md](CONTRIBUTING.md) to get started +Contributions are welcome! See [CONTRIBUTING.md](https://github.com/google/langextract/blob/main/CONTRIBUTING.md) to get started with development, testing, and pull requests. You must sign a [Contributor License Agreement](https://cla.developers.google.com/about) before submitting patches. @@ -301,7 +301,7 @@ tox # runs pylint + pytest on Python 3.10 and 3.11 This is not an officially supported Google product. If you use LangExtract in production or publications, please cite accordingly and -acknowledge usage. Use is subject to the [Apache 2.0 License](LICENSE). +acknowledge usage. Use is subject to the [Apache 2.0 License](https://github.com/google/langextract/blob/main/LICENSE). For health-related applications, use of LangExtract is also subject to the [Health AI Developer Foundations Terms of Use](https://developers.google.com/health-ai-developer-foundations/terms). diff --git a/pyproject.toml b/pyproject.toml index a2dfd19c..4de558bb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,7 +18,7 @@ build-backend = "setuptools.build_meta" [project] name = "langextract" -version = "0.1.0" +version = "0.1.3" description = "LangExtract: A library for extracting structured data from language models" readme = "README.md" requires-python = ">=3.10" From 4fe7580b307355b77abafe0b2c354a0a226a8378 Mon Sep 17 00:00:00 2001 From: Akshay Goel Date: Tue, 22 Jul 2025 17:57:40 -0400 Subject: [PATCH 02/20] feat: add trusted publishing workflow and prepare v1.0.0 release - Add GitHub Actions workflow for automated PyPI publishing via OIDC - Configure trusted publishing environment for verified releases - Update project metadata with proper URLs and license format - Prepare for v1.0.0 stable release with production-ready automation --- .github/workflows/publish.yml | 55 +++++++++++++++++++++++++++++++++++ pyproject.toml | 8 ++++- 2 files changed, 62 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/publish.yml diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml new file mode 100644 index 00000000..d037b9d1 --- /dev/null +++ b/.github/workflows/publish.yml @@ -0,0 +1,55 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +name: Publish to PyPI + +on: + release: + types: [published] + +permissions: + contents: read + id-token: write + +jobs: + pypi-publish: + name: Publish to PyPI + runs-on: ubuntu-latest + environment: pypi + permissions: + id-token: write + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Install build dependencies + run: | + python -m pip install --upgrade pip + pip install build + + - name: Build package + run: python -m build + + - name: Verify build artifacts + run: | + ls -la dist/ + pip install twine + twine check dist/* + + - name: Publish to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 4de558bb..b725282e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,7 +18,7 @@ build-backend = "setuptools.build_meta" [project] name = "langextract" -version = "0.1.3" +version = "1.0.0" description = "LangExtract: A library for extracting structured data from language models" readme = "README.md" requires-python = ">=3.10" @@ -46,6 +46,12 @@ dependencies = [ "typing-extensions>=4.0.0" ] +[project.urls] +"Homepage" = "https://github.com/google/langextract" +"Repository" = "https://github.com/google/langextract" +"Documentation" = "https://github.com/google/langextract/blob/main/README.md" +"Bug Tracker" = "https://github.com/google/langextract/issues" + [project.optional-dependencies] dev = [ "black>=23.7.0", From e696a48db1876aa3e417c5eca26c5964a8a00304 Mon Sep 17 00:00:00 2001 From: Akshay Goel Date: Fri, 1 Aug 2025 01:07:03 -0400 Subject: [PATCH 03/20] Fix: Resolve libmagic ImportError (#6) - Add pylibmagic>=0.5.0 dependency for bundled libraries - Add [full] install option and pre-import handling - Update README with troubleshooting and Docker sections - Bump version to 1.0.1 Fixes #6 --- Dockerfile | 16 ++++++++++++++++ README.md | 12 ++++++++++++ langextract/__init__.py | 7 +++++++ pyproject.toml | 7 ++++++- 4 files changed, 41 insertions(+), 1 deletion(-) create mode 100644 Dockerfile diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..ca90ce9f --- /dev/null +++ b/Dockerfile @@ -0,0 +1,16 @@ +# Production Dockerfile for LangExtract with libmagic support +FROM python:3.10-slim + +# Install system dependencies including libmagic +RUN apt-get update && apt-get install -y --no-install-recommends \ + libmagic1 \ + && rm -rf /var/lib/apt/lists/* + +# Set working directory +WORKDIR /app + +# Install LangExtract from PyPI +RUN pip install --no-cache-dir langextract + +# Set default command +CMD ["python"] \ No newline at end of file diff --git a/README.md b/README.md index efe64a72..4e4710b0 100644 --- a/README.md +++ b/README.md @@ -181,6 +181,12 @@ pip install -e ".[dev]" pip install -e ".[test]" ``` +### Docker + +```bash +docker build -t langextract . +docker run --rm -e LANGEXTRACT_API_KEY="your-api-key" langextract python your_script.py +``` ## API Key Setup for Cloud Models @@ -297,6 +303,12 @@ Or reproduce the full CI matrix locally with tox: tox # runs pylint + pytest on Python 3.10 and 3.11 ``` +## Troubleshooting + +**libmagic error**: If you see "failed to find libmagic", install with `pip install langextract[full]` or install system dependencies: +- Ubuntu/Debian: `sudo apt-get install libmagic1` +- macOS: `brew install libmagic` + ## Disclaimer This is not an officially supported Google product. 
If you use diff --git a/langextract/__init__.py b/langextract/__init__.py index 817e04f2..73e4d00f 100644 --- a/langextract/__init__.py +++ b/langextract/__init__.py @@ -16,6 +16,13 @@ from __future__ import annotations +# Ensure libmagic is available before langfun imports python-magic. +# pylibmagic provides pre-built binaries that python-magic needs. +try: + import pylibmagic # noqa: F401 (side-effect import) +except ImportError: + pass + from collections.abc import Iterable, Sequence import os from typing import Any, Type, TypeVar, cast diff --git a/pyproject.toml b/pyproject.toml index b725282e..16e4afbe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,7 +18,7 @@ build-backend = "setuptools.build_meta" [project] name = "langextract" -version = "1.0.0" +version = "1.0.1" description = "LangExtract: A library for extracting structured data from language models" readme = "README.md" requires-python = ">=3.10" @@ -41,6 +41,7 @@ dependencies = [ "pydantic>=1.8.0", "python-dotenv>=0.19.0", "python-magic>=0.4.27", + "pylibmagic>=0.5.0", "requests>=2.25.0", "tqdm>=4.64.0", "typing-extensions>=4.0.0" @@ -64,6 +65,10 @@ test = [ "pytest>=7.4.0", "tomli>=2.0.0" ] +full = [ + "python-magic>=0.4.27", + "pylibmagic>=0.5.0", +] [tool.setuptools] packages = ["langextract"] From 5447637ca1becb4b1561f7edb1b303c7f843ba4f Mon Sep 17 00:00:00 2001 From: Leena Kamran <62442533+kleeena@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:43:42 +0500 Subject: [PATCH 04/20] docs: clarify output_dir behavior in medication_examples.md --- docs/examples/medication_examples.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/examples/medication_examples.md b/docs/examples/medication_examples.md index 7fb27b11..64a4c13c 100644 --- a/docs/examples/medication_examples.md +++ b/docs/examples/medication_examples.md @@ -193,7 +193,11 @@ for med_name, extractions in medication_groups.items(): print(f" • {extraction.extraction_class.capitalize()}: {extraction.extraction_text}{position_info}") # Save and visualize the results -lx.io.save_annotated_documents([result], output_name="medical_relationship_extraction.jsonl") +lx.io.save_annotated_documents( + [result], + output_name="medical_ner_extraction.jsonl", + output_dir="." # Saves to the current directory instead of the default 'test_output/' +) # Generate the interactive visualization html_content = lx.visualize("medical_relationship_extraction.jsonl") From 175e0750c039edc7e9de0d4c99cf7f5618c4aab1 Mon Sep 17 00:00:00 2001 From: Leena Kamran <62442533+kleeena@users.noreply.github.com> Date: Sat, 2 Aug 2025 13:50:11 +0500 Subject: [PATCH 05/20] Removed inline comment in medication example Deleted an inline comment referencing the output directory in the save_annotated_documents. --- docs/examples/medication_examples.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/examples/medication_examples.md b/docs/examples/medication_examples.md index 64a4c13c..f167cdc5 100644 --- a/docs/examples/medication_examples.md +++ b/docs/examples/medication_examples.md @@ -196,7 +196,7 @@ for med_name, extractions in medication_groups.items(): lx.io.save_annotated_documents( [result], output_name="medical_ner_extraction.jsonl", - output_dir="." # Saves to the current directory instead of the default 'test_output/' + output_dir="." 
) # Generate the interactive visualization From e6c3dcd81476e6f32c21d931e25b6608a58271e6 Mon Sep 17 00:00:00 2001 From: Akshay Goel Date: Sat, 2 Aug 2025 05:34:08 -0400 Subject: [PATCH 06/20] docs: add output_dir="." to all save_annotated_documents examples Prevents confusion from default `test_output/...` by explicitly saving to current directory. --- README.md | 2 +- docs/examples/longer_text_example.md | 2 +- docs/examples/medication_examples.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 4e4710b0..3bb8d615 100644 --- a/README.md +++ b/README.md @@ -111,7 +111,7 @@ The extractions can be saved to a `.jsonl` file, a popular format for working wi ```python # Save the results to a JSONL file -lx.io.save_annotated_documents([result], output_name="extraction_results.jsonl") +lx.io.save_annotated_documents([result], output_name="extraction_results.jsonl", output_dir=".") # Generate the visualization from the file html_content = lx.visualize("extraction_results.jsonl") diff --git a/docs/examples/longer_text_example.md b/docs/examples/longer_text_example.md index 62d1ff39..a92d2625 100644 --- a/docs/examples/longer_text_example.md +++ b/docs/examples/longer_text_example.md @@ -76,7 +76,7 @@ result = lx.extract( print(f"Extracted {len(result.extractions)} entities from {len(result.text):,} characters") # Save and visualize the results -lx.io.save_annotated_documents([result], output_name="romeo_juliet_extractions.jsonl") +lx.io.save_annotated_documents([result], output_name="romeo_juliet_extractions.jsonl", output_dir=".") # Generate the interactive visualization html_content = lx.visualize("romeo_juliet_extractions.jsonl") diff --git a/docs/examples/medication_examples.md b/docs/examples/medication_examples.md index f167cdc5..fcf645f2 100644 --- a/docs/examples/medication_examples.md +++ b/docs/examples/medication_examples.md @@ -62,7 +62,7 @@ for entity in result.extractions: print(f"• {entity.extraction_class.capitalize()}: {entity.extraction_text}{position_info}") # Save and visualize the results -lx.io.save_annotated_documents([result], output_name="medical_ner_extraction.jsonl") +lx.io.save_annotated_documents([result], output_name="medical_ner_extraction.jsonl", output_dir=".") # Generate the interactive visualization html_content = lx.visualize("medical_ner_extraction.jsonl") From 13fbd2ca718c2ba5c90239f121dc43faa062be7d Mon Sep 17 00:00:00 2001 From: Akshay Goel Date: Sun, 3 Aug 2025 03:24:27 -0400 Subject: [PATCH 07/20] build: add formatting & linting pipeline with pre-commit integration --- .pre-commit-config.yaml | 46 +++++ .pylintrc | 414 +++++++++++++++++++++++++++++++++++++++- CONTRIBUTING.md | 102 +++++++++- README.md | 33 ++++ autoformat.sh | 125 ++++++++++++ pyproject.toml | 28 ++- tests/.pylintrc | 52 +++++ tox.ini | 18 +- 8 files changed, 807 insertions(+), 11 deletions(-) create mode 100644 .pre-commit-config.yaml create mode 100755 autoformat.sh create mode 100644 tests/.pylintrc diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..84410316 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,46 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Pre-commit hooks for LangExtract +# Install with: pre-commit install +# Run manually: pre-commit run --all-files + +repos: + - repo: https://github.com/PyCQA/isort + rev: 5.13.2 + hooks: + - id: isort + name: isort (import sorting) + # Configuration is in pyproject.toml + + - repo: https://github.com/google/pyink + rev: 24.3.0 + hooks: + - id: pyink + name: pyink (Google's Black fork) + args: ["--config", "pyproject.toml"] + + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.5.0 + hooks: + - id: end-of-file-fixer + exclude: \.gif$|\.svg$ + - id: trailing-whitespace + - id: check-yaml + - id: check-added-large-files + args: ['--maxkb=1000'] + - id: check-merge-conflict + - id: check-case-conflict + - id: mixed-line-ending + args: ['--fix=lf'] diff --git a/.pylintrc b/.pylintrc index 5709bc73..2e09c87f 100644 --- a/.pylintrc +++ b/.pylintrc @@ -14,10 +14,418 @@ [MASTER] +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use. +jobs=0 + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Pickle collected data for later comparisons. +persistent=yes + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +# Note: These plugins require Pylint >= 3.0 +load-plugins= + pylint.extensions.docparams, + pylint.extensions.typing + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + + [MESSAGES CONTROL] -disable=all -enable=F + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time. +enable= + useless-suppression + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). +disable= + abstract-method, # Protocol/ABC classes often have abstract methods + too-few-public-methods, # Valid for data classes with minimal interface + fixme, # TODO/FIXME comments are useful for tracking work + # --- Code style and formatting --- + line-too-long, # Handled by pyink formatter + bad-indentation, # Pyink uses 2-space indentation + # --- Design complexity --- + too-many-positional-arguments, + too-many-locals, + too-many-arguments, + too-many-branches, + too-many-statements, + too-many-nested-blocks, + # --- Style preferences --- + no-else-return, + no-else-raise, + # --- Documentation --- + missing-function-docstring, + missing-class-docstring, + missing-raises-doc, + # --- Gradual improvements --- + deprecated-typing-alias, # For typing.Type etc. + unspecified-encoding, + unused-import + [REPORTS] + +# Set the output format. 
Available formats are text, parseable, colorized, msvs +# (visual studio) and html. output-format=text -reports=no \ No newline at end of file + +# Tells whether to display a full report or only the messages +reports=no + +# Activate the evaluation score. +score=no + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Naming style matching correct attribute names. +attr-naming-style=snake_case + +# Bad variable names which should always be refused, separated by a comma. +bad-names=foo,bar,baz,toto,tutu,tata + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Good variable names which should always be accepted, separated by a comma. +good-names=i,j,k,ex,Run,_,id,ok + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Naming style matching correct variable names. +variable-naming-style=snake_case + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format=LF + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=2 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=" " + +# Maximum number of characters on a single line. +max-line-length=80 + +# Maximum number of lines in a module. +max-module-lines=2000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[LOGGING] + +# The type of string formatting that logging methods do. 
`old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME,XXX,TODO + + +[SIMILARITIES] + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=no + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. Available dictionaries: none. To make it working +# install python-enchant package.. +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to indicated private dictionary in +# --spelling-private-dict-file option instead of raising a message. +spelling-store-unknown-words=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local,dataclasses.InitVar,typing.Any + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis. It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules=dotenv,absl,more_itertools,pandas,requests,pydantic,yaml,IPython.display, + tqdm,numpy,google,langfun,typing_extensions + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. 
+missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_,_cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict, + _fields, + _replace, + _source, + _make + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=cls + + +[DESIGN] + +# Maximum number of arguments for function / method. +max-args=7 + +# Maximum number of attributes for a class (see R0902). +max-attributes=10 + +# Maximum number of boolean expressions in an if statement. +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=0 + + +[IMPORTS] + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=yes + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules=optparse,tkinter.tix + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled). +ext-import-graph= + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled). +import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled). 
+int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant,numpy,pandas,torch,langfun,pyglove + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "BaseException, Exception". +overgeneral-exceptions=BaseException, + Exception diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 724ff7f6..60f2eaae 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -23,13 +23,111 @@ sign a new one. This project follows HAI-DEF's [Community guidelines](https://developers.google.com/health-ai-developer-foundations/community-guidelines) -## Contribution process +## Reporting Issues -### Code Reviews +If you encounter a bug or have a feature request, please open an issue on GitHub. +We have templates to help guide you: + +- **[Bug Report](.github/ISSUE_TEMPLATE/1-bug.md)**: For reporting bugs or unexpected behavior +- **[Feature Request](.github/ISSUE_TEMPLATE/2-feature-request.md)**: For suggesting new features or improvements + +When creating an issue, GitHub will prompt you to choose the appropriate template. +Please provide as much detail as possible to help us understand and address your concern. + +## Contribution Process + +### 1. Development Setup + +To get started, clone the repository and install the necessary dependencies for development and testing. Detailed instructions can be found in the [Installation from Source](https://github.com/google/langextract#from-source) section of the `README.md`. + +**Windows Users**: The formatting scripts use bash. Please use one of: +- Git Bash (comes with Git for Windows) +- WSL (Windows Subsystem for Linux) +- PowerShell with bash-compatible commands + +### 2. Code Style and Formatting + +This project uses automated tools to maintain a consistent code style. Before submitting a pull request, please format your code: + +```bash +# Run the auto-formatter +./autoformat.sh +``` + +This script uses: +- `isort` to organize imports with Google style (single-line imports) +- `pyink` (Google's fork of Black) to format code according to Google's Python Style Guide + +You can also run the formatters manually: +```bash +isort langextract tests +pyink langextract tests --config pyproject.toml +``` + +Note: The formatters target only `langextract` and `tests` directories by default to avoid +formatting virtual environments or other non-source directories. + +### 3. Pre-commit Hooks (Recommended) + +For automatic formatting checks before each commit: + +```bash +# Install pre-commit +pip install pre-commit + +# Install the git hooks +pre-commit install + +# Run manually on all files +pre-commit run --all-files +``` + +### 4. Linting and Testing + +All contributions must pass linting checks and unit tests. 
Please run these locally before submitting your changes: + +```bash +# Run linting with Pylint 3.x +pylint --rcfile=.pylintrc langextract tests + +# Run tests +pytest tests +``` + +**Note on Pylint Configuration**: We use a modern, minimal configuration that: +- Only disables truly noisy checks (not entire categories) +- Keeps critical error detection enabled +- Uses plugins for enhanced docstring and type checking +- Aligns with our pyink formatter (80-char lines, 2-space indents) + +For full testing across Python versions: +```bash +tox # runs pylint + pytest on Python 3.10 and 3.11 +``` + +### 5. Submit Your Pull Request All submissions, including submissions by project members, require review. We use [GitHub pull requests](https://docs.github.com/articles/about-pull-requests) for this purpose. +When you create a pull request, GitHub will automatically populate it with our +[pull request template](.github/PULL_REQUEST_TEMPLATE/pull_request_template.md). +Please fill out all sections of the template to help reviewers understand your changes. + +#### Pull Request Guidelines + +- **Keep PRs focused and small**: Each PR should address a single, specific change. This makes review easier and faster. +- **Reference related issues**: Use "Fixes #123" or "Addresses #123" in your PR description to link to relevant issues. +- **Single-change commits**: A PR should typically comprise a single git commit. Squash multiple commits before submitting. +- **Clear description**: Explain what your change does and why it's needed. +- **Ensure all tests pass**: Check that both formatting and tests are green before requesting review. +- **Respond to feedback promptly**: Address reviewer comments in a timely manner. + +If your change is large or complex, consider: +- Opening an issue first to discuss the approach +- Breaking it into multiple smaller PRs +- Clearly explaining in the PR description why a larger change is necessary + For more details, read HAI-DEF's [Contributing guidelines](https://developers.google.com/health-ai-developer-foundations/community-guidelines#contributing) diff --git a/README.md b/README.md index 4e4710b0..a921bbe8 100644 --- a/README.md +++ b/README.md @@ -303,6 +303,39 @@ Or reproduce the full CI matrix locally with tox: tox # runs pylint + pytest on Python 3.10 and 3.11 ``` +## Development + +### Code Formatting + +This project uses automated formatting tools to maintain consistent code style: + +```bash +# Auto-format all code +./autoformat.sh + +# Or run formatters separately +isort langextract tests --profile google --line-length 80 +pyink langextract tests --config pyproject.toml +``` + +### Pre-commit Hooks + +For automatic formatting checks: +```bash +pre-commit install # One-time setup +pre-commit run --all-files # Manual run +``` + +### Linting + +Run linting before submitting PRs: + +```bash +pylint --rcfile=.pylintrc langextract tests +``` + +See [CONTRIBUTING.md](CONTRIBUTING.md) for full development guidelines. + ## Troubleshooting **libmagic error**: If you see "failed to find libmagic", install with `pip install langextract[full]` or install system dependencies: diff --git a/autoformat.sh b/autoformat.sh new file mode 100755 index 00000000..5b7b1897 --- /dev/null +++ b/autoformat.sh @@ -0,0 +1,125 @@ +#!/bin/bash +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Autoformat LangExtract codebase +# +# Usage: ./autoformat.sh [target_directory ...] +# If no target is specified, formats the current directory +# +# This script runs: +# 1. isort for import sorting +# 2. pyink (Google's Black fork) for code formatting +# 3. pre-commit hooks for additional formatting (trailing whitespace, end-of-file, etc.) + +set -e + +echo "LangExtract Auto-formatter" +echo "==========================" +echo + +# Check for required tools +check_tool() { + if ! command -v "$1" &> /dev/null; then + echo "Error: $1 not found. Please install with: pip install $1" + exit 1 + fi +} + +check_tool "isort" +check_tool "pyink" +check_tool "pre-commit" + +# Parse command line arguments +show_usage() { + echo "Usage: $0 [target_directory ...]" + echo + echo "Formats Python code using isort and pyink according to Google style." + echo + echo "Arguments:" + echo " target_directory One or more directories to format (default: langextract tests)" + echo + echo "Examples:" + echo " $0 # Format langextract and tests directories" + echo " $0 langextract # Format only langextract directory" + echo " $0 src tests # Format multiple specific directories" +} + +# Check for help flag +if [ "$1" = "-h" ] || [ "$1" = "--help" ]; then + show_usage + exit 0 +fi + +# Determine target directories +if [ $# -eq 0 ]; then + TARGETS="langextract tests" + echo "No target specified. Formatting default directories: langextract tests" +else + TARGETS="$@" + echo "Formatting targets: $TARGETS" +fi + +# Find pyproject.toml relative to script location +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +CONFIG_FILE="${SCRIPT_DIR}/pyproject.toml" + +if [ ! -f "$CONFIG_FILE" ]; then + echo "Warning: pyproject.toml not found at ${CONFIG_FILE}" + echo "Using default configuration." + CONFIG_ARG="" +else + CONFIG_ARG="--config $CONFIG_FILE" +fi + +echo + +# Run isort +echo "Running isort to organize imports..." +if isort $TARGETS; then + echo "Import sorting complete" +else + echo "Import sorting failed" + exit 1 +fi + +echo + +# Run pyink +echo "Running pyink to format code (Google style, 80 chars)..." +if pyink $TARGETS $CONFIG_ARG; then + echo "Code formatting complete" +else + echo "Code formatting failed" + exit 1 +fi + +echo + +# Run pre-commit hooks for additional formatting +echo "Running pre-commit hooks for additional formatting..." +if pre-commit run --all-files; then + echo "Pre-commit hooks passed" +else + echo "Pre-commit hooks made changes - please review" + # Exit with success since formatting was applied + exit 0 +fi + +echo +echo "All formatting complete!" 
+echo +echo "Next steps:" +echo " - Run: pylint --rcfile=${SCRIPT_DIR}/.pylintrc $TARGETS" +echo " - Commit your changes" diff --git a/pyproject.toml b/pyproject.toml index 16e4afbe..a1ec70ef 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,8 +55,9 @@ dependencies = [ [project.optional-dependencies] dev = [ - "black>=23.7.0", - "pylint>=2.17.5", + "pyink~=24.3.0", + "isort>=5.13.0", + "pylint>=3.0.0", "pytest>=7.4.0", "pytype>=2024.10.11", "tox>=4.0.0", @@ -83,6 +84,25 @@ include-package-data = false "*.svg", ] -[tool.pytest] +[tool.pytest.ini_options] testpaths = ["tests"] -python_files = "*_test.py" \ No newline at end of file +python_files = "*_test.py" +python_classes = "Test*" +python_functions = "test_*" +# Show extra test summary info +addopts = "-ra" + +[tool.pyink] +# Configuration for Google's style guide +line-length = 80 +unstable = true +pyink-indentation = 2 +pyink-use-majority-quotes = true + +[tool.isort] +# Configuration for Google's style guide +profile = "google" +line_length = 80 +force_sort_within_sections = true +# Allow multiple imports on one line for these modules +single_line_exclusions = ["typing", "typing_extensions", "collections.abc"] diff --git a/tests/.pylintrc b/tests/.pylintrc new file mode 100644 index 00000000..cb61873e --- /dev/null +++ b/tests/.pylintrc @@ -0,0 +1,52 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Test-specific Pylint configuration +# Inherits from parent ../.pylintrc and adds test-specific relaxations + +[MASTER] +# Python will merge with parent; no need to repeat plugins. + +[MESSAGES CONTROL] +# Additional disables for test code only +disable= + # --- Test-specific relaxations --- + duplicate-code, # Test fixtures often have similar patterns + too-many-lines, # Large test files are common + missing-module-docstring, # Tests don't need module docs + missing-class-docstring, # Test classes are self-explanatory + missing-function-docstring, # Test method names describe intent + line-too-long, # Golden strings and test data + invalid-name, # setUp, tearDown, maxDiff, etc. 
+ protected-access, # Tests often access private members + use-dict-literal, # Parametrized tests benefit from dict() + bad-indentation, # pyink 2-space style conflicts with pylint + unused-argument, # Mock callbacks often have unused args + import-error, # Test dependencies may not be installed + unused-import, # Some imports are for test fixtures + too-many-positional-arguments # Test methods can have many args + +[DESIGN] +# Relax complexity limits for tests +max-args = 10 # Fixtures often take many params +max-locals = 25 # Complex test setups +max-statements = 75 # Detailed test scenarios +max-branches = 15 # Multiple test conditions + +[BASIC] +# Allow common test naming patterns +good-names=i,j,k,ex,Run,_,id,ok,fd,fp,maxDiff,setUp,tearDown + +# Include test-specific naming patterns +method-rgx=[a-z_][a-z0-9_]{2,50}$|test[A-Z_][a-zA-Z0-9]*$|assert[A-Z][a-zA-Z0-9]*$ \ No newline at end of file diff --git a/tox.ini b/tox.ini index e8988af7..ece6f529 100644 --- a/tox.ini +++ b/tox.ini @@ -22,5 +22,19 @@ setenv = deps = .[dev,test] commands = - pylint --rcfile=.pylintrc --score n langextract tests - pytest -q \ No newline at end of file + pytest -ra + +[testenv:format] +skip_install = true +deps = + isort>=5.13.2 + pyink~=24.3.0 +commands = + isort langextract tests --check-only --diff + pyink langextract tests --check --diff --config pyproject.toml + +[testenv:lint] +deps = + pylint>=3.0.0 +commands = + pylint --rcfile=.pylintrc langextract tests From c8d2027adabb8eab8cf823e0b3b780de3085870b Mon Sep 17 00:00:00 2001 From: Akshay Goel Date: Sun, 3 Aug 2025 05:52:24 -0400 Subject: [PATCH 08/20] style: apply pyink, isort, and pre-commit formatting --- .github/ISSUE_TEMPLATE/config.yml | 2 +- .github/workflows/ci.yaml | 2 +- .github/workflows/publish.yml | 12 ++++++------ .gitignore | 2 +- .hgignore | 2 +- Dockerfile | 2 +- README.md | 2 +- docs/examples/longer_text_example.md | 2 +- docs/examples/medication_examples.md | 6 +++--- kokoro/presubmit.cfg | 2 +- kokoro/test.sh | 2 +- langextract/__init__.py | 7 +++---- langextract/inference.py | 3 --- langextract/io.py | 5 +---- langextract/progress.py | 1 + langextract/prompting.py | 4 ++-- langextract/schema.py | 1 - langextract/visualization.py | 16 ++++++++-------- tests/.pylintrc | 2 +- tests/annotation_test.py | 1 + tests/chunking_test.py | 9 ++++++--- tests/data_lib_test.py | 4 ++-- tests/inference_test.py | 5 ++++- tests/init_test.py | 4 +++- tests/prompting_test.py | 1 + tests/resolver_test.py | 1 + tests/schema_test.py | 4 +--- tests/tokenizer_test.py | 3 ++- tests/visualization_test.py | 1 + 29 files changed, 56 insertions(+), 52 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 1708201c..ba9aef5b 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -24,4 +24,4 @@ contact_links: url: https://g.co/vulnz about: > To report a security issue, please use https://g.co/vulnz. The Google Security Team will - respond within 5 working days of your report on https://g.co/vulnz. \ No newline at end of file + respond within 5 working days of your report on https://g.co/vulnz. 
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index fc8a2a87..1a72b20e 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -44,4 +44,4 @@ jobs: - name: Run tox (lint + tests) run: | - tox \ No newline at end of file + tox diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index d037b9d1..cb3ff700 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -31,25 +31,25 @@ jobs: id-token: write steps: - uses: actions/checkout@v4 - + - name: Set up Python uses: actions/setup-python@v5 with: python-version: '3.11' - + - name: Install build dependencies run: | python -m pip install --upgrade pip pip install build - + - name: Build package run: python -m build - + - name: Verify build artifacts run: | ls -la dist/ pip install twine twine check dist/* - + - name: Publish to PyPI - uses: pypa/gh-action-pypi-publish@release/v1 \ No newline at end of file + uses: pypa/gh-action-pypi-publish@release/v1 diff --git a/.gitignore b/.gitignore index 458f449d..fc93e588 100644 --- a/.gitignore +++ b/.gitignore @@ -51,4 +51,4 @@ docs/_build/ *.swp # OS-specific -.DS_Store \ No newline at end of file +.DS_Store diff --git a/.hgignore b/.hgignore index 4ef06c6c..3fb66f47 100644 --- a/.hgignore +++ b/.hgignore @@ -12,4 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -gdm/codeai/codemind/cli/GEMINI.md \ No newline at end of file +gdm/codeai/codemind/cli/GEMINI.md diff --git a/Dockerfile b/Dockerfile index ca90ce9f..edf8504e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -13,4 +13,4 @@ WORKDIR /app RUN pip install --no-cache-dir langextract # Set default command -CMD ["python"] \ No newline at end of file +CMD ["python"] diff --git a/README.md b/README.md index a921bbe8..a16bac33 100644 --- a/README.md +++ b/README.md @@ -352,4 +352,4 @@ For health-related applications, use of LangExtract is also subject to the --- -**Happy Extracting!** \ No newline at end of file +**Happy Extracting!** diff --git a/docs/examples/longer_text_example.md b/docs/examples/longer_text_example.md index 62d1ff39..f97fb172 100644 --- a/docs/examples/longer_text_example.md +++ b/docs/examples/longer_text_example.md @@ -171,4 +171,4 @@ LangExtract combines precise text positioning with world knowledge enrichment, e --- -¹ Models like Gemini 1.5 Pro show strong performance on many benchmarks, but [needle-in-a-haystack tests](https://cloud.google.com/blog/products/ai-machine-learning/the-needle-in-the-haystack-test-and-how-gemini-pro-solves-it) across million-token contexts indicate that performance can vary in multi-fact retrieval scenarios. This demonstrates how LangExtract's smaller context windows approach ensures consistently high quality across entire documents by avoiding the complexity and potential degradation of massive single-context processing. \ No newline at end of file +¹ Models like Gemini 1.5 Pro show strong performance on many benchmarks, but [needle-in-a-haystack tests](https://cloud.google.com/blog/products/ai-machine-learning/the-needle-in-the-haystack-test-and-how-gemini-pro-solves-it) across million-token contexts indicate that performance can vary in multi-fact retrieval scenarios. This demonstrates how LangExtract's smaller context windows approach ensures consistently high quality across entire documents by avoiding the complexity and potential degradation of massive single-context processing. 
diff --git a/docs/examples/medication_examples.md b/docs/examples/medication_examples.md index f167cdc5..85151b86 100644 --- a/docs/examples/medication_examples.md +++ b/docs/examples/medication_examples.md @@ -196,8 +196,8 @@ for med_name, extractions in medication_groups.items(): lx.io.save_annotated_documents( [result], output_name="medical_ner_extraction.jsonl", - output_dir="." -) + output_dir="." +) # Generate the interactive visualization html_content = lx.visualize("medical_relationship_extraction.jsonl") @@ -243,4 +243,4 @@ This example demonstrates how attributes enable efficient relationship extractio - **Relationship Extraction**: Groups related entities using attributes - **Position Tracking**: Records exact positions of extracted entities in the source text - **Structured Output**: Organizes information in a format suitable for healthcare applications -- **Interactive Visualization**: Generates HTML visualizations for exploring complex medical extractions with entity groupings and relationships clearly displayed \ No newline at end of file +- **Interactive Visualization**: Generates HTML visualizations for exploring complex medical extractions with entity groupings and relationships clearly displayed diff --git a/kokoro/presubmit.cfg b/kokoro/presubmit.cfg index 6d821424..746c6d14 100644 --- a/kokoro/presubmit.cfg +++ b/kokoro/presubmit.cfg @@ -28,4 +28,4 @@ container_properties { xunit_test_results { target_name: "pytest_results" result_xml_path: "git/repo/pytest_results/test.xml" -} \ No newline at end of file +} diff --git a/kokoro/test.sh b/kokoro/test.sh index ba75ace2..87134817 100644 --- a/kokoro/test.sh +++ b/kokoro/test.sh @@ -103,4 +103,4 @@ deactivate echo "=========================================" echo "Kokoro test script for langextract finished successfully." -echo "=========================================" \ No newline at end of file +echo "=========================================" diff --git a/langextract/__init__.py b/langextract/__init__.py index 73e4d00f..dbe8be14 100644 --- a/langextract/__init__.py +++ b/langextract/__init__.py @@ -19,13 +19,13 @@ # Ensure libmagic is available before langfun imports python-magic. # pylibmagic provides pre-built binaries that python-magic needs. try: - import pylibmagic # noqa: F401 (side-effect import) + import pylibmagic # noqa: F401 (side-effect import) except ImportError: - pass + pass from collections.abc import Iterable, Sequence import os -from typing import Any, Type, TypeVar, cast +from typing import Any, cast, Type, TypeVar import warnings import dotenv @@ -39,7 +39,6 @@ from langextract import schema from langextract import visualization - LanguageModelT = TypeVar("LanguageModelT", bound=inference.BaseLanguageModel) # Set up visualization helper at the top level (lx.visualize). 
diff --git a/langextract/inference.py b/langextract/inference.py index 822661fd..ecd525ff 100644 --- a/langextract/inference.py +++ b/langextract/inference.py @@ -29,12 +29,9 @@ from typing_extensions import override import yaml - - from langextract import data from langextract import schema - _OLLAMA_DEFAULT_MODEL_URL = 'http://localhost:11434' diff --git a/langextract/io.py b/langextract/io.py index 7f94a193..593951c7 100644 --- a/langextract/io.py +++ b/langextract/io.py @@ -18,15 +18,12 @@ import dataclasses import json import os +import pathlib from typing import Any, Iterator import pandas as pd import requests -import os -import pathlib -import os -import pathlib from langextract import data from langextract import data_lib from langextract import progress diff --git a/langextract/progress.py b/langextract/progress.py index a79b9126..41c4f3b8 100644 --- a/langextract/progress.py +++ b/langextract/progress.py @@ -16,6 +16,7 @@ from typing import Any import urllib.parse + import tqdm # ANSI color codes for terminal output diff --git a/langextract/prompting.py b/langextract/prompting.py index 5d6623b1..97856ac6 100644 --- a/langextract/prompting.py +++ b/langextract/prompting.py @@ -16,12 +16,12 @@ import dataclasses import json +import os +import pathlib import pydantic import yaml -import os -import pathlib from langextract import data from langextract import schema diff --git a/langextract/schema.py b/langextract/schema.py index 2c02baac..dd553bdc 100644 --- a/langextract/schema.py +++ b/langextract/schema.py @@ -22,7 +22,6 @@ import enum from typing import Any - from langextract import data diff --git a/langextract/visualization.py b/langextract/visualization.py index 513cfa58..5aa02c2b 100644 --- a/langextract/visualization.py +++ b/langextract/visualization.py @@ -28,10 +28,10 @@ import html import itertools import json -import textwrap - import os import pathlib +import textwrap + from langextract import data as _data from langextract import io as _io @@ -130,9 +130,9 @@ 50% { text-decoration-color: #ff0000; } 100% { text-decoration-color: #ff4444; } } - .lx-legend { - font-size: 12px; margin-bottom: 8px; - padding-bottom: 8px; border-bottom: 1px solid #e0e0e0; + .lx-legend { + font-size: 12px; margin-bottom: 8px; + padding-bottom: 8px; border-bottom: 1px solid #e0e0e0; } .lx-label { display: inline-block; @@ -456,12 +456,12 @@ def _extraction_sort_key(extraction):
-
- Entity 1/{len(extractions)} | + Entity 1/{len(extractions)} | Pos {pos_info_str}
diff --git a/tests/.pylintrc b/tests/.pylintrc index cb61873e..4b06ddd5 100644 --- a/tests/.pylintrc +++ b/tests/.pylintrc @@ -49,4 +49,4 @@ max-branches = 15 # Multiple test conditions good-names=i,j,k,ex,Run,_,id,ok,fd,fp,maxDiff,setUp,tearDown # Include test-specific naming patterns -method-rgx=[a-z_][a-z0-9_]{2,50}$|test[A-Z_][a-zA-Z0-9]*$|assert[A-Z][a-zA-Z0-9]*$ \ No newline at end of file +method-rgx=[a-z_][a-z0-9_]{2,50}$|test[A-Z_][a-zA-Z0-9]*$|assert[A-Z][a-zA-Z0-9]*$ diff --git a/tests/annotation_test.py b/tests/annotation_test.py index bfa87c09..2bff8892 100644 --- a/tests/annotation_test.py +++ b/tests/annotation_test.py @@ -20,6 +20,7 @@ from absl.testing import absltest from absl.testing import parameterized + from langextract import annotation from langextract import data from langextract import inference diff --git a/tests/chunking_test.py b/tests/chunking_test.py index ad4f17b5..f28866a8 100644 --- a/tests/chunking_test.py +++ b/tests/chunking_test.py @@ -14,11 +14,12 @@ import textwrap +from absl.testing import absltest +from absl.testing import parameterized + from langextract import chunking from langextract import data from langextract import tokenizer -from absl.testing import absltest -from absl.testing import parameterized class SentenceIterTest(absltest.TestCase): @@ -368,7 +369,9 @@ def test_string_output(self): )""") document = data.Document(text=text, document_id="test_doc_123") tokenized_text = tokenizer.tokenize(text) - chunk_iter = chunking.ChunkIterator(tokenized_text, max_char_buffer=7, document=document) + chunk_iter = chunking.ChunkIterator( + tokenized_text, max_char_buffer=7, document=document + ) text_chunk = next(chunk_iter) self.assertEqual(str(text_chunk), expected) diff --git a/tests/data_lib_test.py b/tests/data_lib_test.py index 0eed51cc..e1cbdeb0 100644 --- a/tests/data_lib_test.py +++ b/tests/data_lib_test.py @@ -14,13 +14,13 @@ import json +from absl.testing import absltest +from absl.testing import parameterized import numpy as np from langextract import data from langextract import data_lib from langextract import tokenizer -from absl.testing import absltest -from absl.testing import parameterized class DataLibToDictParameterizedTest(parameterized.TestCase): diff --git a/tests/inference_test.py b/tests/inference_test.py index d9cf6b57..0cf8a54b 100644 --- a/tests/inference_test.py +++ b/tests/inference_test.py @@ -13,12 +13,15 @@ # limitations under the License. 
from unittest import mock -import langfun as lf + from absl.testing import absltest +import langfun as lf + from langextract import inference class TestLangFunLanguageModel(absltest.TestCase): + @mock.patch.object( inference.lf.core.language_model, "LanguageModel", autospec=True ) diff --git a/tests/init_test.py b/tests/init_test.py index b68371f7..d79a07f4 100644 --- a/tests/init_test.py +++ b/tests/init_test.py @@ -18,11 +18,12 @@ from unittest import mock from absl.testing import absltest -import langextract as lx + from langextract import data from langextract import inference from langextract import prompting from langextract import schema +import langextract as lx class InitTest(absltest.TestCase): @@ -142,5 +143,6 @@ def test_lang_extract_as_lx_extract( self.assertDataclassEqual(expected_result, actual_result) + if __name__ == "__main__": absltest.main() diff --git a/tests/prompting_test.py b/tests/prompting_test.py index 93712121..5449139b 100644 --- a/tests/prompting_test.py +++ b/tests/prompting_test.py @@ -16,6 +16,7 @@ from absl.testing import absltest from absl.testing import parameterized + from langextract import data from langextract import prompting from langextract import schema diff --git a/tests/resolver_test.py b/tests/resolver_test.py index 61d2a5e6..b96270ee 100644 --- a/tests/resolver_test.py +++ b/tests/resolver_test.py @@ -17,6 +17,7 @@ from absl.testing import absltest from absl.testing import parameterized + from langextract import chunking from langextract import data from langextract import resolver as resolver_lib diff --git a/tests/schema_test.py b/tests/schema_test.py index 4664da08..d4b067b5 100644 --- a/tests/schema_test.py +++ b/tests/schema_test.py @@ -16,11 +16,9 @@ import textwrap from unittest import mock - - - from absl.testing import absltest from absl.testing import parameterized + from langextract import data from langextract import schema diff --git a/tests/tokenizer_test.py b/tests/tokenizer_test.py index 9d296978..021f802a 100644 --- a/tests/tokenizer_test.py +++ b/tests/tokenizer_test.py @@ -14,10 +14,11 @@ import textwrap -from langextract import tokenizer from absl.testing import absltest from absl.testing import parameterized +from langextract import tokenizer + class TokenizerTest(parameterized.TestCase): diff --git a/tests/visualization_test.py b/tests/visualization_test.py index 0cb7fbe2..647107f9 100644 --- a/tests/visualization_test.py +++ b/tests/visualization_test.py @@ -17,6 +17,7 @@ from unittest import mock from absl.testing import absltest + from langextract import data as lx_data from langextract import visualization From 146a095f8f1ff75f9369216c539a2a4d80d97fb7 Mon Sep 17 00:00:00 2001 From: Akshay Goel Date: Sun, 3 Aug 2025 05:52:30 -0400 Subject: [PATCH 09/20] ci: enable format and lint checks in tox --- tox.ini | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tox.ini b/tox.ini index ece6f529..176ac893 100644 --- a/tox.ini +++ b/tox.ini @@ -13,7 +13,7 @@ # limitations under the License. 
[tox] -envlist = py310, py311 +envlist = py310, py311, format, lint-src, lint-tests skip_missing_interpreters = True [testenv] @@ -33,8 +33,14 @@ commands = isort langextract tests --check-only --diff pyink langextract tests --check --diff --config pyproject.toml -[testenv:lint] +[testenv:lint-src] deps = pylint>=3.0.0 commands = - pylint --rcfile=.pylintrc langextract tests + pylint --rcfile=.pylintrc langextract + +[testenv:lint-tests] +deps = + pylint>=3.0.0 +commands = + pylint --rcfile=tests/.pylintrc tests From ed65bcaa4bd123f84e61b927f2e74407c4e600b3 Mon Sep 17 00:00:00 2001 From: Akshay Goel Date: Sun, 3 Aug 2025 08:17:42 -0400 Subject: [PATCH 10/20] Add LangExtractError base exception for centralized error handling Introduces a common base exception class that all library-specific exceptions inherit from, enabling users to catch all LangExtract errors with a single except clause. --- exceptions.py | 30 ++++++++++++++++++++++++++++++ langextract/__init__.py | 15 +++++++++++++++ langextract/annotation.py | 3 ++- langextract/chunking.py | 3 ++- langextract/exceptions.py | 26 ++++++++++++++++++++++++++ langextract/inference.py | 3 ++- langextract/io.py | 3 ++- langextract/prompting.py | 3 ++- langextract/resolver.py | 3 ++- langextract/tokenizer.py | 4 +++- 10 files changed, 86 insertions(+), 7 deletions(-) create mode 100644 exceptions.py create mode 100644 langextract/exceptions.py diff --git a/exceptions.py b/exceptions.py new file mode 100644 index 00000000..26162d8e --- /dev/null +++ b/exceptions.py @@ -0,0 +1,30 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Base exceptions for LangExtract. + +This module defines the base exception class that all LangExtract exceptions +inherit from. Individual modules define their own specific exceptions. +""" + +__all__ = ["LangExtractError"] + + +class LangExtractError(Exception): + """Base exception for all LangExtract errors. + + All exceptions raised by LangExtract should inherit from this class. + This allows users to catch all LangExtract-specific errors with a single + except clause. + """ \ No newline at end of file diff --git a/langextract/__init__.py b/langextract/__init__.py index dbe8be14..f14c1675 100644 --- a/langextract/__init__.py +++ b/langextract/__init__.py @@ -32,6 +32,7 @@ from langextract import annotation from langextract import data +from langextract import exceptions from langextract import inference from langextract import io from langextract import prompting @@ -39,6 +40,20 @@ from langextract import schema from langextract import visualization +__all__ = [ + "extract", + "visualize", + "annotation", + "data", + "exceptions", + "inference", + "io", + "prompting", + "resolver", + "schema", + "visualization", +] + LanguageModelT = TypeVar("LanguageModelT", bound=inference.BaseLanguageModel) # Set up visualization helper at the top level (lx.visualize). 
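For context, a minimal downstream usage sketch of what the new base exception enables (this block is illustrative and not part of the patch; the prompt text, example data, and model id are assumptions rather than values taken from this change):

```python
import langextract as lx

examples = [
    lx.data.ExampleData(
        text="Patient was given 250 mg IV Cefazolin TID for one week.",
        extractions=[
            lx.data.Extraction(extraction_class="medication", extraction_text="Cefazolin"),
            lx.data.Extraction(extraction_class="dosage", extraction_text="250 mg"),
        ],
    )
]

try:
    result = lx.extract(
        text_or_documents="Patient took 400 mg PO Ibuprofen q4h for two days.",
        prompt_description="Extract medication, dosage, route, and frequency.",
        examples=examples,
        model_id="gemini-2.5-flash",  # illustrative model id
    )
except lx.exceptions.LangExtractError as e:
    # Resolver, inference, I/O, and prompting errors all derive from
    # LangExtractError, so a single except clause covers the whole library.
    print(f"LangExtract failed: {e}")
```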
diff --git a/langextract/annotation.py b/langextract/annotation.py index fe3b5a54..a370be9e 100644 --- a/langextract/annotation.py +++ b/langextract/annotation.py @@ -31,6 +31,7 @@ from langextract import chunking from langextract import data +from langextract import exceptions from langextract import inference from langextract import progress from langextract import prompting @@ -39,7 +40,7 @@ ATTRIBUTE_SUFFIX = "_attributes" -class DocumentRepeatError(Exception): +class DocumentRepeatError(exceptions.LangExtractError): """Exception raised when identical document ids are present.""" diff --git a/langextract/chunking.py b/langextract/chunking.py index 3625d7a1..2663ed85 100644 --- a/langextract/chunking.py +++ b/langextract/chunking.py @@ -28,10 +28,11 @@ import more_itertools from langextract import data +from langextract import exceptions from langextract import tokenizer -class TokenUtilError(Exception): +class TokenUtilError(exceptions.LangExtractError): """Error raised when token_util returns unexpected values.""" diff --git a/langextract/exceptions.py b/langextract/exceptions.py new file mode 100644 index 00000000..b3103ab7 --- /dev/null +++ b/langextract/exceptions.py @@ -0,0 +1,26 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Base exceptions for LangExtract.""" + +__all__ = ["LangExtractError"] + + +class LangExtractError(Exception): + """Base exception for all LangExtract errors. + + All exceptions raised by LangExtract should inherit from this class. + This allows users to catch all LangExtract-specific errors with a single + except clause. 
+ """ diff --git a/langextract/inference.py b/langextract/inference.py index ecd525ff..931acdff 100644 --- a/langextract/inference.py +++ b/langextract/inference.py @@ -30,6 +30,7 @@ import yaml from langextract import data +from langextract import exceptions from langextract import schema _OLLAMA_DEFAULT_MODEL_URL = 'http://localhost:11434' @@ -49,7 +50,7 @@ def __str__(self) -> str: return f'Score: {self.score:.2f}\nOutput:\n{formatted_lines}' -class InferenceOutputError(Exception): +class InferenceOutputError(exceptions.LangExtractError): """Exception raised when no scored outputs are available from the language model.""" def __init__(self, message: str): diff --git a/langextract/io.py b/langextract/io.py index 593951c7..ae5619dc 100644 --- a/langextract/io.py +++ b/langextract/io.py @@ -26,12 +26,13 @@ from langextract import data from langextract import data_lib +from langextract import exceptions from langextract import progress DEFAULT_TIMEOUT_SECONDS = 30 -class InvalidDatasetError(Exception): +class InvalidDatasetError(exceptions.LangExtractError): """Error raised when Dataset is empty or invalid.""" diff --git a/langextract/prompting.py b/langextract/prompting.py index 97856ac6..4484273b 100644 --- a/langextract/prompting.py +++ b/langextract/prompting.py @@ -23,10 +23,11 @@ import yaml from langextract import data +from langextract import exceptions from langextract import schema -class PromptBuilderError(Exception): +class PromptBuilderError(exceptions.LangExtractError): """Failure to build prompt.""" diff --git a/langextract/resolver.py b/langextract/resolver.py index e9085f16..c6496b82 100644 --- a/langextract/resolver.py +++ b/langextract/resolver.py @@ -31,6 +31,7 @@ import yaml from langextract import data +from langextract import exceptions from langextract import schema from langextract import tokenizer @@ -151,7 +152,7 @@ def align( ExtractionValueType = str | int | float | dict | list | None -class ResolverParsingError(Exception): +class ResolverParsingError(exceptions.LangExtractError): """Error raised when content cannot be parsed as the given format.""" diff --git a/langextract/tokenizer.py b/langextract/tokenizer.py index f4036f36..5028fb0f 100644 --- a/langextract/tokenizer.py +++ b/langextract/tokenizer.py @@ -30,8 +30,10 @@ from absl import logging +from langextract import exceptions -class BaseTokenizerError(Exception): + +class BaseTokenizerError(exceptions.LangExtractError): """Base class for all tokenizer-related errors.""" From 8b852258b51b4a4ebec8624724d2a4221e101150 Mon Sep 17 00:00:00 2001 From: Akshay Goel Date: Sun, 3 Aug 2025 09:24:27 -0400 Subject: [PATCH 11/20] fix: Remove LangFun and pylibmagic dependencies (v1.0.2) Fixes #25 - Windows installation failure due to pylibmagic build requirements Breaking change: LangFunLanguageModel removed. Use GeminiLanguageModel or OllamaLanguageModel instead. 
--- Dockerfile | 7 +----- README.md | 6 ----- exceptions.py | 2 +- langextract/__init__.py | 7 ------ langextract/inference.py | 44 ------------------------------------ pyproject.toml | 9 +------- tests/annotation_test.py | 8 +++---- tests/inference_test.py | 49 ---------------------------------------- 8 files changed, 7 insertions(+), 125 deletions(-) diff --git a/Dockerfile b/Dockerfile index edf8504e..e8a74312 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,11 +1,6 @@ -# Production Dockerfile for LangExtract with libmagic support +# Production Dockerfile for LangExtract FROM python:3.10-slim -# Install system dependencies including libmagic -RUN apt-get update && apt-get install -y --no-install-recommends \ - libmagic1 \ - && rm -rf /var/lib/apt/lists/* - # Set working directory WORKDIR /app diff --git a/README.md b/README.md index 939cb9a7..25c27301 100644 --- a/README.md +++ b/README.md @@ -336,12 +336,6 @@ pylint --rcfile=.pylintrc langextract tests See [CONTRIBUTING.md](CONTRIBUTING.md) for full development guidelines. -## Troubleshooting - -**libmagic error**: If you see "failed to find libmagic", install with `pip install langextract[full]` or install system dependencies: -- Ubuntu/Debian: `sudo apt-get install libmagic1` -- macOS: `brew install libmagic` - ## Disclaimer This is not an officially supported Google product. If you use diff --git a/exceptions.py b/exceptions.py index 26162d8e..0199da56 100644 --- a/exceptions.py +++ b/exceptions.py @@ -27,4 +27,4 @@ class LangExtractError(Exception): All exceptions raised by LangExtract should inherit from this class. This allows users to catch all LangExtract-specific errors with a single except clause. - """ \ No newline at end of file + """ diff --git a/langextract/__init__.py b/langextract/__init__.py index f14c1675..a278a095 100644 --- a/langextract/__init__.py +++ b/langextract/__init__.py @@ -16,13 +16,6 @@ from __future__ import annotations -# Ensure libmagic is available before langfun imports python-magic. -# pylibmagic provides pre-built binaries that python-magic needs. -try: - import pylibmagic # noqa: F401 (side-effect import) -except ImportError: - pass - from collections.abc import Iterable, Sequence import os from typing import Any, cast, Type, TypeVar diff --git a/langextract/inference.py b/langextract/inference.py index 931acdff..6177847e 100644 --- a/langextract/inference.py +++ b/langextract/inference.py @@ -24,7 +24,6 @@ from typing import Any from google import genai -import langfun as lf import requests from typing_extensions import override import yaml @@ -97,49 +96,6 @@ class InferenceType(enum.Enum): MULTIPROCESS = 'multiprocess' -# TODO: Add support for llm options. -@dataclasses.dataclass(init=False) -class LangFunLanguageModel(BaseLanguageModel): - """Language model inference class using LangFun language class. - - See https://github.com/google/langfun for more details on LangFun. 
- """ - - _lm: lf.core.language_model.LanguageModel # underlying LangFun model - _constraint: schema.Constraint = dataclasses.field( - default_factory=schema.Constraint, repr=False, compare=False - ) - _extra_kwargs: dict[str, Any] = dataclasses.field( - default_factory=dict, repr=False, compare=False - ) - - def __init__( - self, - language_model: lf.core.language_model.LanguageModel, - constraint: schema.Constraint = schema.Constraint(), - **kwargs, - ) -> None: - self._lm = language_model - self._constraint = constraint - - # Preserve any unused kwargs for debugging / future use - self._extra_kwargs = kwargs or {} - super().__init__(constraint=constraint) - - @override - def infer( - self, batch_prompts: Sequence[str], **kwargs - ) -> Iterator[Sequence[ScoredOutput]]: - responses = self._lm.sample(prompts=batch_prompts) - for a_response in responses: - for sample in a_response.samples: - yield [ - ScoredOutput( - score=sample.response.score, output=sample.response.text - ) - ] - - @dataclasses.dataclass(init=False) class OllamaLanguageModel(BaseLanguageModel): """Language model inference class using Ollama based host.""" diff --git a/pyproject.toml b/pyproject.toml index a1ec70ef..65373b44 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,7 +18,7 @@ build-backend = "setuptools.build_meta" [project] name = "langextract" -version = "1.0.1" +version = "1.0.2" description = "LangExtract: A library for extracting structured data from language models" readme = "README.md" requires-python = ">=3.10" @@ -32,7 +32,6 @@ dependencies = [ "async_timeout>=4.0.0", "exceptiongroup>=1.1.0", "google-genai>=0.1.0", - "langfun>=0.1.0", "ml-collections>=0.1.0", "more-itertools>=8.0.0", "numpy>=1.20.0", @@ -40,8 +39,6 @@ dependencies = [ "pandas>=1.3.0", "pydantic>=1.8.0", "python-dotenv>=0.19.0", - "python-magic>=0.4.27", - "pylibmagic>=0.5.0", "requests>=2.25.0", "tqdm>=4.64.0", "typing-extensions>=4.0.0" @@ -66,10 +63,6 @@ test = [ "pytest>=7.4.0", "tomli>=2.0.0" ] -full = [ - "python-magic>=0.4.27", - "pylibmagic>=0.5.0", -] [tool.setuptools] packages = ["langextract"] diff --git a/tests/annotation_test.py b/tests/annotation_test.py index 2bff8892..a5540e4e 100644 --- a/tests/annotation_test.py +++ b/tests/annotation_test.py @@ -35,7 +35,7 @@ class AnnotatorTest(absltest.TestCase): def setUp(self): super().setUp() self.mock_language_model = self.enter_context( - mock.patch.object(inference, "LangFunLanguageModel", autospec=True) + mock.patch.object(inference, "GeminiLanguageModel", autospec=True) ) self.annotator = annotation.Annotator( language_model=self.mock_language_model, @@ -688,7 +688,7 @@ def test_annotate_documents( batch_length: int = 1, ): mock_language_model = self.enter_context( - mock.patch.object(inference, "LangFunLanguageModel", autospec=True) + mock.patch.object(inference, "GeminiLanguageModel", autospec=True) ) # Define a side effect function so return length based on batch length. 
@@ -761,7 +761,7 @@ def test_annotate_documents_exceptions( batch_length: int = 1, ): mock_language_model = self.enter_context( - mock.patch.object(inference, "LangFunLanguageModel", autospec=True) + mock.patch.object(inference, "GeminiLanguageModel", autospec=True) ) mock_language_model.infer.return_value = [ [ @@ -798,7 +798,7 @@ class AnnotatorMultiPassTest(absltest.TestCase): def setUp(self): super().setUp() self.mock_language_model = self.enter_context( - mock.patch.object(inference, "LangFunLanguageModel", autospec=True) + mock.patch.object(inference, "GeminiLanguageModel", autospec=True) ) self.annotator = annotation.Annotator( language_model=self.mock_language_model, diff --git a/tests/inference_test.py b/tests/inference_test.py index 0cf8a54b..abf77ddf 100644 --- a/tests/inference_test.py +++ b/tests/inference_test.py @@ -15,59 +15,10 @@ from unittest import mock from absl.testing import absltest -import langfun as lf from langextract import inference -class TestLangFunLanguageModel(absltest.TestCase): - - @mock.patch.object( - inference.lf.core.language_model, "LanguageModel", autospec=True - ) - def test_langfun_infer(self, mock_lf_model): - mock_client_instance = mock_lf_model.return_value - metadata = { - "score": -0.004259720362824737, - "logprobs": None, - "is_cached": False, - } - source = lf.UserMessage( - text="What's heart in Italian?.", - sender="User", - metadata={"formatted_text": "What's heart in Italian?."}, - tags=["lm-input"], - ) - sample = lf.LMSample( - response=lf.AIMessage( - text="Cuore", - sender="AI", - metadata=metadata, - source=source, - tags=["lm-response"], - ), - score=-0.004259720362824737, - ) - actual_response = lf.LMSamplingResult( - samples=[sample], - ) - - # Mock the sample response. - mock_client_instance.sample.return_value = [actual_response] - model = inference.LangFunLanguageModel(language_model=mock_client_instance) - - batch_prompts = ["What's heart in Italian?"] - - expected_results = [ - [inference.ScoredOutput(score=-0.004259720362824737, output="Cuore")] - ] - - results = list(model.infer(batch_prompts)) - - mock_client_instance.sample.assert_called_once_with(prompts=batch_prompts) - self.assertEqual(results, expected_results) - - class TestOllamaLanguageModel(absltest.TestCase): @mock.patch.object(inference.OllamaLanguageModel, "_ollama_query") From 75a6f120415ae98c14cfe519f68001aa7474e0b3 Mon Sep 17 00:00:00 2001 From: Akshay Goel Date: Sun, 3 Aug 2025 10:40:20 -0400 Subject: [PATCH 12/20] Fix save_annotated_documents to handle string paths - Modified save_annotated_documents to accept both pathlib.Path and string paths - Convert string paths to Path objects before calling mkdir() - This fixes the error when using output_dir='.' as shown in the README example --- langextract/io.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/langextract/io.py b/langextract/io.py index ae5619dc..59dead7a 100644 --- a/langextract/io.py +++ b/langextract/io.py @@ -81,7 +81,7 @@ def load(self, delimiter: str = ',') -> Iterator[data.Document]: def save_annotated_documents( annotated_documents: Iterator[data.AnnotatedDocument], - output_dir: pathlib.Path | None = None, + output_dir: pathlib.Path | str | None = None, output_name: str = 'data.jsonl', show_progress: bool = True, ) -> None: @@ -90,7 +90,7 @@ def save_annotated_documents( Args: annotated_documents: Iterator over AnnotatedDocument objects to save. output_dir: The directory to which the JSONL file should be written. - Defaults to 'test_output/' if None. 
+ Can be a Path object or a string. Defaults to 'test_output/' if None. output_name: File name for the JSONL file. show_progress: Whether to show a progress bar during saving. @@ -100,6 +100,8 @@ def save_annotated_documents( """ if output_dir is None: output_dir = pathlib.Path('test_output') + else: + output_dir = pathlib.Path(output_dir) output_dir.mkdir(parents=True, exist_ok=True) From 8289b3a5425c13fc3af894b7ea8bb5601681e9e0 Mon Sep 17 00:00:00 2001 From: Akshay Goel Date: Sun, 3 Aug 2025 12:32:52 -0400 Subject: [PATCH 13/20] feat: Add OpenAI language model support --- README.md | 24 +++++- langextract/inference.py | 170 ++++++++++++++++++++++++++++++++++++++- pyproject.toml | 8 +- tests/inference_test.py | 116 ++++++++++++++++++++++++++ 4 files changed, 312 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 25c27301..e88a89e3 100644 --- a/README.md +++ b/README.md @@ -190,7 +190,7 @@ docker run --rm -e LANGEXTRACT_API_KEY="your-api-key" langextract python your_sc ## API Key Setup for Cloud Models -When using LangExtract with cloud-hosted models (like Gemini), you'll need to +When using LangExtract with cloud-hosted models (like Gemini or OpenAI), you'll need to set up an API key. On-device models don't require an API key. For developers using local LLMs, LangExtract offers built-in support for Ollama and can be extended to other third-party APIs by updating the inference endpoints. @@ -201,6 +201,7 @@ Get API keys from: * [AI Studio](https://aistudio.google.com/app/apikey) for Gemini models * [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/sdks/overview) for enterprise use +* [OpenAI Platform](https://platform.openai.com/api-keys) for OpenAI models ### Setting up API key in your environment @@ -250,6 +251,27 @@ result = lx.extract( ) ``` +## Using OpenAI Models + +LangExtract also supports OpenAI models. Example OpenAI configuration: + +```python +from langextract.inference import OpenAILanguageModel + +result = lx.extract( + text_or_documents=input_text, + prompt_description=prompt, + examples=examples, + language_model_type=OpenAILanguageModel, + model_id="gpt-4o", + api_key=os.environ.get('OPENAI_API_KEY'), + fence_output=True, + use_schema_constraints=False +) +``` + +Note: OpenAI models require `fence_output=True` and `use_schema_constraints=False` because LangExtract doesn't implement schema constraints for OpenAI yet. + ## More Examples Additional examples of LangExtract in action: diff --git a/langextract/inference.py b/langextract/inference.py index 6177847e..5cbc9a08 100644 --- a/langextract/inference.py +++ b/langextract/inference.py @@ -24,6 +24,7 @@ from typing import Any from google import genai +import openai import requests from typing_extensions import override import yaml @@ -383,7 +384,174 @@ def infer( yield [result] def parse_output(self, output: str) -> Any: - """Parses Gemini output as JSON or YAML.""" + """Parses Gemini output as JSON or YAML. + + Note: This expects raw JSON/YAML without code fences. + Code fence extraction is handled by resolver.py. 
+ """ + try: + if self.format_type == data.FormatType.JSON: + return json.loads(output) + else: + return yaml.safe_load(output) + except Exception as e: + raise ValueError( + f'Failed to parse output as {self.format_type.name}: {str(e)}' + ) from e + + +@dataclasses.dataclass(init=False) +class OpenAILanguageModel(BaseLanguageModel): + """Language model inference using OpenAI's API with structured output.""" + + model_id: str = 'gpt-4o-mini' + api_key: str | None = None + organization: str | None = None + format_type: data.FormatType = data.FormatType.JSON + temperature: float = 0.0 + max_workers: int = 10 + _client: openai.OpenAI | None = dataclasses.field( + default=None, repr=False, compare=False + ) + _extra_kwargs: dict[str, Any] = dataclasses.field( + default_factory=dict, repr=False, compare=False + ) + + def __init__( + self, + model_id: str = 'gpt-4o-mini', + api_key: str | None = None, + organization: str | None = None, + format_type: data.FormatType = data.FormatType.JSON, + temperature: float = 0.0, + max_workers: int = 10, + **kwargs, + ) -> None: + """Initialize the OpenAI language model. + + Args: + model_id: The OpenAI model ID to use (e.g., 'gpt-4o-mini', 'gpt-4o'). + api_key: API key for OpenAI service. + organization: Optional OpenAI organization ID. + format_type: Output format (JSON or YAML). + temperature: Sampling temperature. + max_workers: Maximum number of parallel API calls. + **kwargs: Ignored extra parameters so callers can pass a superset of + arguments shared across back-ends without raising ``TypeError``. + """ + self.model_id = model_id + self.api_key = api_key + self.organization = organization + self.format_type = format_type + self.temperature = temperature + self.max_workers = max_workers + self._extra_kwargs = kwargs or {} + + if not self.api_key: + raise ValueError('API key not provided.') + + # Initialize the OpenAI client + self._client = openai.OpenAI( + api_key=self.api_key, organization=self.organization + ) + + super().__init__( + constraint=schema.Constraint(constraint_type=schema.ConstraintType.NONE) + ) + + def _process_single_prompt(self, prompt: str, config: dict) -> ScoredOutput: + """Process a single prompt and return a ScoredOutput.""" + try: + # Prepare the system message for structured output + system_message = '' + if self.format_type == data.FormatType.JSON: + system_message = ( + 'You are a helpful assistant that responds in JSON format.' + ) + elif self.format_type == data.FormatType.YAML: + system_message = ( + 'You are a helpful assistant that responds in YAML format.' + ) + + # Create the chat completion using the v1.x client API + response = self._client.chat.completions.create( + model=self.model_id, + messages=[ + {'role': 'system', 'content': system_message}, + {'role': 'user', 'content': prompt}, + ], + temperature=config.get('temperature', self.temperature), + max_tokens=config.get('max_output_tokens'), + top_p=config.get('top_p'), + n=1, + ) + + # Extract the response text using the v1.x response format + output_text = response.choices[0].message.content + + return ScoredOutput(score=1.0, output=output_text) + + except Exception as e: + raise InferenceOutputError(f'OpenAI API error: {str(e)}') from e + + def infer( + self, batch_prompts: Sequence[str], **kwargs + ) -> Iterator[Sequence[ScoredOutput]]: + """Runs inference on a list of prompts via OpenAI's API. + + Args: + batch_prompts: A list of string prompts. + **kwargs: Additional generation params (temperature, top_p, etc.) + + Yields: + Lists of ScoredOutputs. 
+ """ + config = { + 'temperature': kwargs.get('temperature', self.temperature), + } + if 'max_output_tokens' in kwargs: + config['max_output_tokens'] = kwargs['max_output_tokens'] + if 'top_p' in kwargs: + config['top_p'] = kwargs['top_p'] + + # Use parallel processing for batches larger than 1 + if len(batch_prompts) > 1 and self.max_workers > 1: + with concurrent.futures.ThreadPoolExecutor( + max_workers=min(self.max_workers, len(batch_prompts)) + ) as executor: + future_to_index = { + executor.submit( + self._process_single_prompt, prompt, config.copy() + ): i + for i, prompt in enumerate(batch_prompts) + } + + results: list[ScoredOutput | None] = [None] * len(batch_prompts) + for future in concurrent.futures.as_completed(future_to_index): + index = future_to_index[future] + try: + results[index] = future.result() + except Exception as e: + raise InferenceOutputError( + f'Parallel inference error: {str(e)}' + ) from e + + for result in results: + if result is None: + raise InferenceOutputError('Failed to process one or more prompts') + yield [result] + else: + # Sequential processing for single prompt or worker + for prompt in batch_prompts: + result = self._process_single_prompt(prompt, config.copy()) + yield [result] + + def parse_output(self, output: str) -> Any: + """Parses OpenAI output as JSON or YAML. + + Note: This expects raw JSON/YAML without code fences. + Code fence extraction is handled by resolver.py. + """ try: if self.format_type == data.FormatType.JSON: return json.loads(output) diff --git a/pyproject.toml b/pyproject.toml index 65373b44..9949b375 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,7 +18,7 @@ build-backend = "setuptools.build_meta" [project] name = "langextract" -version = "1.0.2" +version = "1.0.3" description = "LangExtract: A library for extracting structured data from language models" readme = "README.md" requires-python = ">=3.10" @@ -35,10 +35,11 @@ dependencies = [ "ml-collections>=0.1.0", "more-itertools>=8.0.0", "numpy>=1.20.0", - "openai>=0.27.0", + "openai>=1.50.0", "pandas>=1.3.0", "pydantic>=1.8.0", "python-dotenv>=0.19.0", + "PyYAML>=6.0", "requests>=2.25.0", "tqdm>=4.64.0", "typing-extensions>=4.0.0" @@ -55,9 +56,8 @@ dev = [ "pyink~=24.3.0", "isort>=5.13.0", "pylint>=3.0.0", - "pytest>=7.4.0", "pytype>=2024.10.11", - "tox>=4.0.0", + "tox>=4.0.0" ] test = [ "pytest>=7.4.0", diff --git a/tests/inference_test.py b/tests/inference_test.py index abf77ddf..876a480f 100644 --- a/tests/inference_test.py +++ b/tests/inference_test.py @@ -16,6 +16,7 @@ from absl.testing import absltest +from langextract import data from langextract import inference @@ -93,5 +94,120 @@ def test_ollama_infer(self, mock_ollama_query): self.assertEqual(results, expected_results) +class TestOpenAILanguageModel(absltest.TestCase): + + @mock.patch("openai.OpenAI") + def test_openai_infer(self, mock_openai_class): + # Mock the OpenAI client and chat completion response + mock_client = mock.Mock() + mock_openai_class.return_value = mock_client + + # Mock response structure for v1.x API + mock_response = mock.Mock() + mock_response.choices = [ + mock.Mock(message=mock.Mock(content='{"name": "John", "age": 30}')) + ] + mock_client.chat.completions.create.return_value = mock_response + + # Create model instance + model = inference.OpenAILanguageModel( + model_id="gpt-4o-mini", api_key="test-api-key", temperature=0.5 + ) + + # Test inference + batch_prompts = ["Extract name and age from: John is 30 years old"] + results = list(model.infer(batch_prompts)) + + # Verify API 
was called correctly + mock_client.chat.completions.create.assert_called_once_with( + model="gpt-4o-mini", + messages=[ + { + "role": "system", + "content": ( + "You are a helpful assistant that responds in JSON format." + ), + }, + { + "role": "user", + "content": "Extract name and age from: John is 30 years old", + }, + ], + temperature=0.5, + max_tokens=None, + top_p=None, + n=1, + ) + + # Check results + expected_results = [[ + inference.ScoredOutput(score=1.0, output='{"name": "John", "age": 30}') + ]] + self.assertEqual(results, expected_results) + + def test_openai_parse_output_json(self): + model = inference.OpenAILanguageModel( + api_key="test-key", format_type=data.FormatType.JSON + ) + + # Test valid JSON parsing + output = '{"key": "value", "number": 42}' + parsed = model.parse_output(output) + self.assertEqual(parsed, {"key": "value", "number": 42}) + + # Test invalid JSON + with self.assertRaises(ValueError) as context: + model.parse_output("invalid json") + self.assertIn("Failed to parse output as JSON", str(context.exception)) + + def test_openai_parse_output_yaml(self): + model = inference.OpenAILanguageModel( + api_key="test-key", format_type=data.FormatType.YAML + ) + + # Test valid YAML parsing + output = "key: value\nnumber: 42" + parsed = model.parse_output(output) + self.assertEqual(parsed, {"key": "value", "number": 42}) + + # Test invalid YAML + with self.assertRaises(ValueError) as context: + model.parse_output("invalid: yaml: bad") + self.assertIn("Failed to parse output as YAML", str(context.exception)) + + def test_openai_no_api_key_raises_error(self): + with self.assertRaises(ValueError) as context: + inference.OpenAILanguageModel(api_key=None) + self.assertEqual(str(context.exception), "API key not provided.") + + @mock.patch("openai.OpenAI") + def test_openai_temperature_zero(self, mock_openai_class): + # Test that temperature=0.0 is properly passed through + mock_client = mock.Mock() + mock_openai_class.return_value = mock_client + + mock_response = mock.Mock() + mock_response.choices = [ + mock.Mock(message=mock.Mock(content='{"result": "test"}')) + ] + mock_client.chat.completions.create.return_value = mock_response + + model = inference.OpenAILanguageModel( + api_key="test-key", temperature=0.0 # Testing zero temperature + ) + + list(model.infer(["test prompt"])) + + # Verify temperature=0.0 was passed to the API + mock_client.chat.completions.create.assert_called_with( + model="gpt-4o-mini", + messages=mock.ANY, + temperature=0.0, + max_tokens=None, + top_p=None, + n=1, + ) + + if __name__ == "__main__": absltest.main() From dfe8188e7a015440baaddb6a9decd30d7ef7c3d3 Mon Sep 17 00:00:00 2001 From: tonebeta Date: Mon, 4 Aug 2025 11:03:12 +0800 Subject: [PATCH 14/20] fix(ui): prevent current highlight border from being obscured. Changes: (#10) * docs: clarify output_dir behavior in medication_examples.md * Removed inline comment in medication example Deleted an inline comment referencing the output directory in the save_annotated_documents. * docs: add output_dir="." to all save_annotated_documents examples Prevents confusion from default `test_output/...` by explicitly saving to current directory. 
* build: add formatting & linting pipeline with pre-commit integration * style: apply pyink, isort, and pre-commit formatting * ci: enable format and lint checks in tox * Add LangExtractError base exception for centralized error handling Introduces a common base exception class that all library-specific exceptions inherit from, enabling users to catch all LangExtract errors with a single except clause. * fix(ui): prevent current highlight border from being obscured --------- Co-authored-by: Leena Kamran <62442533+kleeena@users.noreply.github.com> Co-authored-by: Akshay Goel --- langextract/visualization.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/langextract/visualization.py b/langextract/visualization.py index 5aa02c2b..b382961a 100644 --- a/langextract/visualization.py +++ b/langextract/visualization.py @@ -119,9 +119,7 @@ padding: 8px 10px; margin-top: 8px; font-size: 13px; } .lx-current-highlight { - text-decoration: underline; - text-decoration-color: #ff4444; - text-decoration-thickness: 3px; + border-bottom: 4px solid #ff4444; font-weight: bold; animation: lx-pulse 1s ease-in-out; } From 87c511e081e7a54fe6fd8832a6b45013afc3c15f Mon Sep 17 00:00:00 2001 From: Akshay Goel Date: Mon, 4 Aug 2025 10:24:15 -0400 Subject: [PATCH 15/20] feat: Add live API integration tests (#39) - Gemini & OpenAI test suites with retry on transient errors - CI: Separate job, Python 3.11 only, skips for forks - Validates char_interval for all extractions - Multilingual test xfail (issue #13) TODO: Remove xfail from multilingual test after tokenizer fix --- .github/workflows/ci.yaml | 38 ++- pyproject.toml | 3 + tests/test_live_api.py | 585 ++++++++++++++++++++++++++++++++++++++ tox.ini | 12 +- 4 files changed, 635 insertions(+), 3 deletions(-) create mode 100644 tests/test_live_api.py diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 1a72b20e..e4291408 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -42,6 +42,40 @@ jobs: python -m pip install --upgrade pip pip install -e ".[dev,test]" - - name: Run tox (lint + tests) + - name: Run unit tests and linting run: | - tox + PY_VERSION=$(echo "${{ matrix.python-version }}" | tr -d '.') + tox -e py${PY_VERSION},format,lint-src,lint-tests + + live-api-tests: + needs: test + runs-on: ubuntu-latest + if: | + github.event_name == 'push' || + (github.event_name == 'pull_request' && + github.event.pull_request.head.repo.full_name == github.repository) + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v4 + with: + python-version: "3.11" + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -e ".[dev,test]" + + - name: Run live API tests + env: + GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }} + LANGEXTRACT_API_KEY: ${{ secrets.GEMINI_API_KEY }} # For backward compatibility + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + run: | + if [[ -z "$GEMINI_API_KEY" && -z "$OPENAI_API_KEY" ]]; then + echo "::notice::Live API tests skipped - no provider secrets configured" + exit 0 + fi + tox -e live-api diff --git a/pyproject.toml b/pyproject.toml index 9949b375..5abaa506 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -84,6 +84,9 @@ python_classes = "Test*" python_functions = "test_*" # Show extra test summary info addopts = "-ra" +markers = [ + "live_api: marks tests as requiring live API access", +] [tool.pyink] # Configuration for Google's style guide diff --git a/tests/test_live_api.py b/tests/test_live_api.py 
new file mode 100644 index 00000000..8d9801eb --- /dev/null +++ b/tests/test_live_api.py @@ -0,0 +1,585 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Live API integration tests that require real API keys. + +These tests are skipped if API keys are not available in the environment. +They should run in CI after all other tests pass. +""" + +from functools import wraps +import os +import re +import textwrap +import time +import unittest + +from dotenv import load_dotenv +import pytest + +import langextract as lx +from langextract.inference import OpenAILanguageModel + +load_dotenv() + +DEFAULT_GEMINI_MODEL = "gemini-2.5-flash" +DEFAULT_OPENAI_MODEL = "gpt-4o" + +GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY") or os.environ.get( + "LANGEXTRACT_API_KEY" +) +OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY") + +skip_if_no_gemini = pytest.mark.skipif( + not GEMINI_API_KEY, + reason=( + "Gemini API key not available (set GEMINI_API_KEY or" + " LANGEXTRACT_API_KEY)" + ), +) +skip_if_no_openai = pytest.mark.skipif( + not OPENAI_API_KEY, + reason="OpenAI API key not available (set OPENAI_API_KEY)", +) + +live_api = pytest.mark.live_api + +GEMINI_MODEL_PARAMS = { + "temperature": 0.0, + "top_p": 0.0, + "max_output_tokens": 256, +} + +OPENAI_MODEL_PARAMS = { + "temperature": 0.0, +} + + +INITIAL_RETRY_DELAY = 1.0 +MAX_RETRY_DELAY = 8.0 + + +def retry_on_transient_errors(max_retries=3, backoff_factor=2.0): + """Decorator to retry tests on transient API errors with exponential backoff. 
+ + Args: + max_retries: Maximum number of retry attempts + backoff_factor: Multiplier for exponential backoff (e.g., 2.0 = 1s, 2s, 4s) + """ + + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + last_exception = None + delay = INITIAL_RETRY_DELAY + + for attempt in range(max_retries + 1): + try: + return func(*args, **kwargs) + except ( + lx.exceptions.LangExtractError, + ConnectionError, + TimeoutError, + OSError, + RuntimeError, + ) as e: + last_exception = e + error_str = str(e).lower() + error_type = type(e).__name__ + + transient_errors = [ + "503", + "service unavailable", + "temporarily unavailable", + "rate limit", + "429", + "too many requests", + "connection reset", + "timeout", + "deadline exceeded", + ] + + is_transient = any( + err in error_str for err in transient_errors + ) or error_type in ["ServiceUnavailable", "RateLimitError", "Timeout"] + + if is_transient and attempt < max_retries: + print( + f"\nTransient error ({error_type}) on attempt" + f" {attempt + 1}/{max_retries + 1}: {e}" + ) + print(f"Retrying in {delay} seconds...") + time.sleep(delay) + delay = min(delay * backoff_factor, MAX_RETRY_DELAY) + else: + raise + + raise last_exception + + return wrapper + + return decorator + + +@pytest.fixture(autouse=True) +def add_delay_between_tests(): + """Add a small delay between tests to avoid rate limiting.""" + yield + time.sleep(0.5) + + +def get_basic_medication_examples(): + """Get example data for basic medication extraction.""" + return [ + lx.data.ExampleData( + text="Patient was given 250 mg IV Cefazolin TID for one week.", + extractions=[ + lx.data.Extraction( + extraction_class="dosage", extraction_text="250 mg" + ), + lx.data.Extraction( + extraction_class="route", extraction_text="IV" + ), + lx.data.Extraction( + extraction_class="medication", extraction_text="Cefazolin" + ), + lx.data.Extraction( + extraction_class="frequency", + extraction_text="TID", # TID = three times a day + ), + lx.data.Extraction( + extraction_class="duration", extraction_text="for one week" + ), + ], + ) + ] + + +def get_relationship_examples(): + """Get example data for medication relationship extraction.""" + return [ + lx.data.ExampleData( + text=( + "Patient takes Aspirin 100mg daily for heart health and" + " Simvastatin 20mg at bedtime." + ), + extractions=[ + # First medication group + lx.data.Extraction( + extraction_class="medication", + extraction_text="Aspirin", + attributes={"medication_group": "Aspirin"}, + ), + lx.data.Extraction( + extraction_class="dosage", + extraction_text="100mg", + attributes={"medication_group": "Aspirin"}, + ), + lx.data.Extraction( + extraction_class="frequency", + extraction_text="daily", + attributes={"medication_group": "Aspirin"}, + ), + lx.data.Extraction( + extraction_class="condition", + extraction_text="heart health", + attributes={"medication_group": "Aspirin"}, + ), + # Second medication group + lx.data.Extraction( + extraction_class="medication", + extraction_text="Simvastatin", + attributes={"medication_group": "Simvastatin"}, + ), + lx.data.Extraction( + extraction_class="dosage", + extraction_text="20mg", + attributes={"medication_group": "Simvastatin"}, + ), + lx.data.Extraction( + extraction_class="frequency", + extraction_text="at bedtime", + attributes={"medication_group": "Simvastatin"}, + ), + ], + ) + ] + + +def extract_by_class(result, extraction_class): + """Helper to extract entities by class. + + Returns a set of extraction texts for the given class. 
+ """ + return { + e.extraction_text + for e in result.extractions + if e.extraction_class == extraction_class + } + + +def assert_extractions_contain(test_case, result, expected_classes): + """Assert that result contains all expected extraction classes. + + Uses unittest assertions for richer error messages. + """ + actual_classes = {e.extraction_class for e in result.extractions} + missing_classes = expected_classes - actual_classes + test_case.assertFalse( + missing_classes, + f"Missing expected classes: {missing_classes}. Found extractions:" + f" {[f'{e.extraction_class}:{e.extraction_text}' for e in result.extractions]}", + ) + + +def assert_valid_char_intervals(test_case, result): + """Assert that all extractions have valid char intervals and alignment status.""" + for extraction in result.extractions: + test_case.assertIsNotNone( + extraction.char_interval, + f"Missing char_interval for extraction: {extraction.extraction_text}", + ) + test_case.assertIsNotNone( + extraction.alignment_status, + "Missing alignment_status for extraction:" + f" {extraction.extraction_text}", + ) + if hasattr(result, "text") and result.text: + text_length = len(result.text) + test_case.assertGreaterEqual( + extraction.char_interval.start_pos, + 0, + f"Invalid start_pos for extraction: {extraction.extraction_text}", + ) + test_case.assertLessEqual( + extraction.char_interval.end_pos, + text_length, + f"Invalid end_pos for extraction: {extraction.extraction_text}", + ) + + +class TestLiveAPIGemini(unittest.TestCase): + """Tests using real Gemini API.""" + + @skip_if_no_gemini + @live_api + @retry_on_transient_errors(max_retries=2) + def test_medication_extraction(self): + """Test medication extraction with entities in order.""" + prompt = textwrap.dedent("""\ + Extract medication information including medication name, dosage, route, frequency, + and duration in the order they appear in the text.""") + + examples = get_basic_medication_examples() + input_text = "Patient took 400 mg PO Ibuprofen q4h for two days." 
+ + result = lx.extract( + text_or_documents=input_text, + prompt_description=prompt, + examples=examples, + model_id=DEFAULT_GEMINI_MODEL, + api_key=GEMINI_API_KEY, + language_model_params=GEMINI_MODEL_PARAMS, + ) + + assert result is not None + assert hasattr(result, "extractions") + assert len(result.extractions) > 0 + + expected_classes = { + "dosage", + "route", + "medication", + "frequency", + "duration", + } + assert_extractions_contain(self, result, expected_classes) + assert_valid_char_intervals(self, result) + + # Using regex for precise matching to avoid false positives + medication_texts = extract_by_class(result, "medication") + self.assertTrue( + any( + re.search(r"\bIbuprofen\b", text, re.IGNORECASE) + for text in medication_texts + ), + f"No Ibuprofen found in: {medication_texts}", + ) + + dosage_texts = extract_by_class(result, "dosage") + self.assertTrue( + any( + re.search(r"\b400\s*mg\b", text, re.IGNORECASE) + for text in dosage_texts + ), + f"No 400mg dosage found in: {dosage_texts}", + ) + + route_texts = extract_by_class(result, "route") + self.assertTrue( + any( + re.search(r"\b(PO|oral)\b", text, re.IGNORECASE) + for text in route_texts + ), + f"No PO/oral route found in: {route_texts}", + ) + + @skip_if_no_gemini + @live_api + @retry_on_transient_errors(max_retries=2) + @pytest.mark.xfail( + reason=( + "Known tokenizer issue with non-Latin characters - see GitHub" + " issue #13" + ), + strict=True, + ) + def test_multilingual_medication_extraction(self): + """Test medication extraction with Japanese text.""" + text = ( # "The patient takes 10 mg of medication daily." + "患者は毎日10mgの薬を服用します。" + ) + + prompt = "Extract medication information including dosage and frequency." + + examples = [ + lx.data.ExampleData( + text="The patient takes 20mg of aspirin twice daily.", + extractions=[ + lx.data.Extraction( + extraction_class="medication", + extraction_text="aspirin", + attributes={"dosage": "20mg", "frequency": "twice daily"}, + ), + ], + ) + ] + + result = lx.extract( + text_or_documents=text, + prompt_description=prompt, + examples=examples, + model_id=DEFAULT_GEMINI_MODEL, + api_key=GEMINI_API_KEY, + language_model_params=GEMINI_MODEL_PARAMS, + ) + + assert result is not None + assert hasattr(result, "extractions") + assert len(result.extractions) > 0 + + medication_extractions = [ + e for e in result.extractions if e.extraction_class == "medication" + ] + assert ( + len(medication_extractions) > 0 + ), "No medication entities found in Japanese text" + assert_valid_char_intervals(self, result) + + @skip_if_no_gemini + @live_api + @retry_on_transient_errors(max_retries=2) + def test_medication_relationship_extraction(self): + """Test relationship extraction for medications with Gemini.""" + input_text = """ + The patient was prescribed Lisinopril and Metformin last month. + He takes the Lisinopril 10mg daily for hypertension, but often misses + his Metformin 500mg dose which should be taken twice daily for diabetes. + """ + + prompt = textwrap.dedent(""" + Extract medications with their details, using attributes to group related information: + + 1. Extract entities in the order they appear in the text + 2. Each entity must have a 'medication_group' attribute linking it to its medication + 3. 
All details about a medication should share the same medication_group value + """) + + examples = get_relationship_examples() + + result = lx.extract( + text_or_documents=input_text, + prompt_description=prompt, + examples=examples, + model_id=DEFAULT_GEMINI_MODEL, + api_key=GEMINI_API_KEY, + language_model_params=GEMINI_MODEL_PARAMS, + ) + + assert result is not None + assert len(result.extractions) > 0 + assert_valid_char_intervals(self, result) + + medication_groups = {} + for extraction in result.extractions: + assert ( + extraction.attributes is not None + ), f"Missing attributes for {extraction.extraction_text}" + assert ( + "medication_group" in extraction.attributes + ), f"Missing medication_group for {extraction.extraction_text}" + + group_name = extraction.attributes["medication_group"] + medication_groups.setdefault(group_name, []).append(extraction) + + assert ( + len(medication_groups) >= 2 + ), f"Expected at least 2 medications, found {len(medication_groups)}" + + # Allow flexible matching for dosage field (could be "dosage" or "dose") + for med_name, extractions in medication_groups.items(): + extraction_classes = {e.extraction_class for e in extractions} + # At minimum, each group should have the medication itself + assert ( + "medication" in extraction_classes + ), f"{med_name} group missing medication entity" + # Dosage is expected but might be formatted differently + assert any( + c in extraction_classes for c in ["dosage", "dose"] + ), f"{med_name} group missing dosage" + + +class TestLiveAPIOpenAI(unittest.TestCase): + """Tests using real OpenAI API.""" + + @skip_if_no_openai + @live_api + @retry_on_transient_errors(max_retries=2) + def test_medication_extraction(self): + """Test medication extraction with OpenAI models.""" + prompt = textwrap.dedent("""\ + Extract medication information including medication name, dosage, route, frequency, + and duration in the order they appear in the text.""") + + examples = get_basic_medication_examples() + input_text = "Patient took 400 mg PO Ibuprofen q4h for two days." 
+ + result = lx.extract( + text_or_documents=input_text, + prompt_description=prompt, + examples=examples, + language_model_type=OpenAILanguageModel, + model_id=DEFAULT_OPENAI_MODEL, + api_key=OPENAI_API_KEY, + fence_output=True, + use_schema_constraints=False, + language_model_params=OPENAI_MODEL_PARAMS, + ) + + assert result is not None + assert hasattr(result, "extractions") + assert len(result.extractions) > 0 + + expected_classes = { + "dosage", + "route", + "medication", + "frequency", + "duration", + } + assert_extractions_contain(self, result, expected_classes) + assert_valid_char_intervals(self, result) + + # Using regex for precise matching to avoid false positives + medication_texts = extract_by_class(result, "medication") + self.assertTrue( + any( + re.search(r"\bIbuprofen\b", text, re.IGNORECASE) + for text in medication_texts + ), + f"No Ibuprofen found in: {medication_texts}", + ) + + dosage_texts = extract_by_class(result, "dosage") + self.assertTrue( + any( + re.search(r"\b400\s*mg\b", text, re.IGNORECASE) + for text in dosage_texts + ), + f"No 400mg dosage found in: {dosage_texts}", + ) + + route_texts = extract_by_class(result, "route") + self.assertTrue( + any( + re.search(r"\b(PO|oral)\b", text, re.IGNORECASE) + for text in route_texts + ), + f"No PO/oral route found in: {route_texts}", + ) + + @skip_if_no_openai + @live_api + @retry_on_transient_errors(max_retries=2) + def test_medication_relationship_extraction(self): + """Test relationship extraction for medications with OpenAI.""" + input_text = """ + The patient was prescribed Lisinopril and Metformin last month. + He takes the Lisinopril 10mg daily for hypertension, but often misses + his Metformin 500mg dose which should be taken twice daily for diabetes. + """ + + prompt = textwrap.dedent(""" + Extract medications with their details, using attributes to group related information: + + 1. Extract entities in the order they appear in the text + 2. Each entity must have a 'medication_group' attribute linking it to its medication + 3. 
All details about a medication should share the same medication_group value + """) + + examples = get_relationship_examples() + + result = lx.extract( + text_or_documents=input_text, + prompt_description=prompt, + examples=examples, + language_model_type=OpenAILanguageModel, + model_id=DEFAULT_OPENAI_MODEL, + api_key=OPENAI_API_KEY, + fence_output=True, + use_schema_constraints=False, + language_model_params=OPENAI_MODEL_PARAMS, + ) + + assert result is not None + assert len(result.extractions) > 0 + assert_valid_char_intervals(self, result) + + medication_groups = {} + for extraction in result.extractions: + assert ( + extraction.attributes is not None + ), f"Missing attributes for {extraction.extraction_text}" + assert ( + "medication_group" in extraction.attributes + ), f"Missing medication_group for {extraction.extraction_text}" + + group_name = extraction.attributes["medication_group"] + medication_groups.setdefault(group_name, []).append(extraction) + + assert ( + len(medication_groups) >= 2 + ), f"Expected at least 2 medications, found {len(medication_groups)}" + + # Allow flexible matching for dosage field (could be "dosage" or "dose") + for med_name, extractions in medication_groups.items(): + extraction_classes = {e.extraction_class for e in extractions} + # At minimum, each group should have the medication itself + assert ( + "medication" in extraction_classes + ), f"{med_name} group missing medication entity" + # Dosage is expected but might be formatted differently + assert any( + c in extraction_classes for c in ["dosage", "dose"] + ), f"{med_name} group missing dosage" diff --git a/tox.ini b/tox.ini index 176ac893..8c7b89e2 100644 --- a/tox.ini +++ b/tox.ini @@ -22,7 +22,7 @@ setenv = deps = .[dev,test] commands = - pytest -ra + pytest -ra -m "not live_api" [testenv:format] skip_install = true @@ -44,3 +44,13 @@ deps = pylint>=3.0.0 commands = pylint --rcfile=tests/.pylintrc tests + +[testenv:live-api] +basepython = python3.11 +passenv = + GEMINI_API_KEY + LANGEXTRACT_API_KEY + OPENAI_API_KEY +deps = {[testenv]deps} +commands = + pytest tests/test_live_api.py -v -m live_api --maxfail=1 From dc613729017d91fd8a1474a18b58114be2ecc017 Mon Sep 17 00:00:00 2001 From: Akshay Goel Date: Mon, 4 Aug 2025 13:19:47 -0400 Subject: [PATCH 16/20] Add PR template validation workflow (#45) --- .github/workflows/validate_pr_template.yaml | 41 +++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 .github/workflows/validate_pr_template.yaml diff --git a/.github/workflows/validate_pr_template.yaml b/.github/workflows/validate_pr_template.yaml new file mode 100644 index 00000000..46c4963e --- /dev/null +++ b/.github/workflows/validate_pr_template.yaml @@ -0,0 +1,41 @@ +name: Validate PR template + +on: + pull_request: + types: [opened, edited, synchronize, reopened] + +permissions: + contents: read + +jobs: + check: + if: github.event.pull_request.draft == false # drafts can save early + runs-on: ubuntu-latest + + steps: + - name: Fail if template untouched + env: + PR_BODY: ${{ github.event.pull_request.body }} + run: | + printf '%s\n' "$PR_BODY" | tr -d '\r' > body.txt + + # Required sections from the template + required=( "# Description" "Fixes #" "# How Has This Been Tested?" 
"# Checklist" ) + err=0 + + # Check for required sections + for h in "${required[@]}"; do + grep -Fq "$h" body.txt || { echo "::error::$h missing"; err=1; } + done + + # Check for placeholder text that should be replaced + grep -Eiq 'Replace this with|Choose one:' body.txt && { + echo "::error::Template placeholders still present"; err=1; + } + + # Also check for the unmodified issue number placeholder + grep -Fq 'Fixes #[issue number]' body.txt && { + echo "::error::Issue number placeholder not updated"; err=1; + } + + exit $err From da771e697744010943f211fd2ffeff376bb937e8 Mon Sep 17 00:00:00 2001 From: Akshay Goel Date: Tue, 5 Aug 2025 04:19:28 -0400 Subject: [PATCH 17/20] fix: Change OllamaLanguageModel parameter from 'model' to 'model_id' (#57) Fixes #27 --- langextract/inference.py | 4 ++-- tests/inference_test.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/langextract/inference.py b/langextract/inference.py index 5cbc9a08..c758f713 100644 --- a/langextract/inference.py +++ b/langextract/inference.py @@ -113,13 +113,13 @@ class OllamaLanguageModel(BaseLanguageModel): def __init__( self, - model: str, + model_id: str, model_url: str = _OLLAMA_DEFAULT_MODEL_URL, structured_output_format: str = 'json', constraint: schema.Constraint = schema.Constraint(), **kwargs, ) -> None: - self._model = model + self._model = model_id self._model_url = model_url self._structured_output_format = structured_output_format self._constraint = constraint diff --git a/tests/inference_test.py b/tests/inference_test.py index 876a480f..88b84d42 100644 --- a/tests/inference_test.py +++ b/tests/inference_test.py @@ -73,7 +73,7 @@ def test_ollama_infer(self, mock_ollama_query): } mock_ollama_query.return_value = gemma_response model = inference.OllamaLanguageModel( - model="gemma2:latest", + model_id="gemma2:latest", model_url="http://localhost:11434", structured_output_format="json", ) From e83d5cf3a730603ece1af8c5125fc4c32ecbfc9e Mon Sep 17 00:00:00 2001 From: Akshay Goel Date: Tue, 5 Aug 2025 06:47:10 -0400 Subject: [PATCH 18/20] feat: Add CITATION.cff file for proper software citation --- CITATION.cff | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 CITATION.cff diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 00000000..2eb3134a --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,28 @@ +# SPDX-FileCopyrightText: 2025 Google LLC +# SPDX-License-Identifier: Apache-2.0 +# +# This file contains citation metadata for LangExtract. +# For more information visit: https://citation-file-format.github.io/ + +cff-version: 1.2.0 +title: "LangExtract" +message: "If you use this software, please cite it as below." 
+type: software +authors: + - given-names: Akshay + family-names: Goel + email: goelak@google.com + affiliation: Google LLC +repository-code: "https://github.com/google/langextract" +url: "https://github.com/google/langextract" +repository: "https://github.com/google/langextract" +abstract: "LangExtract: A library for extracting structured data from language models" +keywords: + - language-models + - structured-data-extraction + - nlp + - machine-learning + - python +license: Apache-2.0 +version: 1.0.3 +date-released: 2025-07-30 From 337beee7c95870f5241ca0997b954b9e78b3a805 Mon Sep 17 00:00:00 2001 From: Akshay Goel Date: Tue, 5 Aug 2025 08:20:10 -0400 Subject: [PATCH 19/20] feat: Add Ollama integration with Docker examples and CI tests (#62) - Add quickstart example and documentation for local LLM usage - Include Docker setup with health checks and docker-compose - Add integration tests and update CI pipeline - Secure setup: localhost-only binding, containerized deployment Signed-off-by: Akshay Goel --- .github/workflows/ci.yaml | 53 +++++++++- .github/workflows/validate_pr_template.yaml | 14 +-- README.md | 40 +++++++- examples/ollama/.dockerignore | 35 +++++++ examples/ollama/Dockerfile | 23 +++++ examples/ollama/README.md | 32 ++++++ examples/ollama/docker-compose.yml | 42 ++++++++ examples/ollama/quickstart.py | 108 ++++++++++++++++++++ tests/test_ollama_integration.py | 80 +++++++++++++++ tox.ini | 8 +- 10 files changed, 424 insertions(+), 11 deletions(-) create mode 100644 examples/ollama/.dockerignore create mode 100644 examples/ollama/Dockerfile create mode 100644 examples/ollama/README.md create mode 100644 examples/ollama/docker-compose.yml create mode 100644 examples/ollama/quickstart.py create mode 100644 tests/test_ollama_integration.py diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index e4291408..5a1f8977 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -28,7 +28,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.10", "3.11"] + python-version: ["3.10", "3.11", "3.12"] steps: - uses: actions/checkout@v4 @@ -79,3 +79,54 @@ jobs: exit 0 fi tox -e live-api + + ollama-integration-test: + needs: test + runs-on: ubuntu-latest + if: github.event_name == 'pull_request' + + steps: + - uses: actions/checkout@v4 + + - name: Detect file changes + id: changes + uses: tj-actions/changed-files@v44 + with: + files: | + langextract/inference.py + examples/ollama/** + tests/test_ollama_integration.py + .github/workflows/ci.yaml + + - name: Skip if no Ollama changes + if: steps.changes.outputs.any_changed == 'false' + run: | + echo "No Ollama-related changes detected – skipping job." 
+ exit 0 + + - name: Set up Python 3.11 + uses: actions/setup-python@v4 + with: + python-version: "3.11" + + - name: Launch Ollama container + run: | + docker run -d --name ollama \ + -p 127.0.0.1:11434:11434 \ + -v ollama:/root/.ollama \ + ollama/ollama:0.5.4 + for i in {1..20}; do + curl -fs http://localhost:11434/api/version && break + sleep 3 + done + + - name: Pull gemma2 model + run: docker exec ollama ollama pull gemma2:2b || true + + - name: Install tox + run: | + python -m pip install --upgrade pip + pip install tox + + - name: Run Ollama integration tests + run: tox -e ollama-integration diff --git a/.github/workflows/validate_pr_template.yaml b/.github/workflows/validate_pr_template.yaml index 46c4963e..73525892 100644 --- a/.github/workflows/validate_pr_template.yaml +++ b/.github/workflows/validate_pr_template.yaml @@ -11,31 +11,31 @@ jobs: check: if: github.event.pull_request.draft == false # drafts can save early runs-on: ubuntu-latest - + steps: - name: Fail if template untouched env: PR_BODY: ${{ github.event.pull_request.body }} run: | printf '%s\n' "$PR_BODY" | tr -d '\r' > body.txt - + # Required sections from the template required=( "# Description" "Fixes #" "# How Has This Been Tested?" "# Checklist" ) err=0 - + # Check for required sections for h in "${required[@]}"; do grep -Fq "$h" body.txt || { echo "::error::$h missing"; err=1; } done - + # Check for placeholder text that should be replaced grep -Eiq 'Replace this with|Choose one:' body.txt && { - echo "::error::Template placeholders still present"; err=1; + echo "::error::Template placeholders still present"; err=1; } - + # Also check for the unmodified issue number placeholder grep -Fq 'Fixes #[issue number]' body.txt && { echo "::error::Issue number placeholder not updated"; err=1; } - + exit $err diff --git a/README.md b/README.md index e88a89e3..2cbe5820 100644 --- a/README.md +++ b/README.md @@ -17,6 +17,8 @@ - [Quick Start](#quick-start) - [Installation](#installation) - [API Key Setup for Cloud Models](#api-key-setup-for-cloud-models) +- [Using OpenAI Models](#using-openai-models) +- [Using Local LLMs with Ollama](#using-local-llms-with-ollama) - [More Examples](#more-examples) - [*Romeo and Juliet* Full Text Extraction](#romeo-and-juliet-full-text-extraction) - [Medication Extraction](#medication-extraction) @@ -256,13 +258,13 @@ result = lx.extract( LangExtract also supports OpenAI models. Example OpenAI configuration: ```python -from langextract.inference import OpenAILanguageModel +import langextract as lx result = lx.extract( text_or_documents=input_text, prompt_description=prompt, examples=examples, - language_model_type=OpenAILanguageModel, + language_model_type=lx.inference.OpenAILanguageModel, model_id="gpt-4o", api_key=os.environ.get('OPENAI_API_KEY'), fence_output=True, @@ -272,6 +274,29 @@ result = lx.extract( Note: OpenAI models require `fence_output=True` and `use_schema_constraints=False` because LangExtract doesn't implement schema constraints for OpenAI yet. 
+## Using Local LLMs with Ollama + +LangExtract supports local inference using Ollama, allowing you to run models without API keys: + +```python +import langextract as lx + +result = lx.extract( + text_or_documents=input_text, + prompt_description=prompt, + examples=examples, + language_model_type=lx.inference.OllamaLanguageModel, + model_id="gemma2:2b", # or any Ollama model + model_url="http://localhost:11434", + fence_output=False, + use_schema_constraints=False +) +``` + +**Quick setup:** Install Ollama from [ollama.com](https://ollama.com/), run `ollama pull gemma2:2b`, then `ollama serve`. + +For detailed installation, Docker setup, and examples, see [`examples/ollama/`](examples/ollama/). + ## More Examples Additional examples of LangExtract in action: @@ -325,6 +350,17 @@ Or reproduce the full CI matrix locally with tox: tox # runs pylint + pytest on Python 3.10 and 3.11 ``` +### Ollama Integration Testing + +If you have Ollama installed locally, you can run integration tests: + +```bash +# Test Ollama integration (requires Ollama running with gemma2:2b model) +tox -e ollama-integration +``` + +This test will automatically detect if Ollama is available and run real inference tests. + ## Development ### Code Formatting diff --git a/examples/ollama/.dockerignore b/examples/ollama/.dockerignore new file mode 100644 index 00000000..77374252 --- /dev/null +++ b/examples/ollama/.dockerignore @@ -0,0 +1,35 @@ +# Ignore Python cache +__pycache__/ +*.pyc +*.pyo +*.pyd +.Python + +# Ignore version control +.git/ +.gitignore + +# Ignore OS files +.DS_Store +Thumbs.db + +# Ignore virtual environments +venv/ +env/ +.venv/ + +# Ignore IDE files +.vscode/ +.idea/ +*.swp +*.swo + +# Ignore test artifacts +.pytest_cache/ +.coverage +htmlcov/ + +# Ignore build artifacts +build/ +dist/ +*.egg-info/ diff --git a/examples/ollama/Dockerfile b/examples/ollama/Dockerfile new file mode 100644 index 00000000..48690a6a --- /dev/null +++ b/examples/ollama/Dockerfile @@ -0,0 +1,23 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM python:3.11-slim-bookworm + +WORKDIR /app + +RUN pip install langextract + +COPY quickstart.py . + +CMD ["python", "quickstart.py"] diff --git a/examples/ollama/README.md b/examples/ollama/README.md new file mode 100644 index 00000000..2fff8593 --- /dev/null +++ b/examples/ollama/README.md @@ -0,0 +1,32 @@ +# Ollama Examples + +This directory contains examples for using LangExtract with Ollama for local LLM inference. + +For setup instructions and documentation, see the [main README's Ollama section](../../README.md#using-local-llms-with-ollama). 
+ +## Quick Reference + +**Local setup:** +```bash +ollama pull gemma2:2b +python quickstart.py +``` + +**Docker setup:** +```bash +docker-compose up +``` + +## Files + +- `quickstart.py` - Basic extraction example with configurable model +- `docker-compose.yml` - Production-ready Docker setup with health checks +- `Dockerfile` - Container definition for LangExtract + +## Model License + +Ollama models come with their own licenses. For example: +- Gemma models: [Gemma Terms of Use](https://ai.google.dev/gemma/terms) +- Llama models: [Meta Llama License](https://llama.meta.com/llama-downloads/) + +Please review the license for any model you use. diff --git a/examples/ollama/docker-compose.yml b/examples/ollama/docker-compose.yml new file mode 100644 index 00000000..431765ea --- /dev/null +++ b/examples/ollama/docker-compose.yml @@ -0,0 +1,42 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +services: + ollama: + image: ollama/ollama:0.5.4 + ports: + - "127.0.0.1:11434:11434" # Bind only to localhost for security + volumes: + - ollama-data:/root/.ollama # Cross-platform support + command: serve + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:11434/api/version"] + interval: 5s + timeout: 3s + retries: 5 + start_period: 10s + + langextract: + build: . + depends_on: + ollama: + condition: service_healthy + environment: + - OLLAMA_HOST=http://ollama:11434 + volumes: + - .:/app + command: python quickstart.py + +volumes: + ollama-data: diff --git a/examples/ollama/quickstart.py b/examples/ollama/quickstart.py new file mode 100644 index 00000000..ed578412 --- /dev/null +++ b/examples/ollama/quickstart.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python3 +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Quick-start example for using Ollama with langextract.""" + +import argparse +import os + +import langextract as lx + + +def run_extraction(model_id="gemma2:2b", temperature=0.3): + """Run a simple extraction example using Ollama.""" + input_text = "Isaac Asimov was a prolific science fiction writer." + + prompt = "Extract the author's full name and their primary literary genre." + + examples = [ + lx.data.ExampleData( + text=( + "J.R.R. Tolkien was an English writer, best known for" + " high-fantasy." + ), + extractions=[ + lx.data.Extraction( + extraction_class="author_details", + # extraction_text includes full context with ellipsis for clarity + extraction_text="J.R.R. 
Tolkien was an English writer...", + attributes={ + "name": "J.R.R. Tolkien", + "genre": "high-fantasy", + }, + ) + ], + ) + ] + + result = lx.extract( + text_or_documents=input_text, + prompt_description=prompt, + examples=examples, + language_model_type=lx.inference.OllamaLanguageModel, + model_id=model_id, + model_url=os.getenv("OLLAMA_HOST", "http://localhost:11434"), + temperature=temperature, + fence_output=False, + use_schema_constraints=False, + ) + + return result + + +def main(): + """Main function to run the quick-start example.""" + parser = argparse.ArgumentParser(description="Run Ollama extraction example") + parser.add_argument( + "--model-id", + default=os.getenv("MODEL_ID", "gemma2:2b"), + help="Ollama model ID (default: gemma2:2b or MODEL_ID env var)", + ) + parser.add_argument( + "--temperature", + type=float, + default=float(os.getenv("TEMPERATURE", "0.3")), + help="Model temperature (default: 0.3 or TEMPERATURE env var)", + ) + args = parser.parse_args() + + print(f"🚀 Running Ollama quick-start example with {args.model_id}...") + print("-" * 50) + + try: + result = run_extraction( + model_id=args.model_id, temperature=args.temperature + ) + + for extraction in result.extractions: + print(f"Class: {extraction.extraction_class}") + print(f"Text: {extraction.extraction_text}") + print(f"Attributes: {extraction.attributes}") + + print("\n✅ SUCCESS! Ollama is working with langextract") + return True + + except ConnectionError as e: + print(f"\nConnectionError: {e}") + print("Make sure Ollama is running: 'ollama serve'") + return False + except Exception as e: + print(f"\nError: {type(e).__name__}: {e}") + return False + + +if __name__ == "__main__": + success = main() + exit(0 if success else 1) diff --git a/tests/test_ollama_integration.py b/tests/test_ollama_integration.py new file mode 100644 index 00000000..5ab4397d --- /dev/null +++ b/tests/test_ollama_integration.py @@ -0,0 +1,80 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Integration tests for Ollama functionality.""" +import socket + +import pytest +import requests + +import langextract as lx + + +def _ollama_available(): + """Check if Ollama is running on localhost:11434.""" + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: + result = sock.connect_ex(("localhost", 11434)) + return result == 0 + + +@pytest.mark.skipif(not _ollama_available(), reason="Ollama not running") +def test_ollama_extraction(): + """Test extraction using Ollama when available.""" + input_text = "Isaac Asimov was a prolific science fiction writer." + prompt = "Extract the author's full name and their primary literary genre." + + examples = [ + lx.data.ExampleData( + text=( + "J.R.R. Tolkien was an English writer, best known for" + " high-fantasy." + ), + extractions=[ + lx.data.Extraction( + extraction_class="author_details", + extraction_text="J.R.R. Tolkien was an English writer...", + attributes={ + "name": "J.R.R. 
Tolkien", + "genre": "high-fantasy", + }, + ) + ], + ) + ] + + model_id = "gemma2:2b" + + try: + result = lx.extract( + text_or_documents=input_text, + prompt_description=prompt, + examples=examples, + language_model_type=lx.inference.OllamaLanguageModel, + model_id=model_id, + model_url="http://localhost:11434", + temperature=0.3, + fence_output=False, + use_schema_constraints=False, + ) + + assert len(result.extractions) > 0 + extraction = result.extractions[0] + assert extraction.extraction_class == "author_details" + if extraction.attributes: + assert "asimov" in extraction.attributes.get("name", "").lower() + + except ValueError as e: + if "Can't find Ollama" in str(e): + pytest.skip(f"Ollama model {model_id} not available") + raise diff --git a/tox.ini b/tox.ini index 8c7b89e2..7abd98a0 100644 --- a/tox.ini +++ b/tox.ini @@ -13,7 +13,7 @@ # limitations under the License. [tox] -envlist = py310, py311, format, lint-src, lint-tests +envlist = py310, py311, py312, format, lint-src, lint-tests skip_missing_interpreters = True [testenv] @@ -54,3 +54,9 @@ passenv = deps = {[testenv]deps} commands = pytest tests/test_live_api.py -v -m live_api --maxfail=1 + +[testenv:ollama-integration] +basepython = python3.11 +deps = {[testenv]deps} +commands = + pytest tests/test_ollama_integration.py -v --tb=short From a7ef0bd14dffa68d59c8c14f64375c2ccb3b48fc Mon Sep 17 00:00:00 2001 From: Akshay Goel Date: Tue, 5 Aug 2025 08:28:27 -0400 Subject: [PATCH 20/20] chore: Bump version to 1.0.4 for release - Ollama integration with Docker examples - Fixed OllamaLanguageModel parameter name (model -> model_id) - Added CI/CD tests for Ollama - Updated documentation with consistent API examples --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 5abaa506..7be5c5f6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,7 +18,7 @@ build-backend = "setuptools.build_meta" [project] name = "langextract" -version = "1.0.3" +version = "1.0.4" description = "LangExtract: A library for extracting structured data from language models" readme = "README.md" requires-python = ">=3.10"