From 17fe53683693690c9b6aa3bd120396b7b96bdeb5 Mon Sep 17 00:00:00 2001 From: pamella Date: Tue, 19 Nov 2024 18:46:41 +0000 Subject: [PATCH] Deployed 2e5aff2 to dev with MkDocs 1.6.1 and mike 2.1.3 --- dev/reference/assistants-ref/index.html | 40 ++++++++++++++---------- dev/search/search_index.json | 2 +- dev/sitemap.xml | 22 ++++++------- dev/sitemap.xml.gz | Bin 291 -> 291 bytes 4 files changed, 36 insertions(+), 28 deletions(-) diff --git a/dev/reference/assistants-ref/index.html b/dev/reference/assistants-ref/index.html index 77f987f..4f02484 100644 --- a/dev/reference/assistants-ref/index.html +++ b/dev/reference/assistants-ref/index.html @@ -2040,7 +2040,9 @@

(line-number gutter of the class listing: 597 598 599 -600 becomes 597 598 599 +600 +601 +602)
class AIAssistant(abc.ABC):  # noqa: F821
    """Base class for AI Assistants. Subclasses must define at least the following attributes:

    * id: str

@@ -2454,7 +2456,9 @@

 # This is necessary for compatibility with Anthropic
 messages_to_summarize = state["messages"][1:-1]
 input_message = state["messages"][-1]
-docs = retriever.invoke({"input": input_message, "history": messages_to_summarize})
+docs = retriever.invoke(
+    {"input": input_message.content, "history": messages_to_summarize}
+)
 document_separator = self.get_document_separator()
 document_prompt = self.get_document_prompt()

@@ -4336,7 +4340,9 @@

(line-number gutter: 533 534 535 -536 becomes 533 534 535 +536 +537 +538)
@with_cast_id
def as_graph(self, thread_id: Any | None = None) -> Runnable[dict, dict]:
    """Create the LangGraph graph for the assistant.\n
    This graph is an agent that supports chat history, tool calling, and RAG (if `has_rag=True`).\n

@@ -4394,7 +4400,9 @@

 (same retriever.invoke change as in the @@ -2454 hunk above)

@@ -4576,9 +4584,7 @@

Source code in django_ai_assistant/helpers/assistants.py
(line-number gutter: -538 -539 -540 becomes +540; listing continues 540 541 542 543)

@@ -4597,7 +4603,9 @@

(line-number gutter: 556 557 558 -559 becomes 556 557 558 +559 +560 +561)
@with_cast_id
def invoke(self, *args: Any, thread_id: Any | None, **kwargs: Any) -> dict:
    """Invoke the assistant LangChain graph with the given arguments and keyword arguments.\n
    This is the lower-level method to run the assistant.\n

@@ -4722,9 +4730,7 @@

Source code in django_ai_assistant/helpers/assistants.py
(line-number gutter: -561 -562 -563 becomes +563; listing continues 563 564 565 566)

@@ -4742,7 +4748,9 @@

(line-number gutter: 578 579 580 -581 becomes 578 579 580 +581 +582 +583)
@with_cast_id
def run(self, message: str, thread_id: Any | None = None, **kwargs: Any) -> Any:
    """Run the assistant with the given message and thread ID.\n
    This is the higher-level method to run the assistant.\n

@@ -4837,9 +4845,7 @@

Source code in django_ai_assistant/helpers/assistants.py
(line-number gutter: -586 -587 -588 becomes +588; listing continues 588 589 590 591)

@@ -4851,7 +4857,9 @@

(line-number gutter: 597 598 599 -600 becomes 597 598 599 +600 +601 +602)
def as_tool(self, description: str) -> BaseTool:
    """Create a tool from the assistant.\n
    This is useful to compose assistants.\n

diff --git a/dev/search/search_index.json b/dev/search/search_index.json
index 827f232..6aff3ff 100644
--- a/dev/search/search_index.json
+++ b/dev/search/search_index.json
@@ -1 +1 @@
-{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Django AI Assistant","text":"

Combine the power of Large Language Models with Django's productivity to build intelligent applications.

Regardless of the feasibility of AGI, AI assistants are a new paradigm for computation. AI agents and assistants allow developers to easily build applications that make smart decisions.

The latest LLMs from major AI providers have a \"killer feature\" called Tool Calling, which enables AI models to call methods from Django's side, and essentially do anything a Django view can, such as DB queries, file management, external API calls, etc.

While users commonly interact with LLMs via conversations, AI Assistants can do a lot with any kind of string input, including JSON. Your end users won't even realize that an LLM is doing the heavy lifting behind the scenes! Some ideas for innovative AI assistants include:

  • A movie recommender chatbot that helps users manage their movie backlogs
  • An autofill button for your application's forms
  • Tailored email reminders that consider users' activity
  • A real-time tourist guide that recommends attractions given the user's current location

We provide examples for some of those applications. Get Started now!

"},{"location":"changelog/","title":"Changelog","text":"

This changelog references changes made both to the Django backend, django-ai-assistant, and the frontend TypeScript client, django-ai-assistant-client.

Note

The backend and the frontend are versioned together, that is, they have the same version number. When you update the backend, you should also update the frontend to the same version.

"},{"location":"changelog/#0.1.0","title":"0.1.0 October 11, 2024","text":"
  • Refactor the code to use LangGraph instead of LangChain LCEL (except for RAG functionality, see the get_history_aware_retriever method).
  • Store all messages in the Thread model, including tool calls and their outputs.
  • Allow separation of threads per assistant: assistant_id in the Thread model.
  • New updateThread function from useThreadList hook.
  • Improved examples:
    • Add markdown rendering to HTMX example.
    • Better Movie Recommendation example.
    • Better Tour Guide example.
"},{"location":"changelog/#0.0.4","title":"0.0.4 July 5, 2024","text":"
  • Fix frontend README.
"},{"location":"changelog/#0.0.3","title":"0.0.3 July 5, 2024","text":"
  • Less restrictive Python version in pyproject.toml. Support future Python versions.
"},{"location":"changelog/#0.0.2","title":"0.0.2 June 28, 2024","text":"
  • Add support for Django 4.2 LTS
  • Add support for Python 3.10 and 3.11
"},{"location":"changelog/#0.0.1","title":"0.0.1 June 25, 2024","text":"
  • Initial release
"},{"location":"contributing/","title":"Contributing","text":"

We can always use your help to improve Django AI Assistant! Please feel free to tackle existing issues. If you have a new idea, please create a thread on Discussions.

Please follow this guide to learn more about how to develop and test the project locally, before opening a pull request.

"},{"location":"contributing/#local-dev-setup","title":"Local Dev Setup","text":""},{"location":"contributing/#clone-the-repo","title":"Clone the repo","text":"
git clone git@github.com:vintasoftware/django-ai-assistant.git\n
"},{"location":"contributing/#install-development-tools","title":"Install development tools","text":"

This project uses Poetry for dependency and virtual environment management.

If you need to install the version of Python recommended for the project, you can use Pyenv.

For installing Node, we recommend NVM.

"},{"location":"contributing/#install-dependencies","title":"Install dependencies","text":""},{"location":"contributing/#backend","title":"Backend","text":"

Go to the project root. To activate the virtual environment, run

poetry shell\n

Install the Python dependencies:

poetry install\n

If you encounter an error regarding the Python version required for the project, you can use pyenv to install the appropriate version based on .python-version:

pyenv install\n
"},{"location":"contributing/#frontend","title":"Frontend","text":"

Go to the frontend directory and install the Node dependencies:

cd frontend\npnpm install\n
"},{"location":"contributing/#install-pre-commit-hooks","title":"Install pre-commit hooks","text":"
pre-commit install\n

It's critical to run the pre-commit hooks before pushing your code, to follow the project's code style and avoid linting errors.

"},{"location":"contributing/#updating-the-openapi-schema","title":"Updating the OpenAPI schema","text":"

It's critical to update the OpenAPI schema when you make changes to django_ai_assistant/api/views.py or related files:

poetry run python manage.py generate_openapi_schema --output frontend/openapi_schema.json\nsh -c 'cd frontend && pnpm run generate-client'\n
"},{"location":"contributing/#developing-with-the-example-project","title":"Developing with the example project","text":"

Run the frontend project in build:watch mode:

cd frontend\npnpm run build:watch\n

Go to the example project, install the dependencies, and link the frontend project:

cd ..  # back to project root directory\ncd example\npnpm install\npnpm remove django-ai-assistant-client  # remove the distributed package to use the local one\npnpm link ../frontend\n

Then follow the instructions in the example README to run the example project.

"},{"location":"contributing/#tests","title":"Tests","text":"

Before running tests, copy the .env.example file to .env.tests.

cp .env.example .env.tests\n

Run tests with:

poetry run pytest\n

The tests use pytest-vcr to record and replay HTTP requests to AI models.

If you're implementing a new test that needs to call a real AI model, you need to set the OPENAI_API_KEY environment variable with a real API key in the .env.tests file.

Then run the tests in record mode:

poetry run pytest --record-mode=once\n

To run frontend tests:

cd frontend\npnpm run test\n
"},{"location":"contributing/#documentation","title":"Documentation","text":"

We use mkdocs-material to generate the documentation from markdown files. Check the files in the docs directory.

To serve the documentation locally, run:

poetry run mkdocs serve\n
"},{"location":"contributing/#release","title":"Release","text":"

Info

The backend and the frontend are versioned together, that is, they should have the same version number.

To release and publish a new version, follow these steps:

  1. Update the version in pyproject.toml, frontend/package.json and example/package.json.
  2. Re-install the local version of the Python project: poetry install
  3. In the project root, run poetry run python manage.py generate_openapi_schema --output frontend/openapi_schema.json to update the OpenAPI schema.
  4. Re-install the local version of the frontend project:
cd frontend\npnpm install\npnpm run build\n
  5. In the frontend directory, run pnpm run generate-client to update the TypeScript client with the new OpenAPI schema.
  6. Update the changelog in CHANGELOG.md.
  7. Open a PR with the changes.
  8. Once the PR is merged, run the Release GitHub Action to create a draft release.
  9. Review the draft release, ensure the description has at least the associated changelog entry, and publish it.
  10. Once the release is published, the Publish GitHub Action will automatically run to publish the new version to PyPI and npm. Check the logs to ensure the publication was successful.
"},{"location":"frontend/","title":"Frontend","text":"

Django AI Assistant has a frontend TypeScript client to facilitate the integration with the Django backend.

","boost":2},{"location":"frontend/#installation","title":"Installation","text":"

Install the frontend client using pnpm:

pnpm install django-ai-assistant-client\n
","boost":2},{"location":"frontend/#client-configuration","title":"Client Configuration","text":"

First, you'll need to check what base path you used when setting up the Django AI Assistant backend. The base path is the URL prefix that the Django AI Assistant API is served under. In the example below, the base path is ai-assistant:

myproject/urls.py
from django.urls import include, path\n\nurlpatterns = [\n    path(\"ai-assistant/\", include(\"django_ai_assistant.urls\")),\n    ...\n]\n

Before using the frontend client, import the configAIAssistant and configure it with the base path. If you're using React, a good place to do this is in the App.tsx file:

example/assets/js/App.tsx
import { configAIAssistant } from \"django-ai-assistant-client\";\nimport React from \"react\";\n\nconfigAIAssistant({ BASE: \"ai-assistant\" });\n

Note that in the configuration above, the Django server and the frontend client use the same base path. If you're using a different base path, make sure to adjust the configuration accordingly.

Now you can use the frontend client to interact with the Django AI Assistant backend. Here's an example of how to create a message:

import { aiCreateThreadMessage } from \"django-ai-assistant-client\";\n\nawait aiCreateThreadMessage({\n    threadId: 1,\n    requestBody: {\n        assistant_id: 1,\n        message: \"What's the weather like today in NYC?\"\n    }\n});\n
","boost":2},{"location":"frontend/#advanced-client-configuration","title":"Advanced Client Configuration","text":"

By default the frontend client is authenticated via cookies (CREDENTIALS === 'include'). You can configure the client differently. Below is the default config:

configAIAssistant({\n    // Base path of the Django AI Assistant API, can be a relative or full URL:\n    BASE: '',\n    // Credentials mode for fetch requests:\n    CREDENTIALS: 'include',\n    // Record<string, unknown> with headers to be sent with each request:\n    HEADERS: undefined,\n    // Basic authentication username:\n    USERNAME: undefined,\n    // Basic authentication password:\n    PASSWORD: undefined,\n    // Token for authentication:\n    TOKEN: undefined,\n});\n
","boost":2},{"location":"frontend/#client-functions","title":"Client Functions","text":"

The frontend client provides the following functions:

","boost":2},{"location":"frontend/#ailistassistants","title":"aiListAssistants","text":"

List all assistants the user has access to. Param: none Return: a Promise that resolves to an Array of Assistant.

","boost":2},{"location":"frontend/#aigetassistant","title":"aiGetAssistant","text":"

Get an assistant by ID. Param: { assistantId: string } Return: Promise that resolves to Assistant.

","boost":2},{"location":"frontend/#ailistthreads","title":"aiListThreads","text":"

List all threads the user has access to. Param: none Return: a Promise that resolves to an Array of Thread.

","boost":2},{"location":"frontend/#aicreatethread","title":"aiCreateThread","text":"

Create a new thread. Param: { requestBody: { name: string } } Return: a Promise that resolves to a Thread.

","boost":2},{"location":"frontend/#aigetthread","title":"aiGetThread","text":"

Get a thread by ID. Param: { threadId: string } Return: a Promise that resolves to a Thread.

","boost":2},{"location":"frontend/#aiupdatethread","title":"aiUpdateThread","text":"

Update a thread by ID. Param: { threadId: string, requestBody: { name: string, assistant_id: string } } Return: a Promise that resolves to a Thread.

","boost":2},{"location":"frontend/#aideletethread","title":"aiDeleteThread","text":"

Delete a thread by ID. Param: { threadId: string } Return: a Promise that resolves to void.

","boost":2},{"location":"frontend/#ailistthreadmessages","title":"aiListThreadMessages","text":"

List all messages in a thread. Param: { threadId: string } Return: a Promise that resolves to an Array of ThreadMessage.

","boost":2},{"location":"frontend/#aicreatethreadmessage","title":"aiCreateThreadMessage","text":"

Create a new message in a thread. Param: { threadId: string, requestBody: { assistant_id: string, message: string } } Return: a Promise that resolves to void.

","boost":2},{"location":"frontend/#aideletethreadmessage","title":"aiDeleteThreadMessage","text":"

Delete a message in a thread. Param: { threadId: string, messageId: string } Return: a Promise that resolves to void.

Note

These functions correspond to the Django AI Assistant API endpoints. Make sure to read the API documentation to learn about permissions.

","boost":2},{"location":"frontend/#type-definitions","title":"Type definitions","text":"

The type definitions are available at frontend/src/client/types.gen.ts. You can import the schemas directly from django-ai-assistant-client root:

import {\n    Assistant,\n    Thread,\n    ThreadMessage\n} from \"django-ai-assistant-client\";\n
","boost":2},{"location":"frontend/#react-hooks","title":"React Hooks","text":"

The frontend client also provides React hooks to streamline application building.

Warning

You still have to call configAIAssistant on your application before using the hooks.

","boost":2},{"location":"frontend/#useassistantlist","title":"useAssistantList","text":"

React hook to manage the list of Assistants. Use like this:

import { useAssistantList } from \"django-ai-assistant-client\";\n\nexport function MyComponent() {\n    const {\n        assistants,\n        fetchAssistants,\n        loadingFetchAssistants\n    } = useAssistantList();\n    // ...\n}\n
","boost":2},{"location":"frontend/#useassistant","title":"useAssistant","text":"

React hook to manage a single Assistant. Use like this:

import { useAssistant } from \"django-ai-assistant-client\";\n\nexport function MyComponent() {\n    const {\n        assistant,\n        fetchAssistant,\n        loadingFetchAssistant\n    } = useAssistant();\n    // ...\n}\n
","boost":2},{"location":"frontend/#usethreadlist","title":"useThreadList","text":"

React hook to list, create, update, and delete Threads. Use like this:

import { useThreadList } from \"django-ai-assistant-client\";\n\nexport function MyComponent() {\n    const {\n        threads,\n        fetchThreads,\n        createThread,\n        updateThread,\n        deleteThread,\n        loadingFetchThreads,\n        loadingCreateThread,\n        loadingUpdateThread,\n        loadingDeleteThread\n    } = useThreadList();\n    // ...\n}\n
","boost":2},{"location":"frontend/#usemessagelist","title":"useMessageList","text":"

React hook to list, create, and delete Messages. Use like this:

import { useMessageList, Thread } from \"django-ai-assistant-client\";\n\nexport function MyComponent() {\n    const [activeThread, setActiveThread] = useState<Thread | null>(null);\n    const {\n        messages,\n        fetchMessages,\n        createMessage,\n        deleteMessage,\n        loadingFetchMessages,\n        loadingCreateMessage,\n        loadingDeleteMessage\n    } = useMessageList({ threadId: activeThread?.id });\n    // ...\n}\n
","boost":2},{"location":"frontend/#example-project","title":"Example project","text":"

The example project makes good use of the React hooks to build LLM-powered applications. Make sure to check it out!

","boost":2},{"location":"get-started/","title":"Get started","text":"","boost":2},{"location":"get-started/#prerequisites","title":"Prerequisites","text":"
  • Python:
  • Django:
","boost":2},{"location":"get-started/#how-to-install","title":"How to install","text":"

Install Django AI Assistant package:

pip install django-ai-assistant\n

Add Django AI Assistant to your Django project's INSTALLED_APPS:

myproject/settings.py
INSTALLED_APPS = [\n    ...\n    'django_ai_assistant',\n    ...\n]\n

Run the migrations:

python manage.py migrate\n

Learn how to use the package in the Tutorial section.

","boost":2},{"location":"support/","title":"Support","text":"

If you have any questions or need help, feel free to create a thread on GitHub Discussions.

In case you're facing a bug, please check existing issues and create a new one if needed.

"},{"location":"support/#commercial-support","title":"Commercial Support","text":"

This is an open-source project maintained by Vinta Software. We are always looking for exciting work! If you need any commercial support, feel free to get in touch: contact@vinta.com.br

"},{"location":"tutorial/","title":"Tutorial","text":"

In this tutorial, you will learn how to use Django AI Assistant to supercharge your Django project with LLM capabilities.

","boost":2},{"location":"tutorial/#prerequisites","title":"Prerequisites","text":"

Make sure you properly configured Django AI Assistant as described in the Get Started guide.

","boost":2},{"location":"tutorial/#setting-up-api-keys","title":"Setting up API keys","text":"

The tutorial below uses OpenAI's gpt-4o model, so make sure you have OPENAI_API_KEY set as an environment variable for your Django project. You can also use other models; keep reading to learn more. Just make sure their API keys are properly set.

Note

An easy way to set environment variables is to use a .env file in your project's root directory and use python-dotenv to load them. Our example project uses this approach.
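For instance, a minimal sketch of loading a .env file in your settings, assuming python-dotenv is installed (the key value in the comment is a placeholder):

myproject/settings.py
from dotenv import load_dotenv

# Loads variables from a .env file at the project root into os.environ,
# e.g. a line like OPENAI_API_KEY=sk-... (placeholder for your real key):
load_dotenv()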

","boost":2},{"location":"tutorial/#what-ai-assistants-can-do","title":"What AI Assistants can do","text":"

AI Assistants are LLMs that can answer user queries as ChatGPT does, i.e. inputting and outputting strings. But when integrated with Django, they can also do anything a Django view can, such as accessing the database, checking permissions, sending emails, downloading and uploading media files, etc. This is made possible by defining "tools" the AI can use. These tools are methods in an AI Assistant class on the Django side.

","boost":2},{"location":"tutorial/#defining-an-ai-assistant","title":"Defining an AI Assistant","text":"","boost":2},{"location":"tutorial/#registering","title":"Registering","text":"

To create an AI Assistant, you need to:

  1. Create an ai_assistants.py file;
  2. Define a class that inherits from AIAssistant;
  3. Provide an id, a name, some instructions for the LLM (a system prompt), and a model name:
myapp/ai_assistants.py
from django_ai_assistant import AIAssistant\n\nclass WeatherAIAssistant(AIAssistant):\n    id = \"weather_assistant\"\n    name = \"Weather Assistant\"\n    instructions = \"You are a weather bot.\"\n    model = \"gpt-4o\"\n
","boost":2},{"location":"tutorial/#defining-tools","title":"Defining tools","text":"

Useful tools give the LLM abilities it doesn't have out-of-the-box, such as getting the current date or finding the current weather by calling an API.

Use the @method_tool decorator to define a tool method in the AI Assistant:

myapp/ai_assistants.py
from django.utils import timezone\nfrom django_ai_assistant import AIAssistant, method_tool\nimport json\n\nclass WeatherAIAssistant(AIAssistant):\n    id = \"weather_assistant\"\n    name = \"Weather Assistant\"\n    instructions = \"You are a weather bot.\"\n    model = \"gpt-4o\"\n\n    def get_instructions(self):\n        return f\"{self.instructions} Today is {timezone.now().isoformat()}.\"\n\n    @method_tool\n    def get_weather(self, location: str) -> str:\n        \"\"\"Fetch the current weather data for a location\"\"\"\n        return json.dumps({\n            \"location\": location,\n            \"temperature\": \"25\u00b0C\",\n            \"weather\": \"sunny\"\n        })  # imagine some weather API here, this is just a placeholder\n

The get_weather method is a tool that the AI Assistant can use to get the current weather for a location, when the user asks for it. The tool method must be fully type-hinted (all parameters and return value), and it must include a descriptive docstring. This is necessary for the LLM model to understand the tool's purpose.

A conversation with this Weather Assistant looks like this:

User: What's the weather in New York City?\nAI: The weather in NYC is sunny with a temperature of 25\u00b0C.\n

Note

State-of-the-art models such as gpt-4o can process JSON well. You can return a json.dumps(api_output) from a tool method and the model will be able to process it before responding to the user.

","boost":2},{"location":"tutorial/#tool-parameters","title":"Tool parameters","text":"

It's possible to define more complex parameters for tools. As long as they're JSON serializable, the underlying LLM model should be able to call tools with the right arguments.

In the MovieRecommendationAIAssistant from the example project, we have a reorder_backlog tool method that receives a list of IMDb URLs that represent the user's movie backlog order. Note the Sequence[str] parameter:

example/movies/ai_assistants.py
from django_ai_assistant import AIAssistant, method_tool\n\nclass MovieRecommendationAIAssistant(AIAssistant):\n    ...\n\n    @method_tool\n    def reorder_backlog(self, imdb_url_list: Sequence[str]) -> str:\n        \"\"\"Reorder movies in user's backlog.\"\"\"\n        ...\n

In WeatherAIAssistant, another assistant from the example project, we have a fetch_forecast_weather method tool with an args_schema parameter that defines a JSON schema for the tool arguments:

example/weather/ai_assistants.py
from django_ai_assistant import AIAssistant, method_tool, BaseModel, Field\n\nclass WeatherAIAssistant(AIAssistant):\n    ...\n\n    class FetchForecastWeatherInput(BaseModel):\n        location: str = Field(description=\"Location to fetch the forecast weather for\")\n        forecast_date: date = Field(description=\"Date in the format 'YYYY-MM-DD'\")\n\n    @method_tool(args_schema=FetchForecastWeatherInput)\n    def fetch_forecast_weather(self, location, forecast_date) -> dict:\n        \"\"\"Fetch the forecast weather data for a location\"\"\"\n        # forecast_date is a `date` object here\n        ...\n

Note

It's important to provide a description for each field from args_schema. This improves the LLM's understanding of the tool's arguments.

","boost":2},{"location":"tutorial/#using-django-logic-in-tools","title":"Using Django logic in tools","text":"

You have access to the current request user in tools:

myapp/ai_assistants.py
from django_ai_assistant import AIAssistant, method_tool\n\nclass PersonalAIAssistant(AIAssistant):\n    id = \"personal_assistant\"\n    name = \"Personal Assistant\"\n    instructions = \"You are a personal assistant.\"\n    model = \"gpt-4o\"\n\n    @method_tool\n    def get_current_user_username(self) -> str:\n        \"\"\"Get the username of the current user\"\"\"\n        return self._user.username\n

You can also add any Django logic to tools, such as querying the database:

myapp/ai_assistants.py
from django_ai_assistant import AIAssistant, method_tool\nimport json\n\nclass IssueManagementAIAssistant(AIAssistant):\n    id = \"issue_mgmt_assistant\"\n    name = \"Issue Management Assistant\"\n    instructions = \"You are an issue management bot.\"\n    model = \"gpt-4o\"\n\n    @method_tool\n    def get_current_user_assigned_issues(self) -> str:\n        \"\"\"Get the issues assigned to the current user\"\"\"\n        return json.dumps({\n            \"issues\": list(Issue.objects.filter(assignee=self._user).values())\n        })\n

Warning

Make sure you only return to the LLM what the user can see, considering permissions and privacy. Code the tools as if they were Django views.

","boost":2},{"location":"tutorial/#using-pre-implemented-tools","title":"Using pre-implemented tools","text":"

Django AI Assistant works with any LangChain-compatible tool. Just override the get_tools method in your AI Assistant class to include the tools you want to use.

For example, you can use the TavilySearchResults tool to provide your AI Assistant with the ability to search the web for information about upcoming movies.

First install dependencies:

pip install -U langchain-community tavily-python\n

Then, set the TAVILY_API_KEY environment variable. You'll need to sign up at Tavily.

Finally, add the tool to your AI Assistant class by overriding the get_tools method:

myapp/ai_assistants.py
from django_ai_assistant import AIAssistant\nfrom langchain_community.tools.tavily_search import TavilySearchResults\n\nclass MovieSearchAIAssistant(AIAssistant):\n    id = \"movie_search_assistant\"  # noqa: A003\n    instructions = (\n        \"You're a helpful movie search assistant. \"\n        \"Help the user find more information about movies. \"\n        \"Use the provided tools to search the web for upcoming movies. \"\n    )\n    name = \"Movie Search Assistant\"\n    model = \"gpt-4o\"\n\n    def get_instructions(self):\n        return f\"{self.instructions} Today is {timezone.now().isoformat()}.\"\n\n    def get_tools(self):\n        return [\n            TavilySearchResults(),\n            *super().get_tools(),\n        ]\n

Note

As of now, Django AI Assistant is powered by LangChain and LangGraph, but knowledge of these tools is NOT necessary to use this library, at least for the main use cases.

","boost":2},{"location":"tutorial/#using-an-ai-assistant","title":"Using an AI Assistant","text":"","boost":2},{"location":"tutorial/#manually-calling-an-ai-assistant","title":"Manually calling an AI Assistant","text":"

You can manually call an AI Assistant from anywhere in your Django application:

from myapp.ai_assistants import WeatherAIAssistant\n\nassistant = WeatherAIAssistant()\noutput = assistant.run(\"What's the weather in New York City?\")\nassert output == \"The weather in NYC is sunny with a temperature of 25\u00b0C.\"\n

The constructor of AIAssistant receives user, request, view as optional parameters, which can be used in the tools with self._user, self._request, self._view. Also, any extra parameters passed in constructor are stored at self._init_kwargs.
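For example, here's a hedged sketch of a Django view that passes the current user and request, plus an extra keyword argument (the units kwarg is hypothetical, just to illustrate self._init_kwargs):

myapp/views.py
from django.http import HttpResponse

from myapp.ai_assistants import WeatherAIAssistant

def weather_chat(request):
    assistant = WeatherAIAssistant(
        user=request.user,  # available as self._user in tools
        request=request,  # available as self._request in tools
        units="metric",  # hypothetical extra kwarg, stored in self._init_kwargs
    )
    output = assistant.run("What's the weather in New York City?")
    return HttpResponse(output)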

","boost":2},{"location":"tutorial/#threads-of-messages","title":"Threads of Messages","text":"

The django-ai-assistant app provides two models, Thread and Message, to store and retrieve conversations with AI Assistants. LLMs are stateless by design, meaning they don't hold any context between calls: all they know is the current input. But by using the AIAssistant class, the conversation state is stored in the database as multiple Messages of a Thread, and automatically retrieved and passed to the LLM when calling the AI Assistant.

To create a Thread, you can use a helper from the django_ai_assistant.use_cases module. For example:

from django_ai_assistant.use_cases import create_thread, get_thread_messages\nfrom myapp.ai_assistants import WeatherAIAssistant\n\nthread = create_thread(name=\"Weather Chat\", user=user)\nassistant = WeatherAIAssistant()\nassistant.run(\"What's the weather in New York City?\", thread_id=thread.id)\n\nmessages = get_thread_messages(thread=thread, user=user)  # returns both user and AI messages\n

More CRUD helpers are available in the django_ai_assistant.use_cases module. Check the Reference for more information.

","boost":2},{"location":"tutorial/#using-built-in-api-views","title":"Using built-in API views","text":"

You can use the built-in API views to interact with AI Assistants via HTTP requests from any frontend, such as a React application or a mobile app. Add the following to your Django project's urls.py:

myproject/urls.py
from django.urls import include, path\n\nurlpatterns = [\n    path(\"ai-assistant/\", include(\"django_ai_assistant.urls\")),\n    ...\n]\n

The built-in API supports retrieval of Assistants info, as well as CRUD for Threads and Messages. It has an OpenAPI schema that you can explore at http://localhost:8000/ai-assistant/docs when running your project locally.

","boost":2},{"location":"tutorial/#configuring-the-api","title":"Configuring the API","text":"

The built-in API is implemented using Django Ninja. By default, it is initialized with the following setting:

myproject/settings.py
AI_ASSISTANT_INIT_API_FN = \"django_ai_assistant.api.views.init_api\"\n

You can override this setting in your Django project's settings.py to customize the API, such as using a different authentication method or modifying other configurations.

The expected function signature for AI_ASSISTANT_INIT_API_FN is as follows:

from ninja import NinjaAPI\n\ndef init_api():\n    return NinjaAPI(...)\n

By providing your own implementation of init_api, you can tailor the API setup to better fit your project's requirements.
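For example, a hedged sketch that requires Django session authentication (title and auth are standard NinjaAPI parameters; adjust to your project's needs and keep any defaults the library's init_api expects):

myproject/api.py
from ninja import NinjaAPI
from ninja.security import django_auth

def init_api():
    # Require an authenticated Django session for all AI Assistant endpoints:
    return NinjaAPI(title="My AI Assistant API", auth=django_auth)

myproject/settings.py
AI_ASSISTANT_INIT_API_FN = "myproject.api.init_api"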

","boost":2},{"location":"tutorial/#configuring-permissions","title":"Configuring permissions","text":"

The API uses the helpers from the django_ai_assistant.use_cases module, which have permission checks to ensure the user can use a certain AI Assistant or do CRUD on Threads and Messages.

By default, any authenticated user can use any AI Assistant, and create a thread. Users can manage both their own threads and the messages on them. Therefore, the default permissions are:

myproject/settings.py
AI_ASSISTANT_CAN_CREATE_THREAD_FN = \"django_ai_assistant.permissions.allow_all\"\nAI_ASSISTANT_CAN_VIEW_THREAD_FN = \"django_ai_assistant.permissions.owns_thread\"\nAI_ASSISTANT_CAN_UPDATE_THREAD_FN = \"django_ai_assistant.permissions.owns_thread\"\nAI_ASSISTANT_CAN_DELETE_THREAD_FN = \"django_ai_assistant.permissions.owns_thread\"\nAI_ASSISTANT_CAN_CREATE_MESSAGE_FN = \"django_ai_assistant.permissions.owns_thread\"\nAI_ASSISTANT_CAN_UPDATE_MESSAGE_FN = \"django_ai_assistant.permissions.owns_thread\"\nAI_ASSISTANT_CAN_DELETE_MESSAGE_FN = \"django_ai_assistant.permissions.owns_thread\"\nAI_ASSISTANT_CAN_RUN_ASSISTANT = \"django_ai_assistant.permissions.allow_all\"\n

You can override these settings in your Django project's settings.py to customize the permissions.

Thread permission signatures look like this:

from django_ai_assistant.models import Thread\nfrom django.http import HttpRequest\n\ndef check_custom_thread_permission(\n        thread: Thread,\n        user: Any,\n        request: HttpRequest | None = None) -> bool:\n    return ...\n

While Message permission signatures look like this:

from django_ai_assistant.models import Thread, Message\nfrom django.http import HttpRequest\n\ndef check_custom_message_permission(\n        message: Message,\n        thread: Thread,\n        user: Any,\n        request: HttpRequest | None = None) -> bool:\n    return ...\n
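For example, here's a hedged sketch of a custom thread-deletion permission that lets staff users delete any thread (it assumes the Thread model has a created_by field; check the actual model definition):

myproject/permissions.py
from typing import Any

from django.http import HttpRequest
from django_ai_assistant.models import Thread

def staff_or_owner_can_delete_thread(
        thread: Thread,
        user: Any,
        request: HttpRequest | None = None) -> bool:
    # Staff can delete any thread; other users only their own
    # (assumes Thread has a created_by foreign key to the user model):
    return user.is_staff or thread.created_by_id == user.id

myproject/settings.py
AI_ASSISTANT_CAN_DELETE_THREAD_FN = "myproject.permissions.staff_or_owner_can_delete_thread"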
","boost":2},{"location":"tutorial/#frontend-integration","title":"Frontend integration","text":"

You can integrate Django AI Assistant with frontend frameworks like React or Vue.js. Please check the frontend documentation.

If you want to use traditional Django templates, you can try using HTMX to avoid page refreshes. Check the example project; it includes an HTMX application.

","boost":2},{"location":"tutorial/#advanced-usage","title":"Advanced usage","text":"","boost":2},{"location":"tutorial/#using-other-ai-models","title":"Using other AI models","text":"

By default the supported models are OpenAI ones, but you can use any chat model from LangChain that supports Tool Calling by overriding get_llm:

myapp/ai_assistants.py
from django_ai_assistant import AIAssistant\nfrom langchain_anthropic import ChatAnthropic\n\nclass WeatherAIAssistant(AIAssistant):\n    id = \"weather_assistant\"\n    name = \"Weather Assistant\"\n    instructions = \"You are a weather bot.\"\n    model = \"claude-3-opus-20240229\"\n\n    def get_llm(self):\n        model = self.get_model()\n        temperature = self.get_temperature()\n        model_kwargs = self.get_model_kwargs()\n        return ChatAnthropic(\n            model_name=model,\n            temperature=temperature,\n            model_kwargs=model_kwargs,\n            timeout=None,\n            max_retries=2,\n        )\n
","boost":2},{"location":"tutorial/#composing-ai-assistants","title":"Composing AI Assistants","text":"

One AI Assistant can call another AI Assistant as a tool. This is useful for composing complex AI Assistants. Use the as_tool method for that:

myapp/ai_assistants.py
class SimpleAssistant(AIAssistant):\n    ...\n\nclass AnotherSimpleAssistant(AIAssistant):\n    ...\n\nclass ComplexAssistant(AIAssistant):\n    ...\n\n    def get_tools(self) -> Sequence[BaseTool]:\n        return [\n            SimpleAssistant().as_tool(\n                description=\"Tool to <...add a meaningful description here...>\"),\n            AnotherSimpleAssistant().as_tool(\n                description=\"Tool to <...add a meaningful description here...>\"),\n            *super().get_tools(),\n        ]\n

The movies/ai_assistants.py file in the example project shows an example of a composed AI Assistant that's able to recommend movies and manage the user's movie backlog.

","boost":2},{"location":"tutorial/#retrieval-augmented-generation-rag","title":"Retrieval Augmented Generation (RAG)","text":"

You can use RAG in your AI Assistants. RAG means using a retriever to fetch chunks of textual data from a pre-existing DB to give context to the LLM. The LLM will then have access to the context your retriever logic provides when generating the response, improving the quality of the response by avoiding generic or off-topic answers.

For this to work, you must do the following in your AI Assistant:

  1. Add has_rag = True as a class attribute;
  2. Override the get_retriever method to return a LangChain Retriever.

For example:

myapp/ai_assistants.py
from django_ai_assistant import AIAssistant\n\nclass DocsAssistant(AIAssistant):\n    id = \"docs_assistant\"  # noqa: A003\n    name = \"Docs Assistant\"\n    instructions = (\n        \"You are an assistant for answering questions related to the provided context. \"\n        \"Use the following pieces of retrieved context to answer the user's question. \"\n    )\n    model = \"gpt-4o\"\n    has_rag = True\n\n    def get_retriever(self) -> BaseRetriever:\n        return ...  # use a LangChain Retriever here\n

The rag/ai_assistants.py file in the example project shows an example of a RAG-powered AI Assistant that's able to answer questions about Django using the Django Documentation as context.
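As a starting point, here's a hedged sketch of a get_retriever implementation backed by an in-memory vector store (assumes langchain-openai is installed; a real project would index its documents ahead of time in a persistent vector DB):

myapp/ai_assistants.py
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_openai import OpenAIEmbeddings

class DocsAssistant(AIAssistant):
    ...  # id, name, instructions, model, and has_rag = True as above

    def get_retriever(self) -> BaseRetriever:
        # Embed and index documents in memory, then expose them as a retriever:
        vectorstore = InMemoryVectorStore.from_documents(
            [Document(page_content="Django models map Python classes to DB tables.")],
            embedding=OpenAIEmbeddings(),
        )
        return vectorstore.as_retriever(search_kwargs={"k": 4})  # top-4 chunks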

","boost":2},{"location":"tutorial/#support-for-other-types-of-primary-key-pk","title":"Support for other types of Primary Key (PK)","text":"

You can have Django AI Assistant models use other types of primary key, such as strings, UUIDs, etc. This is useful if you're concerned about leaking sequential IDs that expose thread count, message count, etc. to the frontend. Using UUIDs also prevents users from figuring out whether a given thread or message exists (due to HTTP 404 vs 403 responses).

Here are the files you have to change if you need the IDs to be UUIDs:

myapp/fields.py
import uuid\nfrom django.db.backends.base.operations import BaseDatabaseOperations\nfrom django.db.models import AutoField, UUIDField\n\nBaseDatabaseOperations.integer_field_ranges['UUIDField'] = (0, 0)\n\nclass UUIDAutoField(UUIDField, AutoField):\n    def __init__(self, *args, **kwargs):\n        kwargs.setdefault('default', uuid.uuid4)\n        kwargs.setdefault('editable', False)\n        super().__init__(*args, **kwargs)\n
myapp/apps.py
from django_ai_assistant.apps import AIAssistantConfig\n\nclass AIAssistantConfigOverride(AIAssistantConfig):\n    default_auto_field = \"myapp.fields.UUIDAutoField\"\n
myproject/settings.py
INSTALLED_APPS = [\n    # \"django_ai_assistant\", remove this line and add the one below\n    \"myapp.apps.AIAssistantConfigOverride\",\n]\n

Make sure to run migrations after those changes:

python manage.py makemigrations\npython manage.py migrate\n

For more information, check Django docs on overriding AppConfig.

","boost":2},{"location":"tutorial/#further-configuration-of-ai-assistants","title":"Further configuration of AI Assistants","text":"

You can further configure the AIAssistant subclasses by overriding its public methods. Check the Reference for more information.
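For instance, a minimal sketch that overrides get_temperature (documented in the Reference below) for more deterministic answers:

myapp/ai_assistants.py
from django_ai_assistant import AIAssistant

class PreciseWeatherAIAssistant(AIAssistant):
    id = "precise_weather_assistant"
    name = "Precise Weather Assistant"
    instructions = "You are a weather bot. Be concise and factual."
    model = "gpt-4o"

    def get_temperature(self) -> float:
        return 0.0  # instead of the default 1.0

Other public methods such as get_model_kwargs, get_llm, and get_document_prompt can be overridden the same way.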

","boost":2},{"location":"reference/","title":"Reference","text":"

This is the reference documentation for the Django AI Assistant library.

"},{"location":"reference/#modules","title":"Modules","text":"
  • django_ai_assistant.helpers.use_cases
  • django_ai_assistant.helpers.assistants
  • django_ai_assistant.models
"},{"location":"reference/assistants-ref/","title":"django_ai_assistant.helpers.assistants","text":""},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant","title":"AIAssistant","text":"

Base class for AI Assistants. Subclasses must define at least the following attributes:

  • id: str
  • name: str
  • instructions: str
  • model: str

Subclasses can override the public methods to customize the behavior of the assistant.

Tools can be added to the assistant by decorating methods with @method_tool.

Check the docs Tutorial for more info on how to build an AI Assistant.

Source code in django_ai_assistant/helpers/assistants.py
class AIAssistant(abc.ABC):  # noqa: F821\n    \"\"\"Base class for AI Assistants. Subclasses must define at least the following attributes:\n\n    * id: str\n    * name: str\n    * instructions: str\n    * model: str\n\n    Subclasses can override the public methods to customize the behavior of the assistant.\\n\n    Tools can be added to the assistant by decorating methods with `@method_tool`.\\n\n    Check the docs Tutorial for more info on how to build an AI Assistant.\n    \"\"\"\n\n    id: ClassVar[str]  # noqa: A003\n    \"\"\"Class variable with the id of the assistant. Used to select the assistant to use.\\n\n    Must be unique across the whole Django project and match the pattern '^[a-zA-Z0-9_-]+$'.\"\"\"\n    name: ClassVar[str]\n    \"\"\"Class variable with the name of the assistant.\n    Should be a friendly name to optionally display to users.\"\"\"\n    instructions: str\n    \"\"\"Instructions for the AI assistant knowing what to do. This is the LLM system prompt.\"\"\"\n    model: str\n    \"\"\"LLM model name to use for the assistant.\\n\n    Should be a valid model name from OpenAI, because the default `get_llm` method uses OpenAI.\\n\n    `get_llm` can be overridden to use a different LLM implementation.\n    \"\"\"\n    temperature: float = 1.0\n    \"\"\"Temperature to use for the assistant LLM model.\\nDefaults to `1.0`.\"\"\"\n    tool_max_concurrency: int = 1\n    \"\"\"Maximum number of tools to run concurrently / in parallel.\\nDefaults to `1` (no concurrency).\"\"\"\n    has_rag: bool = False\n    \"\"\"Whether the assistant uses RAG (Retrieval-Augmented Generation) or not.\\n\n    Defaults to `False`.\n    When True, the assistant will use a retriever to get documents to provide as context to the LLM.\n    Additionally, the assistant class should implement the `get_retriever` method to return\n    the retriever to use.\"\"\"\n    structured_output: Dict[str, Any] | Type[BaseModel] | Type | None = None\n    \"\"\"Structured output to use for the assistant.\\n\n    Defaults to `None`.\n    When not `None`, the assistant will return a structured output in the provided format.\n    See https://python.langchain.com/v0.3/docs/how_to/structured_output/ for the available formats.\n    \"\"\"\n    _user: Any | None\n    \"\"\"The current user the assistant is helping. A model instance.\\n\n    Set by the constructor.\n    When API views are used, this is set to the current request user.\\n\n    Can be used in any `@method_tool` to customize behavior.\"\"\"\n    _request: Any | None\n    \"\"\"The current Django request the assistant was initialized with. A request instance.\\n\n    Set by the constructor.\\n\n    Can be used in any `@method_tool` to customize behavior.\"\"\"\n    _view: Any | None\n    \"\"\"The current Django view the assistant was initialized with. A view instance.\\n\n    Set by the constructor.\\n\n    Can be used in any `@method_tool` to customize behavior.\"\"\"\n    _init_kwargs: dict[str, Any]\n    \"\"\"Extra keyword arguments passed to the constructor.\\n\n    Set by the constructor.\\n\n    Can be used in any `@method_tool` to customize behavior.\"\"\"\n    _method_tools: Sequence[BaseTool]\n    \"\"\"List of `@method_tool` tools the assistant can use. 
Automatically set by the constructor.\"\"\"\n\n    _registry: ClassVar[dict[str, type[\"AIAssistant\"]]] = {}\n    \"\"\"Registry of all AIAssistant subclasses by their id.\\n\n    Automatically populated by when a subclass is declared.\\n\n    Use `get_cls_registry` and `get_cls` to access the registry.\"\"\"\n\n    def __init__(self, *, user=None, request=None, view=None, **kwargs: Any):\n        \"\"\"Initialize the AIAssistant instance.\\n\n        Optionally set the current user, request, and view for the assistant.\\n\n        Those can be used in any `@method_tool` to customize behavior.\\n\n\n        Args:\n            user (Any | None): The current user the assistant is helping. A model instance.\n                Defaults to `None`. Stored in `self._user`.\n            request (Any | None): The current Django request the assistant was initialized with.\n                A request instance. Defaults to `None`. Stored in `self._request`.\n            view (Any | None): The current Django view the assistant was initialized with.\n                A view instance. Defaults to `None`. Stored in `self._view`.\n            **kwargs: Extra keyword arguments passed to the constructor. Stored in `self._init_kwargs`.\n        \"\"\"\n\n        self._user = user\n        self._request = request\n        self._view = view\n        self._init_kwargs = kwargs\n\n        self._set_method_tools()\n\n    def __init_subclass__(cls, **kwargs: Any):\n        \"\"\"Called when a class is subclassed from AIAssistant.\n\n        This method is automatically invoked when a new subclass of AIAssistant\n        is created. It allows AIAssistant to perform additional setup or configuration\n        for the subclass, such as registering the subclass in a registry.\n\n        Args:\n            cls (type): The newly created subclass.\n            **kwargs: Additional keyword arguments passed during subclass creation.\n        \"\"\"\n        super().__init_subclass__(**kwargs)\n\n        if not hasattr(cls, \"id\"):\n            raise AIAssistantMisconfiguredError(f\"Assistant id is not defined at {cls.__name__}\")\n        if cls.id is None:\n            raise AIAssistantMisconfiguredError(f\"Assistant id is None at {cls.__name__}\")\n        if not re.match(r\"^[a-zA-Z0-9_-]+$\", cls.id):\n            # id should match the pattern '^[a-zA-Z0-9_-]+$ to support as_tool in OpenAI\n            raise AIAssistantMisconfiguredError(\n                f\"Assistant id '{cls.id}' does not match the pattern '^[a-zA-Z0-9_-]+$'\"\n                f\"at {cls.__name__}\"\n            )\n\n        cls._registry[cls.id] = cls\n\n    def _set_method_tools(self):\n        # Find tool methods (decorated with `@method_tool` from django_ai_assistant/tools.py):\n        members = inspect.getmembers(\n            self,\n            predicate=lambda m: inspect.ismethod(m) and getattr(m, \"_is_tool\", False),\n        )\n        tool_methods = [m for _, m in members]\n\n        # Sort tool methods by the order they appear in the source code,\n        # since this can be meaningful:\n        tool_methods.sort(key=lambda m: inspect.getsourcelines(m)[1])\n\n        # Transform tool methods into tool objects:\n        tools = []\n        for method in tool_methods:\n            if hasattr(method, \"_tool_maker_args\"):\n                tool = tool_decorator(\n                    *method._tool_maker_args,\n                    **method._tool_maker_kwargs,\n                )(method)\n            else:\n                tool = 
tool_decorator(method)\n            tools.append(cast(BaseTool, tool))\n\n        # Remove self from each tool args_schema:\n        for tool in tools:\n            if tool.args_schema:\n                if isinstance(tool.args_schema.__fields_set__, set):\n                    tool.args_schema.__fields_set__.remove(\"self\")\n                tool.args_schema.__fields__.pop(\"self\", None)\n\n        self._method_tools = tools\n\n    @classmethod\n    def get_cls_registry(cls) -> dict[str, type[\"AIAssistant\"]]:\n        \"\"\"Get the registry of AIAssistant classes.\n\n        Returns:\n            dict[str, type[AIAssistant]]: A dictionary mapping assistant ids to their classes.\n        \"\"\"\n        return cls._registry\n\n    @classmethod\n    def get_cls(cls, assistant_id: str) -> type[\"AIAssistant\"]:\n        \"\"\"Get the AIAssistant class for the given assistant ID.\n\n        Args:\n            assistant_id (str): The ID of the assistant to get.\n        Returns:\n            type[AIAssistant]: The AIAssistant subclass for the given ID.\n        \"\"\"\n        return cls.get_cls_registry()[assistant_id]\n\n    @classmethod\n    def clear_cls_registry(cls: type[\"AIAssistant\"]) -> None:\n        \"\"\"Clear the registry of AIAssistant classes.\"\"\"\n\n        cls._registry.clear()\n\n    def get_instructions(self) -> str:\n        \"\"\"Get the instructions for the assistant. By default, this is the `instructions` attribute.\\n\n        Override the `instructions` attribute or this method to use different instructions.\n\n        Returns:\n            str: The instructions for the assistant, i.e., the LLM system prompt.\n        \"\"\"\n        return self.instructions\n\n    def get_model(self) -> str:\n        \"\"\"Get the LLM model name for the assistant. 
By default, this is the `model` attribute.\\n\n        Used by the `get_llm` method to create the LLM instance.\\n\n        Override the `model` attribute or this method to use a different LLM model.\n\n        Returns:\n            str: The LLM model name for the assistant.\n        \"\"\"\n        return self.model\n\n    def get_temperature(self) -> float:\n        \"\"\"Get the temperature to use for the assistant LLM model.\n        By default, this is the `temperature` attribute, which is `1.0` by default.\\n\n        Used by the `get_llm` method to create the LLM instance.\\n\n        Override the `temperature` attribute or this method to use a different temperature.\n\n        Returns:\n            float: The temperature to use for the assistant LLM model.\n        \"\"\"\n        return self.temperature\n\n    def get_model_kwargs(self) -> dict[str, Any]:\n        \"\"\"Get additional keyword arguments to pass to the LLM model constructor.\\n\n        Used by the `get_llm` method to create the LLM instance.\\n\n        Override this method to pass additional keyword arguments to the LLM model constructor.\n\n        Returns:\n            dict[str, Any]: Additional keyword arguments to pass to the LLM model constructor.\n        \"\"\"\n        return {}\n\n    def get_llm(self) -> BaseChatModel:\n        \"\"\"Get the LangChain LLM instance for the assistant.\n        By default, this uses the OpenAI implementation.\\n\n        `get_model`, `get_temperature`, and `get_model_kwargs` are used to create the LLM instance.\\n\n        Override this method to use a different LLM implementation.\n\n        Returns:\n            BaseChatModel: The LLM instance for the assistant.\n        \"\"\"\n        model = self.get_model()\n        temperature = self.get_temperature()\n        model_kwargs = self.get_model_kwargs()\n        return ChatOpenAI(\n            model=model,\n            temperature=temperature,\n            model_kwargs=model_kwargs,\n        )\n\n    def get_structured_output_llm(self) -> Runnable:\n        \"\"\"Get the LLM model to use for the structured output.\n\n        Returns:\n            BaseChatModel: The LLM model to use for the structured output.\n        \"\"\"\n        if not self.structured_output:\n            raise ValueError(\"structured_output is not defined\")\n\n        llm = self.get_llm()\n\n        method = \"json_mode\"\n        if isinstance(llm, ChatOpenAI):\n            # When using ChatOpenAI, it's better to use json_schema method\n            # because it enables strict mode.\n            # https://platform.openai.com/docs/guides/structured-outputs\n            method = \"json_schema\"\n\n        return llm.with_structured_output(self.structured_output, method=method)\n\n    def get_tools(self) -> Sequence[BaseTool]:\n        \"\"\"Get the list of method tools the assistant can use.\n        By default, this is the `_method_tools` attribute, which are all `@method_tool`s.\\n\n        Override and call super to add additional tools,\n        such as [any langchain_community tools](https://python.langchain.com/v0.3/docs/integrations/tools/).\n\n        Returns:\n            Sequence[BaseTool]: The list of tools the assistant can use.\n        \"\"\"\n        return self._method_tools\n\n    def get_document_separator(self) -> str:\n        \"\"\"Get the RAG document separator to use in the prompt. 
Only used when `has_rag=True`.\\n\n        Defaults to `\"\\\\n\\\\n\"`, which is the LangChain default.\\n\n        Override this method to use a different separator.\n\n        Returns:\n            str: a separator for documents in the prompt.\n        \"\"\"\n        return DEFAULT_DOCUMENT_SEPARATOR\n\n    def get_document_prompt(self) -> PromptTemplate:\n        \"\"\"Get the PromptTemplate template to use when rendering RAG documents in the prompt.\n        Only used when `has_rag=True`.\\n\n        Defaults to `PromptTemplate.from_template(\"{page_content}\")`, which is the LangChain default.\\n\n        Override this method to use a different template.\n\n        Returns:\n            PromptTemplate: a prompt template for RAG documents.\n        \"\"\"\n        return DEFAULT_DOCUMENT_PROMPT\n\n    def get_retriever(self) -> BaseRetriever:\n        \"\"\"Get the RAG retriever to use for fetching documents.\\n\n        Must be implemented by subclasses when `has_rag=True`.\\n\n\n        Returns:\n            BaseRetriever: the RAG retriever to use for fetching documents.\n        \"\"\"\n        raise NotImplementedError(\n            f\"Override the get_retriever with your implementation at {self.__class__.__name__}\"\n        )\n\n    def get_contextualize_prompt(self) -> ChatPromptTemplate:\n        \"\"\"Get the contextualize prompt template for the assistant.\\n\n        This is used when `has_rag=True` and there are previous messages in the thread.\n        Since the latest user question might reference the chat history,\n        the LLM needs to generate a new standalone question,\n        and use that question to query the retriever for relevant documents.\\n\n        By default, this is a prompt that asks the LLM to\n        reformulate the latest user question without the chat history.\\n\n        Override this method to use a different contextualize prompt.\\n\n        See `get_history_aware_retriever` for how this prompt is used.\\n\n\n        Returns:\n            ChatPromptTemplate: The contextualize prompt template for the assistant.\n        \"\"\"\n        contextualize_q_system_prompt = (\n            \"Given a chat history and the latest user question \"\n            \"which might reference context in the chat history, \"\n            \"formulate a standalone question which can be understood \"\n            \"without the chat history. 
Do NOT answer the question, \"\n            \"just reformulate it if needed and otherwise return it as is.\"\n        )\n        return ChatPromptTemplate.from_messages(\n            [\n                (\"system\", contextualize_q_system_prompt),\n                # TODO: make history key configurable?\n                MessagesPlaceholder(\"history\"),\n                # TODO: make input key configurable?\n                (\"human\", \"{input}\"),\n            ]\n        )\n\n    def get_history_aware_retriever(self) -> Runnable[dict, RetrieverOutput]:\n        \"\"\"Get the history-aware retriever LangChain chain for the assistant.\\n\n        This is used when `has_rag=True` to fetch documents based on the chat history.\\n\n        By default, this is a chain that checks if there is chat history,\n        and if so, it uses the chat history to generate a new standalone question\n        to query the retriever for relevant documents.\\n\n        When there is no chat history, it just passes the input to the retriever.\\n\n        Override this method to use a different history-aware retriever chain.\n\n        Read more about the history-aware retriever in the\n        [LangChain docs](https://python.langchain.com/v0.2/docs/how_to/qa_chat_history_how_to/).\n\n        Returns:\n            Runnable[dict, RetrieverOutput]: a history-aware retriever LangChain chain.\n        \"\"\"\n        llm = self.get_llm()\n        retriever = self.get_retriever()\n        prompt = self.get_contextualize_prompt()\n\n        # Based on create_history_aware_retriever:\n        return RunnableBranch(\n            (\n                lambda x: not x.get(\"history\", False),  # pyright: ignore[reportAttributeAccessIssue]\n                # If no chat history, then we just pass input to retriever\n                (lambda x: x[\"input\"]) | retriever,\n            ),\n            # If chat history, then we pass inputs to LLM chain, then to retriever\n            prompt | llm | StrOutputParser() | retriever,\n        )\n\n    @with_cast_id\n    def as_graph(self, thread_id: Any | None = None) -> Runnable[dict, dict]:\n        \"\"\"Create the LangGraph graph for the assistant.\\n\n        This graph is an agent that supports chat history, tool calling, and RAG (if `has_rag=True`).\\n\n        `as_graph` uses many other methods to create the graph for the assistant.\n        Prefer to override the other methods to customize the graph for the assistant.\n        Only override this method if you need to customize the graph at a lower level.\n\n        Args:\n            thread_id (Any | None): The thread ID for the chat message history.\n                If `None`, an in-memory chat message history is used.\n\n        Returns:\n            the compiled graph\n        \"\"\"\n        from django_ai_assistant.models import Thread\n\n        llm = self.get_llm()\n        tools = self.get_tools()\n        llm_with_tools = llm.bind_tools(tools) if tools else llm\n        if thread_id:\n            thread = Thread.objects.get(id=thread_id)\n        else:\n            thread = None\n\n        def custom_add_messages(left: list[BaseMessage], right: list[BaseMessage]):\n            result = add_messages(left, right)  # type: ignore\n            if thread:\n                # Save all messages, except the initial system message:\n                thread_messages = [m for m in result if not isinstance(m, SystemMessage)]\n                save_django_messages(cast(list[BaseMessage], thread_messages), thread=thread)\n            
return result\n\n        class AgentState(TypedDict):\n            messages: Annotated[list[AnyMessage], custom_add_messages]\n            input: str | None  # noqa: A003\n            output: Any\n\n        def setup(state: AgentState):\n            system_prompt = self.get_instructions()\n            return {\"messages\": [SystemMessage(content=system_prompt)]}\n\n        def history(state: AgentState):\n            messages = thread.get_messages(include_extra_messages=True) if thread else []\n            if state[\"input\"]:\n                messages.append(HumanMessage(content=state[\"input\"]))\n\n            return {\"messages\": messages}\n\n        def retriever(state: AgentState):\n            if not self.has_rag:\n                return\n\n            retriever = self.get_history_aware_retriever()\n            # Remove the initial instructions to prevent having two SystemMessages\n            # This is necessary for compatibility with Anthropic\n            messages_to_summarize = state[\"messages\"][1:-1]\n            input_message = state[\"messages\"][-1]\n            docs = retriever.invoke({\"input\": input_message.content, \"history\": messages_to_summarize})\n\n            document_separator = self.get_document_separator()\n            document_prompt = self.get_document_prompt()\n\n            formatted_docs = document_separator.join(\n                format_document(doc, document_prompt) for doc in docs\n            )\n\n            system_message = state[\"messages\"][0]\n            system_message.content += (\n                f\"\\n\\n---START OF CONTEXT---\\n{formatted_docs}---END OF CONTEXT---\\n\\n\"\n            )\n\n        def agent(state: AgentState):\n            response = llm_with_tools.invoke(state[\"messages\"])\n\n            return {\"messages\": [response]}\n\n        def tool_selector(state: AgentState):\n            last_message = state[\"messages\"][-1]\n\n            if isinstance(last_message, AIMessage) and last_message.tool_calls:\n                return \"call_tool\"\n\n            return \"continue\"\n\n        def record_response(state: AgentState):\n            # Structured output must happen in the end, to avoid disabling tool calling.\n            # Tool calling + structured output is not supported by OpenAI:\n            if self.structured_output:\n                messages = state[\"messages\"]\n\n                # Change the original system prompt:\n                if isinstance(messages[0], SystemMessage):\n                    messages[0].content += \"\\nUse the chat history to produce a JSON output.\"\n\n                # Add a final message asking for JSON generation / structured output:\n                json_request_message = HumanMessage(\n                    content=\"Use the chat history to produce a JSON output.\"\n                )\n                messages.append(json_request_message)\n\n                llm_with_structured_output = self.get_structured_output_llm()\n                response = llm_with_structured_output.invoke(messages)\n            else:\n                response = state[\"messages\"][-1].content\n\n            return {\"output\": response}\n\n        workflow = StateGraph(AgentState)\n\n        workflow.add_node(\"setup\", setup)\n        workflow.add_node(\"history\", history)\n        workflow.add_node(\"retriever\", retriever)\n        workflow.add_node(\"agent\", agent)\n        workflow.add_node(\"tools\", ToolNode(tools))\n        workflow.add_node(\"respond\", record_response)\n\n        
workflow.set_entry_point(\"setup\")\n        workflow.add_edge(\"setup\", \"history\")\n        workflow.add_edge(\"history\", \"retriever\")\n        workflow.add_edge(\"retriever\", \"agent\")\n        workflow.add_conditional_edges(\n            \"agent\",\n            tool_selector,\n            {\n                \"call_tool\": \"tools\",\n                \"continue\": \"respond\",\n            },\n        )\n        workflow.add_edge(\"tools\", \"agent\")\n        workflow.add_edge(\"respond\", END)\n\n        return workflow.compile()\n\n    @with_cast_id\n    def invoke(self, *args: Any, thread_id: Any | None, **kwargs: Any) -> dict:\n        \"\"\"Invoke the assistant LangChain graph with the given arguments and keyword arguments.\\n\n        This is the lower-level method to run the assistant.\\n\n        The graph is created by the `as_graph` method.\\n\n\n        Args:\n            *args: Positional arguments to pass to the graph.\n                To add a new message, use a dict like `{\"input\": \"user message\"}`.\n                If thread already has a `HumanMessage` in the end, you can invoke without args.\n            thread_id (Any | None): The thread ID for the chat message history.\n                If `None`, an in-memory chat message history is used.\n            **kwargs: Keyword arguments to pass to the graph.\n\n        Returns:\n            dict: The output of the assistant graph,\n                structured like `{\"output\": \"assistant response\", \"history\": ...}`.\n        \"\"\"\n        graph = self.as_graph(thread_id)\n        config = kwargs.pop(\"config\", {})\n        config[\"max_concurrency\"] = config.pop(\"max_concurrency\", self.tool_max_concurrency)\n        return graph.invoke(*args, config=config, **kwargs)\n\n    @with_cast_id\n    def run(self, message: str, thread_id: Any | None = None, **kwargs: Any) -> Any:\n        \"\"\"Run the assistant with the given message and thread ID.\\n\n        This is the higher-level method to run the assistant.\\n\n\n        Args:\n            message (str): The user message to pass to the assistant.\n            thread_id (Any | None): The thread ID for the chat message history.\n                If `None`, an in-memory chat message history is used.\n            **kwargs: Additional keyword arguments to pass to the graph.\n\n        Returns:\n            Any: The assistant response to the user message.\n        \"\"\"\n        return self.invoke(\n            {\n                \"input\": message,\n            },\n            thread_id=thread_id,\n            **kwargs,\n        )[\"output\"]\n\n    def _run_as_tool(self, message: str, **kwargs: Any) -> Any:\n        return self.run(message, thread_id=None, **kwargs)\n\n    def as_tool(self, description: str) -> BaseTool:\n        \"\"\"Create a tool from the assistant.\\n\n        This is useful to compose assistants.\\n\n\n        Args:\n            description (str): The description for the tool.\n\n        Returns:\n            BaseTool: A tool that runs the assistant. The tool name is this assistant's id.\n        \"\"\"\n        return StructuredTool.from_function(\n            func=self._run_as_tool,\n            name=self.id,\n            description=description,\n        )\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.id","title":"id: str class-attribute","text":"

Class variable with the id of the assistant. Used to select the assistant to use.

Must be unique across the whole Django project and match the pattern '^[a-zA-Z0-9_-]+$'.
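For example, a minimal assistant needs little more than these class attributes (a sketch; the class name, instructions, and model below are illustrative, and the import path follows the "Source code in django_ai_assistant/helpers/assistants.py" references on this page):

from django_ai_assistant.helpers.assistants import AIAssistant

class WeatherAssistant(AIAssistant):
    # id must be unique project-wide and match '^[a-zA-Z0-9_-]+$':
    id = "weather_assistant"
    name = "Weather Assistant"
    instructions = "You are a helpful weather assistant."
    model = "gpt-4o"

Later sketches on this page reuse this hypothetical WeatherAssistant.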

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.name","title":"name: str class-attribute","text":"

Class variable with the name of the assistant. Should be a friendly name to optionally display to users.

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.instructions","title":"instructions: str instance-attribute","text":"

Instructions for the AI assistant knowing what to do. This is the LLM system prompt.

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.model","title":"model: str instance-attribute","text":"

LLM model name to use for the assistant.

Should be a valid model name from OpenAI, because the default get_llm method uses OpenAI.

get_llm can be overridden to use a different LLM implementation.

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.temperature","title":"temperature: float = 1.0 class-attribute instance-attribute","text":"

Temperature to use for the assistant LLM model. Defaults to 1.0.

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.tool_max_concurrency","title":"tool_max_concurrency: int = 1 class-attribute instance-attribute","text":"

Maximum number of tools to run concurrently / in parallel. Defaults to 1 (no concurrency).

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.has_rag","title":"has_rag: bool = False class-attribute instance-attribute","text":"

Whether the assistant uses RAG (Retrieval-Augmented Generation) or not.

Defaults to False. When True, the assistant will use a retriever to get documents to provide as context to the LLM. Additionally, the assistant class should implement the get_retriever method to return the retriever to use.

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.structured_output","title":"structured_output: Dict[str, Any] | Type[BaseModel] | Type | None = None class-attribute instance-attribute","text":"

Structured output to use for the assistant.

Defaults to None. When not None, the assistant will return a structured output in the provided format. See https://python.langchain.com/v0.3/docs/how_to/structured_output/ for the available formats.
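A minimal sketch with a Pydantic model (the schema is hypothetical):

from pydantic import BaseModel

from django_ai_assistant.helpers.assistants import AIAssistant

class MovieReview(BaseModel):
    title: str
    rating: int  # e.g. 1 to 5

class ReviewAssistant(AIAssistant):
    id = "review_assistant"
    name = "Review Assistant"
    instructions = "You write short movie reviews."
    model = "gpt-4o"
    # The assistant's output will follow the MovieReview schema instead of being a plain string:
    structured_output = MovieReview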

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant._method_tools","title":"_method_tools: Sequence[BaseTool] instance-attribute","text":"

List of @method_tool tools the assistant can use. Automatically set by the constructor.

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant._registry","title":"_registry: dict[str, type[AIAssistant]] = {} class-attribute","text":"

Registry of all AIAssistant subclasses by their id.

Automatically populated when a subclass is declared.

Use get_cls_registry and get_cls to access the registry.

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant._user","title":"_user: Any | None = user instance-attribute","text":"

The current user the assistant is helping. A model instance.

Set by the constructor. When API views are used, this is set to the current request user.

Can be used in any @method_tool to customize behavior.

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant._request","title":"_request: Any | None = request instance-attribute","text":"

The current Django request the assistant was initialized with. A request instance.

Set by the constructor.

Can be used in any @method_tool to customize behavior.

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant._view","title":"_view: Any | None = view instance-attribute","text":"

The current Django view the assistant was initialized with. A view instance.

Set by the constructor.

Can be used in any @method_tool to customize behavior.

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant._init_kwargs","title":"_init_kwargs: dict[str, Any] = kwargs instance-attribute","text":"

Extra keyword arguments passed to the constructor.

Set by the constructor.

Can be used in any @method_tool to customize behavior.

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.__init__","title":"__init__(*, user=None, request=None, view=None, **kwargs)","text":"

Initialize the AIAssistant instance.

Optionally set the current user, request, and view for the assistant.

Those can be used in any @method_tool to customize behavior.

Parameters:

Name Type Description Default user Any | None

The current user the assistant is helping. A model instance. Defaults to None. Stored in self._user.

None request Any | None

The current Django request the assistant was initialized with. A request instance. Defaults to None. Stored in self._request.

None view Any | None

The current Django view the assistant was initialized with. A view instance. Defaults to None. Stored in self._view.

None **kwargs Any

Extra keyword arguments passed to the constructor. Stored in self._init_kwargs.

{} Source code in django_ai_assistant/helpers/assistants.py
def __init__(self, *, user=None, request=None, view=None, **kwargs: Any):\n    \"\"\"Initialize the AIAssistant instance.\\n\n    Optionally set the current user, request, and view for the assistant.\\n\n    Those can be used in any `@method_tool` to customize behavior.\\n\n\n    Args:\n        user (Any | None): The current user the assistant is helping. A model instance.\n            Defaults to `None`. Stored in `self._user`.\n        request (Any | None): The current Django request the assistant was initialized with.\n            A request instance. Defaults to `None`. Stored in `self._request`.\n        view (Any | None): The current Django view the assistant was initialized with.\n            A view instance. Defaults to `None`. Stored in `self._view`.\n        **kwargs: Extra keyword arguments passed to the constructor. Stored in `self._init_kwargs`.\n    \"\"\"\n\n    self._user = user\n    self._request = request\n    self._view = view\n    self._init_kwargs = kwargs\n\n    self._set_method_tools()\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_cls_registry","title":"get_cls_registry() classmethod","text":"

Get the registry of AIAssistant classes.

Returns:

Type Description dict[str, type[AIAssistant]]

dict[str, type[AIAssistant]]: A dictionary mapping assistant ids to their classes.

Source code in django_ai_assistant/helpers/assistants.py
@classmethod\ndef get_cls_registry(cls) -> dict[str, type[\"AIAssistant\"]]:\n    \"\"\"Get the registry of AIAssistant classes.\n\n    Returns:\n        dict[str, type[AIAssistant]]: A dictionary mapping assistant ids to their classes.\n    \"\"\"\n    return cls._registry\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_cls","title":"get_cls(assistant_id) classmethod","text":"

Get the AIAssistant class for the given assistant ID.

Parameters:

Name Type Description Default assistant_id str

The ID of the assistant to get.

required

Returns:

type[AIAssistant]: The AIAssistant subclass for the given ID.

Source code in django_ai_assistant/helpers/assistants.py
@classmethod\ndef get_cls(cls, assistant_id: str) -> type[\"AIAssistant\"]:\n    \"\"\"Get the AIAssistant class for the given assistant ID.\n\n    Args:\n        assistant_id (str): The ID of the assistant to get.\n    Returns:\n        type[AIAssistant]: The AIAssistant subclass for the given ID.\n    \"\"\"\n    return cls.get_cls_registry()[assistant_id]\n
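Usage sketch, assuming the hypothetical WeatherAssistant from earlier has been declared:

from django_ai_assistant.helpers.assistants import AIAssistant

registry = AIAssistant.get_cls_registry()  # e.g. {"weather_assistant": WeatherAssistant, ...}
assistant_cls = AIAssistant.get_cls("weather_assistant")
assistant = assistant_cls(user=some_user)  # some_user: any Django user instance (placeholder)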
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.clear_cls_registry","title":"clear_cls_registry() classmethod","text":"

Clear the registry of AIAssistant classes.

Source code in django_ai_assistant/helpers/assistants.py
@classmethod\ndef clear_cls_registry(cls: type[\"AIAssistant\"]) -> None:\n    \"\"\"Clear the registry of AIAssistant classes.\"\"\"\n\n    cls._registry.clear()\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_instructions","title":"get_instructions()","text":"

Get the instructions for the assistant. By default, this is the instructions attribute.

Override the instructions attribute or this method to use different instructions.

Returns:

Name Type Description str str

The instructions for the assistant, i.e., the LLM system prompt.

Source code in django_ai_assistant/helpers/assistants.py
def get_instructions(self) -> str:\n    \"\"\"Get the instructions for the assistant. By default, this is the `instructions` attribute.\\n\n    Override the `instructions` attribute or this method to use different instructions.\n\n    Returns:\n        str: The instructions for the assistant, i.e., the LLM system prompt.\n    \"\"\"\n    return self.instructions\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_model","title":"get_model()","text":"

Get the LLM model name for the assistant. By default, this is the model attribute.

Used by the get_llm method to create the LLM instance.

Override the model attribute or this method to use a different LLM model.

Returns:

Name Type Description str str

The LLM model name for the assistant.

Source code in django_ai_assistant/helpers/assistants.py
def get_model(self) -> str:\n    \"\"\"Get the LLM model name for the assistant. By default, this is the `model` attribute.\\n\n    Used by the `get_llm` method to create the LLM instance.\\n\n    Override the `model` attribute or this method to use a different LLM model.\n\n    Returns:\n        str: The LLM model name for the assistant.\n    \"\"\"\n    return self.model\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_temperature","title":"get_temperature()","text":"

Get the temperature to use for the assistant LLM model. By default, this is the temperature attribute, which is 1.0 by default.

Used by the get_llm method to create the LLM instance.

Override the temperature attribute or this method to use a different temperature.

Returns:

Name Type Description float float

The temperature to use for the assistant LLM model.

Source code in django_ai_assistant/helpers/assistants.py
def get_temperature(self) -> float:\n    \"\"\"Get the temperature to use for the assistant LLM model.\n    By default, this is the `temperature` attribute, which is `1.0` by default.\\n\n    Used by the `get_llm` method to create the LLM instance.\\n\n    Override the `temperature` attribute or this method to use a different temperature.\n\n    Returns:\n        float: The temperature to use for the assistant LLM model.\n    \"\"\"\n    return self.temperature\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_model_kwargs","title":"get_model_kwargs()","text":"

Get additional keyword arguments to pass to the LLM model constructor.

Used by the get_llm method to create the LLM instance.

Override this method to pass additional keyword arguments to the LLM model constructor.

Returns:

Type Description dict[str, Any]

dict[str, Any]: Additional keyword arguments to pass to the LLM model constructor.

Source code in django_ai_assistant/helpers/assistants.py
def get_model_kwargs(self) -> dict[str, Any]:\n    \"\"\"Get additional keyword arguments to pass to the LLM model constructor.\\n\n    Used by the `get_llm` method to create the LLM instance.\\n\n    Override this method to pass additional keyword arguments to the LLM model constructor.\n\n    Returns:\n        dict[str, Any]: Additional keyword arguments to pass to the LLM model constructor.\n    \"\"\"\n    return {}\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_llm","title":"get_llm()","text":"

Get the LangChain LLM instance for the assistant. By default, this uses the OpenAI implementation.

get_model, get_temperature, and get_model_kwargs are used to create the LLM instance.

Override this method to use a different LLM implementation.

Returns:

Name Type Description BaseChatModel BaseChatModel

The LLM instance for the assistant.

Source code in django_ai_assistant/helpers/assistants.py
def get_llm(self) -> BaseChatModel:\n    \"\"\"Get the LangChain LLM instance for the assistant.\n    By default, this uses the OpenAI implementation.\\n\n    `get_model`, `get_temperature`, and `get_model_kwargs` are used to create the LLM instance.\\n\n    Override this method to use a different LLM implementation.\n\n    Returns:\n        BaseChatModel: The LLM instance for the assistant.\n    \"\"\"\n    model = self.get_model()\n    temperature = self.get_temperature()\n    model_kwargs = self.get_model_kwargs()\n    return ChatOpenAI(\n        model=model,\n        temperature=temperature,\n        model_kwargs=model_kwargs,\n    )\n
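For instance, a sketch of overriding get_llm to use Anthropic via langchain-anthropic (an assumption: that package is installed separately; the model name is illustrative):

from langchain_anthropic import ChatAnthropic

from django_ai_assistant.helpers.assistants import AIAssistant

class ClaudeAssistant(AIAssistant):
    id = "claude_assistant"
    name = "Claude Assistant"
    instructions = "You are a helpful assistant."
    model = "claude-3-5-sonnet-20240620"

    def get_llm(self):
        # Reuse the configured model name and temperature with a different provider:
        return ChatAnthropic(
            model=self.get_model(),
            temperature=self.get_temperature(),
        )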
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_structured_output_llm","title":"get_structured_output_llm()","text":"

Get the LLM model to use for the structured output.

Returns:

Name Type Description BaseChatModel Runnable

The LLM model to use for the structured output.

Source code in django_ai_assistant/helpers/assistants.py
def get_structured_output_llm(self) -> Runnable:\n    \"\"\"Get the LLM model to use for the structured output.\n\n    Returns:\n        BaseChatModel: The LLM model to use for the structured output.\n    \"\"\"\n    if not self.structured_output:\n        raise ValueError(\"structured_output is not defined\")\n\n    llm = self.get_llm()\n\n    method = \"json_mode\"\n    if isinstance(llm, ChatOpenAI):\n        # When using ChatOpenAI, it's better to use json_schema method\n        # because it enables strict mode.\n        # https://platform.openai.com/docs/guides/structured-outputs\n        method = \"json_schema\"\n\n    return llm.with_structured_output(self.structured_output, method=method)\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_tools","title":"get_tools()","text":"

Get the list of method tools the assistant can use. By default, this is the _method_tools attribute, which are all @method_tools.

Override and call super to add additional tools, such as any langchain_community tools.

Returns:

Type Description Sequence[BaseTool]

Sequence[BaseTool]: The list of tools the assistant can use.

Source code in django_ai_assistant/helpers/assistants.py
def get_tools(self) -> Sequence[BaseTool]:\n    \"\"\"Get the list of method tools the assistant can use.\n    By default, this is the `_method_tools` attribute, which are all `@method_tool`s.\\n\n    Override and call super to add additional tools,\n    such as [any langchain_community tools](https://python.langchain.com/v0.3/docs/integrations/tools/).\n\n    Returns:\n        Sequence[BaseTool]: The list of tools the assistant can use.\n    \"\"\"\n    return self._method_tools\n
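A sketch of appending a langchain_community tool to the @method_tool tools (assumes the langchain-community and wikipedia packages are installed):

from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper

from django_ai_assistant.helpers.assistants import AIAssistant

class ResearchAssistant(AIAssistant):
    id = "research_assistant"
    name = "Research Assistant"
    instructions = "You answer research questions."
    model = "gpt-4o"

    def get_tools(self):
        wikipedia_tool = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
        # Keep the @method_tool tools and add the extra one:
        return [*super().get_tools(), wikipedia_tool]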
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_document_separator","title":"get_document_separator()","text":"

Get the RAG document separator to use in the prompt. Only used when has_rag=True.

Defaults to \"\\n\\n\", which is the LangChain default.

Override this method to use a different separator.

Returns:

Name Type Description str str

a separator for documents in the prompt.

Source code in django_ai_assistant/helpers/assistants.py
def get_document_separator(self) -> str:\n    \"\"\"Get the RAG document separator to use in the prompt. Only used when `has_rag=True`.\\n\n    Defaults to `\"\\\\n\\\\n\"`, which is the LangChain default.\\n\n    Override this method to use a different separator.\n\n    Returns:\n        str: a separator for documents in the prompt.\n    \"\"\"\n    return DEFAULT_DOCUMENT_SEPARATOR\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_document_prompt","title":"get_document_prompt()","text":"

Get the PromptTemplate template to use when rendering RAG documents in the prompt. Only used when has_rag=True.

Defaults to PromptTemplate.from_template(\"{page_content}\"), which is the LangChain default.

Override this method to use a different template.

Returns:

Name Type Description PromptTemplate PromptTemplate

a prompt template for RAG documents.

Source code in django_ai_assistant/helpers/assistants.py
def get_document_prompt(self) -> PromptTemplate:\n    \"\"\"Get the PromptTemplate template to use when rendering RAG documents in the prompt.\n    Only used when `has_rag=True`.\\n\n    Defaults to `PromptTemplate.from_template(\"{page_content}\")`, which is the LangChain default.\\n\n    Override this method to use a different template.\n\n    Returns:\n        PromptTemplate: a prompt template for RAG documents.\n    \"\"\"\n    return DEFAULT_DOCUMENT_PROMPT\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_retriever","title":"get_retriever()","text":"

Get the RAG retriever to use for fetching documents.

Must be implemented by subclasses when has_rag=True.

Returns:

Name Type Description BaseRetriever BaseRetriever

the RAG retriever to use for fetching documents.

Source code in django_ai_assistant/helpers/assistants.py
def get_retriever(self) -> BaseRetriever:\n    \"\"\"Get the RAG retriever to use for fetching documents.\\n\n    Must be implemented by subclasses when `has_rag=True`.\\n\n\n    Returns:\n        BaseRetriever: the RAG retriever to use for fetching documents.\n    \"\"\"\n    raise NotImplementedError(\n        f\"Override the get_retriever with your implementation at {self.__class__.__name__}\"\n    )\n
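A minimal RAG sketch using an in-memory vector store from langchain-core with OpenAI embeddings (both choices are assumptions for illustration, not requirements of this library):

from langchain_core.documents import Document
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_openai import OpenAIEmbeddings

from django_ai_assistant.helpers.assistants import AIAssistant

class DocsAssistant(AIAssistant):
    id = "docs_assistant"
    name = "Docs Assistant"
    instructions = "Answer questions using the provided context."
    model = "gpt-4o"
    has_rag = True  # enables the retriever node in the graph

    def get_retriever(self):
        store = InMemoryVectorStore.from_documents(
            [Document(page_content="Django AI Assistant integrates LLMs with Django apps.")],
            OpenAIEmbeddings(),
        )
        return store.as_retriever()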
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_contextualize_prompt","title":"get_contextualize_prompt()","text":"

Get the contextualize prompt template for the assistant.

This is used when has_rag=True and there are previous messages in the thread. Since the latest user question might reference the chat history, the LLM needs to generate a new standalone question, and use that question to query the retriever for relevant documents.

By default, this is a prompt that asks the LLM to reformulate the latest user question without the chat history.

Override this method to use a different contextualize prompt.

See get_history_aware_retriever for how this prompt is used.

Returns:

Name Type Description ChatPromptTemplate ChatPromptTemplate

The contextualize prompt template for the assistant.

Source code in django_ai_assistant/helpers/assistants.py
def get_contextualize_prompt(self) -> ChatPromptTemplate:\n    \"\"\"Get the contextualize prompt template for the assistant.\\n\n    This is used when `has_rag=True` and there are previous messages in the thread.\n    Since the latest user question might reference the chat history,\n    the LLM needs to generate a new standalone question,\n    and use that question to query the retriever for relevant documents.\\n\n    By default, this is a prompt that asks the LLM to\n    reformulate the latest user question without the chat history.\\n\n    Override this method to use a different contextualize prompt.\\n\n    See `get_history_aware_retriever` for how this prompt is used.\\n\n\n    Returns:\n        ChatPromptTemplate: The contextualize prompt template for the assistant.\n    \"\"\"\n    contextualize_q_system_prompt = (\n        \"Given a chat history and the latest user question \"\n        \"which might reference context in the chat history, \"\n        \"formulate a standalone question which can be understood \"\n        \"without the chat history. Do NOT answer the question, \"\n        \"just reformulate it if needed and otherwise return it as is.\"\n    )\n    return ChatPromptTemplate.from_messages(\n        [\n            (\"system\", contextualize_q_system_prompt),\n            # TODO: make history key configurable?\n            MessagesPlaceholder(\"history\"),\n            # TODO: make input key configurable?\n            (\"human\", \"{input}\"),\n        ]\n    )\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_history_aware_retriever","title":"get_history_aware_retriever()","text":"

Get the history-aware retriever LangChain chain for the assistant.

This is used when has_rag=True to fetch documents based on the chat history.

By default, this is a chain that checks if there is chat history, and if so, it uses the chat history to generate a new standalone question to query the retriever for relevant documents.

When there is no chat history, it just passes the input to the retriever.

Override this method to use a different history-aware retriever chain.

Read more about the history-aware retriever in the LangChain docs.

Returns:

Type Description Runnable[dict, RetrieverOutput]

Runnable[dict, RetrieverOutput]: a history-aware retriever LangChain chain.

Source code in django_ai_assistant/helpers/assistants.py
def get_history_aware_retriever(self) -> Runnable[dict, RetrieverOutput]:\n    \"\"\"Get the history-aware retriever LangChain chain for the assistant.\\n\n    This is used when `has_rag=True` to fetch documents based on the chat history.\\n\n    By default, this is a chain that checks if there is chat history,\n    and if so, it uses the chat history to generate a new standalone question\n    to query the retriever for relevant documents.\\n\n    When there is no chat history, it just passes the input to the retriever.\\n\n    Override this method to use a different history-aware retriever chain.\n\n    Read more about the history-aware retriever in the\n    [LangChain docs](https://python.langchain.com/v0.2/docs/how_to/qa_chat_history_how_to/).\n\n    Returns:\n        Runnable[dict, RetrieverOutput]: a history-aware retriever LangChain chain.\n    \"\"\"\n    llm = self.get_llm()\n    retriever = self.get_retriever()\n    prompt = self.get_contextualize_prompt()\n\n    # Based on create_history_aware_retriever:\n    return RunnableBranch(\n        (\n            lambda x: not x.get(\"history\", False),  # pyright: ignore[reportAttributeAccessIssue]\n            # If no chat history, then we just pass input to retriever\n            (lambda x: x[\"input\"]) | retriever,\n        ),\n        # If chat history, then we pass inputs to LLM chain, then to retriever\n        prompt | llm | StrOutputParser() | retriever,\n    )\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.as_graph","title":"as_graph(thread_id=None)","text":"

Create the LangGraph graph for the assistant.

This graph is an agent that supports chat history, tool calling, and RAG (if has_rag=True).

as_graph uses many other methods to create the graph for the assistant. Prefer to override the other methods to customize the graph for the assistant. Only override this method if you need to customize the graph at a lower level.

Parameters:

Name Type Description Default thread_id Any | None

The thread ID for the chat message history. If None, an in-memory chat message history is used.

None

Returns:

Type Description Runnable[dict, dict]

the compiled graph

Source code in django_ai_assistant/helpers/assistants.py
@with_cast_id\ndef as_graph(self, thread_id: Any | None = None) -> Runnable[dict, dict]:\n    \"\"\"Create the LangGraph graph for the assistant.\\n\n    This graph is an agent that supports chat history, tool calling, and RAG (if `has_rag=True`).\\n\n    `as_graph` uses many other methods to create the graph for the assistant.\n    Prefer to override the other methods to customize the graph for the assistant.\n    Only override this method if you need to customize the graph at a lower level.\n\n    Args:\n        thread_id (Any | None): The thread ID for the chat message history.\n            If `None`, an in-memory chat message history is used.\n\n    Returns:\n        the compiled graph\n    \"\"\"\n    from django_ai_assistant.models import Thread\n\n    llm = self.get_llm()\n    tools = self.get_tools()\n    llm_with_tools = llm.bind_tools(tools) if tools else llm\n    if thread_id:\n        thread = Thread.objects.get(id=thread_id)\n    else:\n        thread = None\n\n    def custom_add_messages(left: list[BaseMessage], right: list[BaseMessage]):\n        result = add_messages(left, right)  # type: ignore\n        if thread:\n            # Save all messages, except the initial system message:\n            thread_messages = [m for m in result if not isinstance(m, SystemMessage)]\n            save_django_messages(cast(list[BaseMessage], thread_messages), thread=thread)\n        return result\n\n    class AgentState(TypedDict):\n        messages: Annotated[list[AnyMessage], custom_add_messages]\n        input: str | None  # noqa: A003\n        output: Any\n\n    def setup(state: AgentState):\n        system_prompt = self.get_instructions()\n        return {\"messages\": [SystemMessage(content=system_prompt)]}\n\n    def history(state: AgentState):\n        messages = thread.get_messages(include_extra_messages=True) if thread else []\n        if state[\"input\"]:\n            messages.append(HumanMessage(content=state[\"input\"]))\n\n        return {\"messages\": messages}\n\n    def retriever(state: AgentState):\n        if not self.has_rag:\n            return\n\n        retriever = self.get_history_aware_retriever()\n        # Remove the initial instructions to prevent having two SystemMessages\n        # This is necessary for compatibility with Anthropic\n        messages_to_summarize = state[\"messages\"][1:-1]\n        input_message = state[\"messages\"][-1]\n        docs = retriever.invoke({\"input\": input_message.content, \"history\": messages_to_summarize})\n\n        document_separator = self.get_document_separator()\n        document_prompt = self.get_document_prompt()\n\n        formatted_docs = document_separator.join(\n            format_document(doc, document_prompt) for doc in docs\n        )\n\n        system_message = state[\"messages\"][0]\n        system_message.content += (\n            f\"\\n\\n---START OF CONTEXT---\\n{formatted_docs}---END OF CONTEXT---\\n\\n\"\n        )\n\n    def agent(state: AgentState):\n        response = llm_with_tools.invoke(state[\"messages\"])\n\n        return {\"messages\": [response]}\n\n    def tool_selector(state: AgentState):\n        last_message = state[\"messages\"][-1]\n\n        if isinstance(last_message, AIMessage) and last_message.tool_calls:\n            return \"call_tool\"\n\n        return \"continue\"\n\n    def record_response(state: AgentState):\n        # Structured output must happen in the end, to avoid disabling tool calling.\n        # Tool calling + structured output is not supported by OpenAI:\n        if 
self.structured_output:\n            messages = state[\"messages\"]\n\n            # Change the original system prompt:\n            if isinstance(messages[0], SystemMessage):\n                messages[0].content += \"\\nUse the chat history to produce a JSON output.\"\n\n            # Add a final message asking for JSON generation / structured output:\n            json_request_message = HumanMessage(\n                content=\"Use the chat history to produce a JSON output.\"\n            )\n            messages.append(json_request_message)\n\n            llm_with_structured_output = self.get_structured_output_llm()\n            response = llm_with_structured_output.invoke(messages)\n        else:\n            response = state[\"messages\"][-1].content\n\n        return {\"output\": response}\n\n    workflow = StateGraph(AgentState)\n\n    workflow.add_node(\"setup\", setup)\n    workflow.add_node(\"history\", history)\n    workflow.add_node(\"retriever\", retriever)\n    workflow.add_node(\"agent\", agent)\n    workflow.add_node(\"tools\", ToolNode(tools))\n    workflow.add_node(\"respond\", record_response)\n\n    workflow.set_entry_point(\"setup\")\n    workflow.add_edge(\"setup\", \"history\")\n    workflow.add_edge(\"history\", \"retriever\")\n    workflow.add_edge(\"retriever\", \"agent\")\n    workflow.add_conditional_edges(\n        \"agent\",\n        tool_selector,\n        {\n            \"call_tool\": \"tools\",\n            \"continue\": \"respond\",\n        },\n    )\n    workflow.add_edge(\"tools\", \"agent\")\n    workflow.add_edge(\"respond\", END)\n\n    return workflow.compile()\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.invoke","title":"invoke(*args, thread_id, **kwargs)","text":"

Invoke the assistant LangChain graph with the given arguments and keyword arguments.

This is the lower-level method to run the assistant.

The graph is created by the as_graph method.

Parameters:

Name Type Description Default *args Any

Positional arguments to pass to the graph. To add a new message, use a dict like {\"input\": \"user message\"}. If thread already has a HumanMessage in the end, you can invoke without args.

() thread_id Any | None

The thread ID for the chat message history. If None, an in-memory chat message history is used.

required **kwargs Any

Keyword arguments to pass to the graph.

{}

Returns:

Name Type Description dict dict

The output of the assistant graph, structured like {\"output\": \"assistant response\", \"history\": ...}.

Source code in django_ai_assistant/helpers/assistants.py
@with_cast_id\ndef invoke(self, *args: Any, thread_id: Any | None, **kwargs: Any) -> dict:\n    \"\"\"Invoke the assistant LangChain graph with the given arguments and keyword arguments.\\n\n    This is the lower-level method to run the assistant.\\n\n    The graph is created by the `as_graph` method.\\n\n\n    Args:\n        *args: Positional arguments to pass to the graph.\n            To add a new message, use a dict like `{\"input\": \"user message\"}`.\n            If thread already has a `HumanMessage` in the end, you can invoke without args.\n        thread_id (Any | None): The thread ID for the chat message history.\n            If `None`, an in-memory chat message history is used.\n        **kwargs: Keyword arguments to pass to the graph.\n\n    Returns:\n        dict: The output of the assistant graph,\n            structured like `{\"output\": \"assistant response\", \"history\": ...}`.\n    \"\"\"\n    graph = self.as_graph(thread_id)\n    config = kwargs.pop(\"config\", {})\n    config[\"max_concurrency\"] = config.pop(\"max_concurrency\", self.tool_max_concurrency)\n    return graph.invoke(*args, config=config, **kwargs)\n
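Usage sketch, reusing the hypothetical WeatherAssistant (assumes some_user is a Django user and thread is an existing Thread instance):

assistant = WeatherAssistant(user=some_user)
result = assistant.invoke(
    {"input": "What's the weather in Recife?"},
    thread_id=thread.id,  # or thread_id=None for in-memory history
)
print(result["output"])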
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.run","title":"run(message, thread_id=None, **kwargs)","text":"

Run the assistant with the given message and thread ID.

This is the higher-level method to run the assistant.

Parameters:

Name Type Description Default message str

The user message to pass to the assistant.

required thread_id Any | None

The thread ID for the chat message history. If None, an in-memory chat message history is used.

None **kwargs Any

Additional keyword arguments to pass to the graph.

{}

Returns:

Name Type Description Any Any

The assistant response to the user message.

Source code in django_ai_assistant/helpers/assistants.py
@with_cast_id\ndef run(self, message: str, thread_id: Any | None = None, **kwargs: Any) -> Any:\n    \"\"\"Run the assistant with the given message and thread ID.\\n\n    This is the higher-level method to run the assistant.\\n\n\n    Args:\n        message (str): The user message to pass to the assistant.\n        thread_id (Any | None): The thread ID for the chat message history.\n            If `None`, an in-memory chat message history is used.\n        **kwargs: Additional keyword arguments to pass to the graph.\n\n    Returns:\n        Any: The assistant response to the user message.\n    \"\"\"\n    return self.invoke(\n        {\n            \"input\": message,\n        },\n        thread_id=thread_id,\n        **kwargs,\n    )[\"output\"]\n
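The equivalent higher-level call returns only the response (same assumptions as the invoke sketch above):

response = WeatherAssistant(user=some_user).run(
    "What's the weather in Recife?",
    thread_id=thread.id,
)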
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.as_tool","title":"as_tool(description)","text":"

Create a tool from the assistant.

This is useful to compose assistants.

Parameters:

Name Type Description Default description str

The description for the tool.

required

Returns:

Name Type Description BaseTool BaseTool

A tool that runs the assistant. The tool name is this assistant's id.

Source code in django_ai_assistant/helpers/assistants.py
def as_tool(self, description: str) -> BaseTool:\n    \"\"\"Create a tool from the assistant.\\n\n    This is useful to compose assistants.\\n\n\n    Args:\n        description (str): The description for the tool.\n\n    Returns:\n        BaseTool: A tool that runs the assistant. The tool name is this assistant's id.\n    \"\"\"\n    return StructuredTool.from_function(\n        func=self._run_as_tool,\n        name=self.id,\n        description=description,\n    )\n
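A composition sketch in which one assistant exposes another as a tool (both classes are the hypothetical ones from earlier examples):

class TravelAssistant(AIAssistant):
    id = "travel_assistant"
    name = "Travel Assistant"
    instructions = "You help users plan trips."
    model = "gpt-4o"

    def get_tools(self):
        weather_tool = WeatherAssistant(user=self._user).as_tool(
            description="Answers questions about the weather in a given location."
        )
        return [*super().get_tools(), weather_tool]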
"},{"location":"reference/models-ref/","title":"django_ai_assistant.models","text":""},{"location":"reference/models-ref/#django_ai_assistant.models.Thread","title":"Thread","text":"

Bases: Model

Thread model. A thread is a collection of messages between a user and the AI assistant. Also called conversation or session.

Source code in django_ai_assistant/models.py
class Thread(models.Model):\n    \"\"\"Thread model. A thread is a collection of messages between a user and the AI assistant.\n    Also called conversation or session.\"\"\"\n\n    id: Any  # noqa: A003\n    messages: Manager[\"Message\"]\n    name = models.CharField(max_length=255, blank=True)\n    \"\"\"Name of the thread. Can be blank.\"\"\"\n    created_by = models.ForeignKey(\n        settings.AUTH_USER_MODEL,\n        on_delete=models.SET_NULL,\n        related_name=\"ai_assistant_threads\",\n        null=True,\n    )\n    \"\"\"User who created the thread. Can be null. Set to null/None when user is deleted.\"\"\"\n    assistant_id = models.CharField(max_length=255, blank=True)\n    \"\"\"Associated assistant ID. Can be empty.\"\"\"\n    created_at = models.DateTimeField(auto_now_add=True)\n    \"\"\"Date and time when the thread was created.\n    Automatically set when the thread is created.\"\"\"\n    updated_at = models.DateTimeField(auto_now=True)\n    \"\"\"Date and time when the thread was last updated.\n    Automatically set when the thread is updated.\"\"\"\n\n    class Meta:\n        verbose_name = \"Thread\"\n        verbose_name_plural = \"Threads\"\n        ordering = (\"-created_at\",)\n        indexes = (Index(F(\"created_at\").desc(), name=\"thread_created_at_desc\"),)\n\n    def __str__(self) -> str:\n        \"\"\"Return the name of the thread as the string representation of the thread.\"\"\"\n        return self.name\n\n    def __repr__(self) -> str:\n        \"\"\"Return the string representation of the thread like '<Thread name>'\"\"\"\n        return f\"<Thread {self.name}>\"\n\n    def get_messages(self, include_extra_messages: bool = False) -> list[BaseMessage]:\n        \"\"\"\n        Get LangChain messages objects from the thread.\n\n        Args:\n            include_extra_messages (bool): Whether to include non-chat messages (like tool calls).\n\n        Returns:\n            list[BaseMessage]: List of messages\n        \"\"\"\n\n        messages = messages_from_dict(\n            cast(\n                Sequence[dict[str, BaseMessage]],\n                Message.objects.filter(thread=self)\n                .order_by(\"created_at\")\n                .values_list(\"message\", flat=True),\n            )\n        )\n        if not include_extra_messages:\n            messages = [\n                m\n                for m in messages\n                if isinstance(m, HumanMessage | ChatMessage)\n                or (isinstance(m, AIMessage) and not m.tool_calls)\n            ]\n        return cast(list[BaseMessage], messages)\n
"},{"location":"reference/models-ref/#django_ai_assistant.models.Thread.name","title":"name = models.CharField(max_length=255, blank=True) class-attribute instance-attribute","text":"

Name of the thread. Can be blank.

"},{"location":"reference/models-ref/#django_ai_assistant.models.Thread.created_by","title":"created_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, related_name='ai_assistant_threads', null=True) class-attribute instance-attribute","text":"

User who created the thread. Can be null. Set to null/None when user is deleted.

"},{"location":"reference/models-ref/#django_ai_assistant.models.Thread.assistant_id","title":"assistant_id = models.CharField(max_length=255, blank=True) class-attribute instance-attribute","text":"

Associated assistant ID. Can be empty.

"},{"location":"reference/models-ref/#django_ai_assistant.models.Thread.created_at","title":"created_at = models.DateTimeField(auto_now_add=True) class-attribute instance-attribute","text":"

Date and time when the thread was created. Automatically set when the thread is created.

"},{"location":"reference/models-ref/#django_ai_assistant.models.Thread.updated_at","title":"updated_at = models.DateTimeField(auto_now=True) class-attribute instance-attribute","text":"

Date and time when the thread was last updated. Automatically set when the thread is updated.

"},{"location":"reference/models-ref/#django_ai_assistant.models.Thread.__str__","title":"__str__()","text":"

Return the name of the thread as the string representation of the thread.

Source code in django_ai_assistant/models.py
def __str__(self) -> str:\n    \"\"\"Return the name of the thread as the string representation of the thread.\"\"\"\n    return self.name\n
"},{"location":"reference/models-ref/#django_ai_assistant.models.Thread.__repr__","title":"__repr__()","text":"

Return the string representation of the thread like '<Thread name>'

Source code in django_ai_assistant/models.py

def __repr__(self) -> str:\n    \"\"\"Return the string representation of the thread like '<Thread name>'\"\"\"\n    return f\"<Thread {self.name}>\"\n
"},{"location":"reference/models-ref/#django_ai_assistant.models.Thread.get_messages","title":"get_messages(include_extra_messages=False)","text":"

Get LangChain messages objects from the thread.

Parameters:

Name Type Description Default include_extra_messages bool

Whether to include non-chat messages (like tool calls).

False

Returns:

Type Description list[BaseMessage]

list[BaseMessage]: List of messages

Source code in django_ai_assistant/models.py
def get_messages(self, include_extra_messages: bool = False) -> list[BaseMessage]:\n    \"\"\"\n    Get LangChain messages objects from the thread.\n\n    Args:\n        include_extra_messages (bool): Whether to include non-chat messages (like tool calls).\n\n    Returns:\n        list[BaseMessage]: List of messages\n    \"\"\"\n\n    messages = messages_from_dict(\n        cast(\n            Sequence[dict[str, BaseMessage]],\n            Message.objects.filter(thread=self)\n            .order_by(\"created_at\")\n            .values_list(\"message\", flat=True),\n        )\n    )\n    if not include_extra_messages:\n        messages = [\n            m\n            for m in messages\n            if isinstance(m, HumanMessage | ChatMessage)\n            or (isinstance(m, AIMessage) and not m.tool_calls)\n        ]\n    return cast(list[BaseMessage], messages)\n
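Usage sketch (thread_id is a placeholder for an existing thread's id):

from django_ai_assistant.models import Thread

thread = Thread.objects.get(id=thread_id)
chat_messages = thread.get_messages()  # human/chat messages and AI messages without tool calls
all_messages = thread.get_messages(include_extra_messages=True)  # also includes tool call messages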
"},{"location":"reference/models-ref/#django_ai_assistant.models.Message","title":"Message","text":"

Bases: Model

Message model. A message is a text that is part of a thread. A message can be sent by a user or the AI assistant.

The message data is stored as a JSON field called message.

Source code in django_ai_assistant/models.py
class Message(models.Model):\n    \"\"\"Message model. A message is a text that is part of a thread.\n    A message can be sent by a user or the AI assistant.\\n\n    The message data is stored as a JSON field called `message`.\"\"\"\n\n    id: Any  # noqa: A003\n    thread = models.ForeignKey(Thread, on_delete=models.CASCADE, related_name=\"messages\")\n    \"\"\"Thread to which the message belongs.\"\"\"\n    thread_id: Any\n    message = models.JSONField()\n    \"\"\"Message content. This is a serialized LangChain `BaseMessage` that was serialized\n    with `message_to_dict` and can be deserialized with `messages_from_dict`.\"\"\"\n    created_at = models.DateTimeField(auto_now_add=True)\n    \"\"\"Date and time when the message was created.\n    Automatically set when the message is created.\"\"\"\n\n    class Meta:\n        verbose_name = \"Message\"\n        verbose_name_plural = \"Messages\"\n        ordering = (\"created_at\",)\n        indexes = (Index(F(\"created_at\"), name=\"message_created_at\"),)\n\n    def __str__(self) -> str:\n        \"\"\"Return internal message data from `message` attribute\n        as the string representation of the message.\"\"\"\n        return json.dumps(self.message)\n\n    def __repr__(self) -> str:\n        \"\"\"Return the string representation of the message like '<Message id at thread_id>'\"\"\"\n        return f\"<Message {self.id} at {self.thread_id}>\"\n
"},{"location":"reference/models-ref/#django_ai_assistant.models.Message.thread","title":"thread = models.ForeignKey(Thread, on_delete=models.CASCADE, related_name='messages') class-attribute instance-attribute","text":"

Thread to which the message belongs.

"},{"location":"reference/models-ref/#django_ai_assistant.models.Message.message","title":"message = models.JSONField() class-attribute instance-attribute","text":"

Message content. This is a serialized LangChain BaseMessage that was serialized with message_to_dict and can be deserialized with messages_from_dict.

"},{"location":"reference/models-ref/#django_ai_assistant.models.Message.created_at","title":"created_at = models.DateTimeField(auto_now_add=True) class-attribute instance-attribute","text":"

Date and time when the message was created. Automatically set when the message is created.

"},{"location":"reference/models-ref/#django_ai_assistant.models.Message.__str__","title":"__str__()","text":"

Return internal message data from message attribute as the string representation of the message.

Source code in django_ai_assistant/models.py
def __str__(self) -> str:\n    \"\"\"Return internal message data from `message` attribute\n    as the string representation of the message.\"\"\"\n    return json.dumps(self.message)\n
"},{"location":"reference/models-ref/#django_ai_assistant.models.Message.__repr__","title":"__repr__()","text":"

Return the string representation of the message like '<Message id at thread_id>'

Source code in django_ai_assistant/models.py

def __repr__(self) -> str:\n    \"\"\"Return the string representation of the message like '<Message id at thread_id>'\"\"\"\n    return f\"<Message {self.id} at {self.thread_id}>\"\n
"},{"location":"reference/use-cases-ref/","title":"django_ai_assistant.helpers.use_cases","text":""},{"location":"reference/use-cases-ref/#django_ai_assistant.helpers.use_cases.get_assistant_cls","title":"get_assistant_cls(assistant_id, user, request=None)","text":"

Get assistant class by id.

Uses AI_ASSISTANT_CAN_RUN_ASSISTANT_FN permission to check if user can run the assistant.

Parameters:

Name Type Description Default assistant_id str

Assistant id to get

required user Any

Current user

required request HttpRequest | None

Current request, if any

None

Returns:

type[AIAssistant]: Assistant class with the given id

Raises:

AIAssistantNotDefinedError: If assistant with the given id is not found

AIUserNotAllowedError: If user is not allowed to use the assistant

Source code in django_ai_assistant/helpers/use_cases.py
def get_assistant_cls(\n    assistant_id: str,\n    user: Any,\n    request: HttpRequest | None = None,\n) -> type[AIAssistant]:\n    \"\"\"Get assistant class by id.\\n\n    Uses `AI_ASSISTANT_CAN_RUN_ASSISTANT_FN` permission to check if user can run the assistant.\n\n    Args:\n        assistant_id (str): Assistant id to get\n        user (Any): Current user\n        request (HttpRequest | None): Current request, if any\n    Returns:\n        type[AIAssistant]: Assistant class with the given id\n    Raises:\n        AIAssistantNotDefinedError: If assistant with the given id is not found\n        AIUserNotAllowedError: If user is not allowed to use the assistant\n    \"\"\"\n    if assistant_id not in AIAssistant.get_cls_registry():\n        raise AIAssistantNotDefinedError(f\"Assistant with id={assistant_id} not found\")\n    assistant_cls = AIAssistant.get_cls(assistant_id)\n    if not can_run_assistant(\n        assistant_cls=assistant_cls,\n        user=user,\n        request=request,\n    ):\n        raise AIUserNotAllowedError(\"User is not allowed to use this assistant\")\n    return assistant_cls\n
"},{"location":"reference/use-cases-ref/#django_ai_assistant.helpers.use_cases.get_single_assistant_info","title":"get_single_assistant_info(assistant_id, user, request=None)","text":"

Get assistant info by id. Returns a dictionary with the assistant id and name.

Uses AI_ASSISTANT_CAN_RUN_ASSISTANT_FN permission to check if user can see the assistant.

Parameters:

Name Type Description Default assistant_id str

Assistant id to get

required user Any

Current user

required request HttpRequest | None

Current request, if any

None

Returns:

dict[str, str]: dict like {\"id\": \"personal_ai\", \"name\": \"Personal AI\"}

Raises:

AIAssistantNotDefinedError: If assistant with the given id is not found

AIUserNotAllowedError: If user is not allowed to see the assistant

Source code in django_ai_assistant/helpers/use_cases.py
def get_single_assistant_info(\n    assistant_id: str,\n    user: Any,\n    request: HttpRequest | None = None,\n) -> dict[str, str]:\n    \"\"\"Get assistant info id. Returns a dictionary with the assistant id and name.\\n\n    Uses `AI_ASSISTANT_CAN_RUN_ASSISTANT_FN` permission to check if user can see the assistant.\n\n    Args:\n        assistant_id (str): Assistant id to get\n        user (Any): Current user\n        request (HttpRequest | None): Current request, if any\n    Returns:\n        dict[str, str]: dict like `{\"id\": \"personal_ai\", \"name\": \"Personal AI\"}`\n    Raises:\n        AIAssistantNotDefinedError: If assistant with the given id is not found\n        AIUserNotAllowedError: If user is not allowed to see the assistant\n    \"\"\"\n    assistant_cls = get_assistant_cls(assistant_id, user, request)\n\n    return {\n        \"id\": assistant_id,\n        \"name\": assistant_cls.name,\n    }\n
"},{"location":"reference/use-cases-ref/#django_ai_assistant.helpers.use_cases.get_assistants_info","title":"get_assistants_info(user, request=None)","text":"

Get all assistants info. Returns a list of dictionaries with the assistant id and name.

Uses AI_ASSISTANT_CAN_RUN_ASSISTANT_FN permission to check the assistants the user can see, and returns only the ones the user can see.

Parameters:

Name Type Description Default user Any

Current user

required request HttpRequest | None

Current request, if any

None

Returns:

list[dict[str, str]]: List of dicts like [{\"id\": \"personal_ai\", \"name\": \"Personal AI\"}, ...]

Source code in django_ai_assistant/helpers/use_cases.py
def get_assistants_info(\n    user: Any,\n    request: HttpRequest | None = None,\n) -> list[dict[str, str]]:\n    \"\"\"Get all assistants info. Returns a list of dictionaries with the assistant id and name.\\n\n    Uses `AI_ASSISTANT_CAN_RUN_ASSISTANT_FN` permission to check the assistants the user can see,\n    and returns only the ones the user can see.\n\n    Args:\n        user (Any): Current user\n        request (HttpRequest | None): Current request, if any\n    Returns:\n        list[dict[str, str]]: List of dicts like `[{\"id\": \"personal_ai\", \"name\": \"Personal AI\"}, ...]`\n    \"\"\"\n    assistant_info_list = []\n    for assistant_id in AIAssistant.get_cls_registry().keys():\n        try:\n            info = get_single_assistant_info(assistant_id, user, request)\n            assistant_info_list.append(info)\n        except AIUserNotAllowedError:\n            continue\n    return assistant_info_list\n
"},{"location":"reference/use-cases-ref/#django_ai_assistant.helpers.use_cases.create_message","title":"create_message(assistant_id, thread, user, content, request=None)","text":"

Create a message in a thread, and right after runs the assistant to get the AI response.

Uses AI_ASSISTANT_CAN_RUN_ASSISTANT_FN permission to check if user can run the assistant.

Uses AI_ASSISTANT_CAN_CREATE_MESSAGE_FN permission to check if user can create a message in the thread.

Parameters:

Name Type Description Default assistant_id str

Assistant id to use to get the AI response

required thread Thread

Thread where to create the message

required user Any

Current user

required content Any

Message content, usually a string

required request HttpRequest | None

Current request, if any

None

Returns:

dict: The output of the assistant, structured like {\"output\": \"assistant response\", \"history\": ...}

Raises:

AIUserNotAllowedError: If user is not allowed to create messages in the thread

Source code in django_ai_assistant/helpers/use_cases.py
def create_message(\n    assistant_id: str,\n    thread: Thread,\n    user: Any,\n    content: Any,\n    request: HttpRequest | None = None,\n) -> dict:\n    \"\"\"Create a message in a thread, and right after runs the assistant to get the AI response.\\n\n    Uses `AI_ASSISTANT_CAN_RUN_ASSISTANT_FN` permission to check if user can run the assistant.\\n\n    Uses `AI_ASSISTANT_CAN_CREATE_MESSAGE_FN` permission to check if user can create a message in the thread.\n\n    Args:\n        assistant_id (str): Assistant id to use to get the AI response\n        thread (Thread): Thread where to create the message\n        user (Any): Current user\n        content (Any): Message content, usually a string\n        request (HttpRequest | None): Current request, if any\n    Returns:\n        dict: The output of the assistant,\n            structured like `{\"output\": \"assistant response\", \"history\": ...}`\n    Raises:\n        AIUserNotAllowedError: If user is not allowed to create messages in the thread\n    \"\"\"\n    assistant_cls = get_assistant_cls(assistant_id, user, request)\n\n    if not can_create_message(thread=thread, user=user, request=request):\n        raise AIUserNotAllowedError(\"User is not allowed to create messages in this thread\")\n\n    # TODO: Check if we can separate the message creation from the invoke\n    assistant = assistant_cls(user=user, request=request)\n    assistant_message = assistant.invoke(\n        {\"input\": content},\n        thread_id=thread.id,\n    )\n    return assistant_message\n
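A view-level usage sketch (the assistant id and message content are illustrative; thread is an existing Thread the user may post to):

from django_ai_assistant.helpers import use_cases

result = use_cases.create_message(
    assistant_id="weather_assistant",
    thread=thread,
    user=request.user,
    content="What's the weather in Recife?",
    request=request,
)
answer = result["output"]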
"},{"location":"reference/use-cases-ref/#django_ai_assistant.helpers.use_cases.create_thread","title":"create_thread(name, user, assistant_id=None, request=None)","text":"

Create a thread.

Uses AI_ASSISTANT_CAN_CREATE_THREAD_FN permission to check if user can create a thread.

Parameters:

  • name (str): Thread name. Required.
  • assistant_id (str | None): Assistant ID to associate the thread with. If empty or None, the thread is not associated with any assistant. Defaults to None.
  • user (Any): Current user. Required.
  • request (HttpRequest | None): Current request, if any. Defaults to None.

Returns: Thread: Created thread model instance

Raises: AIUserNotAllowedError: If user is not allowed to create threads

Source code in django_ai_assistant/helpers/use_cases.py
def create_thread(\n    name: str,\n    user: Any,\n    assistant_id: str | None = None,\n    request: HttpRequest | None = None,\n) -> Thread:\n    \"\"\"Create a thread.\\n\n    Uses `AI_ASSISTANT_CAN_CREATE_THREAD_FN` permission to check if user can create a thread.\n\n    Args:\n        name (str): Thread name\n        assistant_id (str | None): Assistant ID to associate the thread with.\n            If empty or None, the thread is not associated with any assistant.\n        user (Any): Current user\n        request (HttpRequest | None): Current request, if any\n    Returns:\n        Thread: Created thread model instance\n    Raises:\n        AIUserNotAllowedError: If user is not allowed to create threads\n    \"\"\"\n    if not can_create_thread(user=user, request=request):\n        raise AIUserNotAllowedError(\"User is not allowed to create threads\")\n\n    thread = Thread.objects.create(name=name, created_by=user, assistant_id=assistant_id or \"\")\n    return thread\n
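
A minimal usage sketch (the \"weather_assistant\" id is just an example):

from django_ai_assistant.use_cases import create_thread\n\nthread = create_thread(name=\"Weather Chat\", user=request.user, assistant_id=\"weather_assistant\")\n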
"},{"location":"reference/use-cases-ref/#django_ai_assistant.helpers.use_cases.get_single_thread","title":"get_single_thread(thread_id, user, request=None)","text":"

Get a single thread by id.

Uses AI_ASSISTANT_CAN_VIEW_THREAD_FN permission to check if user can view the thread.

Parameters:

  • thread_id (str): Thread id to get. Required.
  • user (Any): Current user. Required.
  • request (HttpRequest | None): Current request, if any. Defaults to None.

Returns: Thread: Thread model instance

Raises: AIUserNotAllowedError: If user is not allowed to view the thread

Source code in django_ai_assistant/helpers/use_cases.py
def get_single_thread(\n    thread_id: Any,\n    user: Any,\n    request: HttpRequest | None = None,\n) -> Thread:\n    \"\"\"Get a single thread by id.\\n\n    Uses `AI_ASSISTANT_CAN_VIEW_THREAD_FN` permission to check if user can view the thread.\n\n    Args:\n        thread_id (str): Thread id to get\n        user (Any): Current user\n        request (HttpRequest | None): Current request, if any\n    Returns:\n        Thread: Thread model instance\n    Raises:\n        AIUserNotAllowedError: If user is not allowed to view the thread\n    \"\"\"\n    thread = Thread.objects.get(id=thread_id)\n\n    if not can_view_thread(thread=thread, user=user, request=request):\n        raise AIUserNotAllowedError(\"User is not allowed to view this thread\")\n\n    return thread\n
"},{"location":"reference/use-cases-ref/#django_ai_assistant.helpers.use_cases.get_threads","title":"get_threads(user, assistant_id=None, request=None)","text":"

Get all threads for the user.

Uses AI_ASSISTANT_CAN_VIEW_THREAD_FN permission to check which threads the user can see, and returns only those.

Parameters:

  • user (Any): Current user. Required.
  • assistant_id (str | None): Assistant ID to filter threads by. If empty or None, all threads for the user are returned. Defaults to None.
  • request (HttpRequest | None): Current request, if any. Defaults to None.

Returns: list[Thread]: List of thread model instances

Source code in django_ai_assistant/helpers/use_cases.py
def get_threads(\n    user: Any,\n    assistant_id: str | None = None,\n    request: HttpRequest | None = None,\n) -> list[Thread]:\n    \"\"\"Get all threads for the user.\\n\n    Uses `AI_ASSISTANT_CAN_VIEW_THREAD_FN` permission to check the threads the user can see,\n    and returns only the ones the user can see.\n\n    Args:\n        user (Any): Current user\n        assistant_id (str | None): Assistant ID to filter threads by.\n            If empty or None, all threads for the user are returned.\n        request (HttpRequest | None): Current request, if any\n    Returns:\n        list[Thread]: List of thread model instances\n    \"\"\"\n    threads = Thread.objects.filter(created_by=user)\n\n    if assistant_id:\n        threads = threads.filter(assistant_id=assistant_id)\n\n    return list(\n        threads.filter(\n            id__in=[\n                thread.id\n                for thread in threads\n                if can_view_thread(thread=thread, user=user, request=request)\n            ]\n        )\n    )\n
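
A minimal usage sketch, with and without the assistant filter (the assistant id is an example):

from django_ai_assistant.use_cases import get_threads\n\nall_threads = get_threads(user=request.user)\nweather_threads = get_threads(user=request.user, assistant_id=\"weather_assistant\")\n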
"},{"location":"reference/use-cases-ref/#django_ai_assistant.helpers.use_cases.update_thread","title":"update_thread(thread, name, user, request=None)","text":"

Update thread name.

Uses AI_ASSISTANT_CAN_UPDATE_THREAD_FN permission to check if user can update the thread.

Parameters:

  • thread (Thread): Thread model instance to update. Required.
  • name (str): New thread name. Required.
  • user (Any): Current user. Required.
  • request (HttpRequest | None): Current request, if any. Defaults to None.

Returns: Thread: Updated thread model instance

Raises: AIUserNotAllowedError: If user is not allowed to update the thread

Source code in django_ai_assistant/helpers/use_cases.py
def update_thread(\n    thread: Thread,\n    name: str,\n    user: Any,\n    request: HttpRequest | None = None,\n) -> Thread:\n    \"\"\"Update thread name.\\n\n    Uses `AI_ASSISTANT_CAN_UPDATE_THREAD_FN` permission to check if user can update the thread.\n\n    Args:\n        thread (Thread): Thread model instance to update\n        name (str): New thread name\n        user (Any): Current user\n        request (HttpRequest | None): Current request, if any\n    Returns:\n        Thread: Updated thread model instance\n    Raises:\n        AIUserNotAllowedError: If user is not allowed to update the thread\n    \"\"\"\n    if not can_update_thread(thread=thread, user=user, request=request):\n        raise AIUserNotAllowedError(\"User is not allowed to update this thread\")\n\n    thread.name = name\n    thread.save()\n    return thread\n
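
A minimal usage sketch, assuming the thread was fetched with get_single_thread:

from django_ai_assistant.use_cases import get_single_thread, update_thread\n\nthread = get_single_thread(thread_id=thread_id, user=request.user)\nthread = update_thread(thread=thread, name=\"Renamed chat\", user=request.user)\n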
"},{"location":"reference/use-cases-ref/#django_ai_assistant.helpers.use_cases.delete_thread","title":"delete_thread(thread, user, request=None)","text":"

Delete a thread.

Uses AI_ASSISTANT_CAN_DELETE_THREAD_FN permission to check if user can delete the thread.

Parameters:

  • thread (Thread): Thread model instance to delete. Required.
  • user (Any): Current user. Required.
  • request (HttpRequest | None): Current request, if any. Defaults to None.

Raises: AIUserNotAllowedError: If user is not allowed to delete the thread

Source code in django_ai_assistant/helpers/use_cases.py
def delete_thread(\n    thread: Thread,\n    user: Any,\n    request: HttpRequest | None = None,\n) -> None:\n    \"\"\"Delete a thread.\\n\n    Uses `AI_ASSISTANT_CAN_DELETE_THREAD_FN` permission to check if user can delete the thread.\n\n    Args:\n        thread (Thread): Thread model instance to delete\n        user (Any): Current user\n        request (HttpRequest | None): Current request, if any\n    Raises:\n        AIUserNotAllowedError: If user is not allowed to delete the thread\n    \"\"\"\n    if not can_delete_thread(thread=thread, user=user, request=request):\n        raise AIUserNotAllowedError(\"User is not allowed to delete this thread\")\n\n    thread.delete()\n
"},{"location":"reference/use-cases-ref/#django_ai_assistant.helpers.use_cases.get_thread_messages","title":"get_thread_messages(thread, user, request=None)","text":"

Get all messages in a thread.

Uses AI_ASSISTANT_CAN_VIEW_THREAD_FN permission to check if user can view the thread.

Parameters:

  • thread (Thread): Thread model instance to get messages from. Required.
  • user (Any): Current user. Required.
  • request (HttpRequest | None): Current request, if any. Defaults to None.

Returns: list[BaseMessage]: List of message instances

Source code in django_ai_assistant/helpers/use_cases.py
def get_thread_messages(\n    thread: Thread,\n    user: Any,\n    request: HttpRequest | None = None,\n) -> list[BaseMessage]:\n    \"\"\"Get all messages in a thread.\\n\n    Uses `AI_ASSISTANT_CAN_VIEW_THREAD_FN` permission to check if user can view the thread.\n\n    Args:\n        thread (Thread): Thread model instance to get messages from\n        user (Any): Current user\n        request (HttpRequest | None): Current request, if any\n    Returns:\n        list[BaseMessage]: List of message instances\n    \"\"\"\n    # TODO: have more permissions for threads? View thread permission?\n    if user != thread.created_by:\n        raise AIUserNotAllowedError(\"User is not allowed to view messages in this thread\")\n\n    return thread.get_messages(include_extra_messages=False)\n
"},{"location":"reference/use-cases-ref/#django_ai_assistant.helpers.use_cases.delete_message","title":"delete_message(message, user, request=None)","text":"

Delete a message.

Uses AI_ASSISTANT_CAN_DELETE_MESSAGE_FN permission to check if user can delete the message.

Parameters:

  • message (Message): Message model instance to delete. Required.
  • user (Any): Current user. Required.
  • request (HttpRequest | None): Current request, if any. Defaults to None.

Raises: AIUserNotAllowedError: If user is not allowed to delete the message

Source code in django_ai_assistant/helpers/use_cases.py
def delete_message(\n    message: Message,\n    user: Any,\n    request: HttpRequest | None = None,\n):\n    \"\"\"Delete a message.\\n\n    Uses `AI_ASSISTANT_CAN_DELETE_MESSAGE_FN` permission to check if user can delete the message.\n\n    Args:\n        message (Message): Message model instance to delete\n        user (Any): Current user\n        request (HttpRequest | None): Current request, if any\n    Raises:\n        AIUserNotAllowedError: If user is not allowed to delete the message\n    \"\"\"\n    if not can_delete_message(message=message, user=user, request=request):\n        raise AIUserNotAllowedError(\"User is not allowed to delete this message\")\n\n    return message.delete()\n
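
A minimal usage sketch; fetching the Message instance directly via the model manager is an assumption for illustration:

from django_ai_assistant.models import Message\nfrom django_ai_assistant.use_cases import delete_message\n\n# Fetch the message to delete (hypothetical message_id from the request):\nmessage = Message.objects.get(id=message_id)\ndelete_message(message=message, user=request.user)\n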
"}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Django AI Assistant","text":"

Combine the power of Large Language Models with Django's productivity to build intelligent applications.

Regardless of the feasibility of AGI, AI assistants are a new paradigm for computation. AI agents and assistants allow devs to easily build applications that make smart decisions.

The latest LLMs from major AI providers have a \"killer feature\" called Tool Calling, which enables AI models to call methods from Django's side, and essentially do anything a Django view can, such as DB queries, file management, external API calls, etc.

While users commonly interact with LLMs via conversations, AI Assistants can do a lot with any kind of string input, including JSON. Your end users won't even realize that an LLM is doing the heavy lifting behind the scenes! Some ideas for innovative AI assistants include:

  • A movie recommender chatbot that helps users manage their movie backlogs
  • An autofill button for forms of your application
  • Tailored email reminders that consider users' activity
  • A real-time tourist guide that recommends attractions given the user's current location

We provide examples for some of those applications. Get Started now!

"},{"location":"changelog/","title":"Changelog","text":"

This changelog references changes made both to the Django backend, django-ai-assistant, and the frontend TypeScript client, django-ai-assistant-client.

Note

The backend and the frontend are versioned together, that is, they have the same version number. When you update the backend, you should also update the frontend to the same version.

"},{"location":"changelog/#0.1.0","title":"0.1.0 October 11, 2024","text":"
  • Refactor the code to use LangGraph instead of LangChain LCEL (except for RAG functionality, see the get_history_aware_retriever method).
  • Store all messages in the Thread model, including tool calls and their outputs.
  • Allow separation of threads per assistant: assistant_id in the Thread model.
  • New updateThread function from useThreadList hook.
  • Improved examples:
    • Add markdown rendering to HTMX example.
    • Better Movie Recommendation example.
    • Better Tour Guide example.
"},{"location":"changelog/#0.0.4","title":"0.0.4 July 5, 2024","text":"
  • Fix frontend README.
"},{"location":"changelog/#0.0.3","title":"0.0.3 July 5, 2024","text":"
  • Less restrictive Python version in pyproject.toml. Support future Python versions.
"},{"location":"changelog/#0.0.2","title":"0.0.2 June 28, 2024","text":"
  • Add support for Django 4.2 LTS
  • Add support for Python 3.10 and 3.11
"},{"location":"changelog/#0.0.1","title":"0.0.1 June 25, 2024","text":"
  • Initial release
"},{"location":"contributing/","title":"Contributing","text":"

We can always use your help to improve Django AI Assistant! Please feel free to tackle existing issues. If you have a new idea, please create a thread on Discussions.

Please follow this guide to learn more about how to develop and test the project locally, before opening a pull request.

"},{"location":"contributing/#local-dev-setup","title":"Local Dev Setup","text":""},{"location":"contributing/#clone-the-repo","title":"Clone the repo","text":"
git clone git@github.com:vintasoftware/django-ai-assistant.git\n
"},{"location":"contributing/#install-development-tools","title":"Install development tools","text":"

This project uses Poetry for dependency and virtual environment management.

If you need to install the version of Python recommended for the project, you can use Pyenv.

For installing Node, we recommend NVM.

"},{"location":"contributing/#install-dependencies","title":"Install dependencies","text":""},{"location":"contributing/#backend","title":"Backend","text":"

Go to the project root. To activate the virtual environment, run

poetry shell\n

Install the Python dependencies:

poetry install\n

If you encounter an error regarding the Python version required for the project, you can use pyenv to install the appropriate version based on .python-version:

pyenv install\n
"},{"location":"contributing/#frontend","title":"Frontend","text":"

Go to the frontend directory and install the Node dependencies:

cd frontend\npnpm install\n
"},{"location":"contributing/#install-pre-commit-hooks","title":"Install pre-commit hooks","text":"
pre-commit install\n

It's critical to run the pre-commit hooks before pushing your code, to follow the project's code style and avoid linting errors.

"},{"location":"contributing/#updating-the-openapi-schema","title":"Updating the OpenAPI schema","text":"

It's critical to update the OpenAPI schema when you make changes to django_ai_assistant/api/views.py or related files:

poetry run python manage.py generate_openapi_schema --output frontend/openapi_schema.json\nsh -c 'cd frontend && pnpm run generate-client'\n
"},{"location":"contributing/#developing-with-the-example-project","title":"Developing with the example project","text":"

Run the frontend project in build:watch mode:

cd frontend\npnpm run build:watch\n

Go to the example project, install the dependencies, and link the frontend project:

cd ..  # back to project root directory\ncd example\npnpm install\npnpm remove django-ai-assistant-client  # remove the distributed package to use the local one\npnpm link ../frontend\n

Then follow the instructions in the example README to run the example project.

"},{"location":"contributing/#tests","title":"Tests","text":"

Before running tests, copy the .env.example file to .env.tests.

cp .env.example .env.tests\n

Run tests with:

poetry run pytest\n

The tests use pytest-vcr to record and replay HTTP requests to AI models.

If you're implementing a new test that needs to call a real AI model, you need to set the OPENAI_API_KEY environment variable with a real API key in the .env.tests file.

Then, run the tests in record mode:

poetry run pytest --record-mode=once\n

To run frontend tests:

cd frontend\npnpm run test\n
"},{"location":"contributing/#documentation","title":"Documentation","text":"

We use mkdocs-material to generate the documentation from markdown files. Check the files in the docs directory.

To run the documentation locally, you need to run:

poetry run mkdocs serve\n
"},{"location":"contributing/#release","title":"Release","text":"

Info

The backend and the frontend are versioned together, that is, they should have the same version number.

To release and publish a new version, follow these steps:

  1. Update the version in pyproject.toml, frontend/package.json and example/package.json.
  2. Re-install the local version of the Python project: poetry install
  3. In the project root, run poetry run python manage.py generate_openapi_schema --output frontend/openapi_schema.json to update the OpenAPI schema.
  4. Re-install the local version of the frontend project:
cd frontend\npnpm install\npnpm run build\n
  5. In the frontend directory, run pnpm run generate-client to update the TypeScript client with the new OpenAPI schema.
  6. Update the changelog in CHANGELOG.md.
  7. Open a PR with the changes.
  8. Once the PR is merged, run the Release GitHub Action to create a draft release.
  9. Review the draft release, ensure the description has at least the associated changelog entry, and publish it.
  10. Once the release is published, the Publish GitHub Action will automatically run to publish the new version to PyPI and npm. Check the logs to ensure the publication was successful.
"},{"location":"frontend/","title":"Frontend","text":"

Django AI Assistant has a frontend TypeScript client to facilitate the integration with the Django backend.

","boost":2},{"location":"frontend/#installation","title":"Installation","text":"

Install the frontend client using pnpm:

pnpm install django-ai-assistant-client\n
","boost":2},{"location":"frontend/#client-configuration","title":"Client Configuration","text":"

First, you'll need to check what base path you used when setting up the Django AI Assistant backend. The base path is the URL prefix that the Django AI Assistant API is served under. In the example below, the base path is ai-assistant:

myproject/urls.py
from django.urls import include, path\n\nurlpatterns = [\n    path(\"ai-assistant/\", include(\"django_ai_assistant.urls\")),\n    ...\n]\n

Before using the frontend client, import the configAIAssistant and configure it with the base path. If you're using React, a good place to do this is in the App.tsx file:

example/assets/js/App.tsx
import { configAIAssistant } from \"django-ai-assistant-client\";\nimport React from \"react\";\n\nconfigAIAssistant({ BASE: \"ai-assistant\" });\n

Note that in the configuration above the Django server and the frontend client use the same base path. If you're using a different base path, make sure to adjust the configuration accordingly.

Now you can use the frontend client to interact with the Django AI Assistant backend. Here's an example of how to create a message:

import { aiCreateThreadMessage } from \"django-ai-assistant-client\";\n\nawait aiCreateThreadMessage({\n    threadId: 1,\n    requestBody: {\n        assistant_id: 1,\n        message: \"What's the weather like today in NYC?\"\n    }\n});\n
","boost":2},{"location":"frontend/#advanced-client-configuration","title":"Advanced Client Configuration","text":"

By default the frontend client is authenticated via cookies (CREDENTIALS === 'include'). You can configure the client differently. Below is the default config:

configAIAssistant({\n    // Base path of the Django AI Assistant API, can be a relative or full URL:\n    BASE: '',\n    // Credentials mode for fetch requests:\n    CREDENTIALS: 'include',\n    // Record<string, unknown> with headers to be sent with each request:\n    HEADERS: undefined,\n    // Basic authentication username:\n    USERNAME: undefined,\n    // Basic authentication password:\n    PASSWORD: undefined,\n    // Token for authentication:\n    TOKEN: undefined,\n});\n
","boost":2},{"location":"frontend/#client-functions","title":"Client Functions","text":"

The frontend client provides the following functions:

","boost":2},{"location":"frontend/#ailistassistants","title":"aiListAssistants","text":"

List all assistants the user has access to. Param: none Return: a Promise that resolves to an Array of Assistant.

","boost":2},{"location":"frontend/#aigetassistant","title":"aiGetAssistant","text":"

Get an assistant by ID. Param: { assistantId: string } Return: Promise that resolves to Assistant.

","boost":2},{"location":"frontend/#ailistthreads","title":"aiListThreads","text":"

List all threads the user has access to. Param: none Return: a Promise that resolves to an Array of Thread.

","boost":2},{"location":"frontend/#aicreatethread","title":"aiCreateThread","text":"

Create a new thread. Param: { requestBody: { name: string } } Return: a Promise that resolves to a Thread.

","boost":2},{"location":"frontend/#aigetthread","title":"aiGetThread","text":"

Get a thread by ID. Param: { threadId: string } Return: a Promise that resolves to a Thread.

","boost":2},{"location":"frontend/#aiupdatethread","title":"aiUpdateThread","text":"

Update a thread by ID. Param: { threadId: string, requestBody: { name: string, assistant_id: string } } Return: a Promise that resolves to a Thread.

","boost":2},{"location":"frontend/#aideletethread","title":"aiDeleteThread","text":"

Delete a thread by ID. Param: { threadId: string } Return: a Promise that resolves to void.

","boost":2},{"location":"frontend/#ailistthreadmessages","title":"aiListThreadMessages","text":"

List all messages in a thread. Param: { threadId: string } Return: a Promise that resolves to an Array of ThreadMessage.

","boost":2},{"location":"frontend/#aicreatethreadmessage","title":"aiCreateThreadMessage","text":"

Create a new message in a thread. Param: { threadId: string, requestBody: { assistant_id: string, message: string } } Return: a Promise that resolves to void.

","boost":2},{"location":"frontend/#aideletethreadmessage","title":"aiDeleteThreadMessage","text":"

Delete a message in a thread. Param: { threadId: string, messageId: string } Return: a Promise that resolves to void.

Note

These functions correspond to the Django AI Assistant API endpoints. Make sure to read the API documentation to learn about permissions.

","boost":2},{"location":"frontend/#type-definitions","title":"Type definitions","text":"

The type definitions are available at frontend/src/client/types.gen.ts. You can import the schemas directly from django-ai-assistant-client root:

import {\n    Assistant,\n    Thread,\n    ThreadMessage\n} from \"django-ai-assistant-client\";\n
","boost":2},{"location":"frontend/#react-hooks","title":"React Hooks","text":"

The frontend client also provides React hooks to streamline application building.

Warning

You still have to call configAIAssistant on your application before using the hooks.

","boost":2},{"location":"frontend/#useassistantlist","title":"useAssistantList","text":"

React hook to manage the list of Assistants. Use like this:

import { useAssistantList } from \"django-ai-assistant-client\";\n\nexport function MyComponent() {\n    const {\n        assistants,\n        fetchAssistants,\n        loadingFetchAssistants\n    } = useAssistantList();\n    // ...\n}\n
","boost":2},{"location":"frontend/#useassistant","title":"useAssistant","text":"

React hook to manage a single Assistant. Use like this:

import { useAssistant } from \"django-ai-assistant-client\";\n\nexport function MyComponent() {\n    const {\n        assistant,\n        fetchAssistant,\n        loadingFetchAssistant\n    } = useAssistant();\n    // ...\n}\n
","boost":2},{"location":"frontend/#usethreadlist","title":"useThreadList","text":"

React hook to list, create, update, and delete Threads. Use like this:

import { useThreadList } from \"django-ai-assistant-client\";\n\nexport function MyComponent() {\n    const {\n        threads,\n        fetchThreads,\n        createThread,\n        updateThread,\n        deleteThread,\n        loadingFetchThreads,\n        loadingCreateThread,\n        loadingUpdateThread,\n        loadingDeleteThread\n    } = useThreadList();\n    // ...\n}\n
","boost":2},{"location":"frontend/#usemessagelist","title":"useMessageList","text":"

React hook to list, create, and delete Messages. Use like this:

import { useMessageList, Thread } from \"django-ai-assistant-client\";\n\nexport function MyComponent() {\n    const [activeThread, setActiveThread] = useState<Thread | null>(null);\n    const {\n        messages,\n        fetchMessages,\n        createMessage,\n        deleteMessage,\n        loadingFetchMessages,\n        loadingCreateMessage,\n        loadingDeleteMessage\n    } = useMessageList({ threadId: activeThread?.id });\n    // ...\n}\n
","boost":2},{"location":"frontend/#example-project","title":"Example project","text":"

The example project makes good use of the React hooks to build LLM-powered applications. Make sure to check it out!

","boost":2},{"location":"get-started/","title":"Get started","text":"","boost":2},{"location":"get-started/#prerequisites","title":"Prerequisites","text":"
  • Python:
  • Django:
","boost":2},{"location":"get-started/#how-to-install","title":"How to install","text":"

Install Django AI Assistant package:

pip install django-ai-assistant\n

Add Django AI Assistant to your Django project's INSTALLED_APPS:

myproject/settings.py
INSTALLED_APPS = [\n    ...\n    'django_ai_assistant',\n    ...\n]\n

Run the migrations:

python manage.py migrate\n

Learn how to use the package in the Tutorial section.

","boost":2},{"location":"support/","title":"Support","text":"

If you have any questions or need help, feel free to create a thread on GitHub Discussions.

In case you're facing a bug, please check existing issues and create a new one if needed.

"},{"location":"support/#commercial-support","title":"Commercial Support","text":"

This is an open-source project maintained by Vinta Software. We are always looking for exciting work! If you need any commercial support, feel free to get in touch: contact@vinta.com.br

"},{"location":"tutorial/","title":"Tutorial","text":"

In this tutorial, you will learn how to use Django AI Assistant to supercharge your Django project with LLM capabilities.

","boost":2},{"location":"tutorial/#prerequisites","title":"Prerequisites","text":"

Make sure you properly configured Django AI Assistant as described in the Get Started guide.

","boost":2},{"location":"tutorial/#setting-up-api-keys","title":"Setting up API keys","text":"

The tutorial below uses OpenAI's gpt-4o model, so make sure you have OPENAI_API_KEY set as an environment variable for your Django project. You can also use other models; keep reading to learn more. Just make sure their API keys are properly set.

Note

An easy way to set environment variables is to use a .env file in your project's root directory and use python-dotenv to load them. Our example project uses this approach.
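
For example, a minimal sketch of this approach (the key value is a placeholder):

# .env (project root)\nOPENAI_API_KEY=sk-your-key-here\n\n# myproject/settings.py: load the .env file early, before any assistant runs\nfrom dotenv import load_dotenv\nload_dotenv()\n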

","boost":2},{"location":"tutorial/#what-ai-assistants-can-do","title":"What AI Assistants can do","text":"

AI Assistants are LLMs that can answer user queries as ChatGPT does, i.e. taking strings as input and producing strings as output. But when integrated with Django, they can also do anything a Django view can, such as accessing the database, checking permissions, sending emails, downloading and uploading media files, etc. This is possible by defining \"tools\" the AI can use. These tools are methods in an AI Assistant class on the Django side.

","boost":2},{"location":"tutorial/#defining-an-ai-assistant","title":"Defining an AI Assistant","text":"","boost":2},{"location":"tutorial/#registering","title":"Registering","text":"

To create an AI Assistant, you need to:

  1. Create an ai_assistants.py file;
  2. Define a class that inherits from AIAssistant;
  3. Provide an id, a name, some instructions for the LLM (a system prompt), and a model name:
myapp/ai_assistants.py
from django_ai_assistant import AIAssistant\n\nclass WeatherAIAssistant(AIAssistant):\n    id = \"weather_assistant\"\n    name = \"Weather Assistant\"\n    instructions = \"You are a weather bot.\"\n    model = \"gpt-4o\"\n
","boost":2},{"location":"tutorial/#defining-tools","title":"Defining tools","text":"

Useful tools give the LLM abilities it doesn't have out of the box, such as getting the current date or finding the current weather by calling an API.

Use the @method_tool decorator to define a tool method in the AI Assistant:

myapp/ai_assistants.py
from django.utils import timezone\nfrom django_ai_assistant import AIAssistant, method_tool\nimport json\n\nclass WeatherAIAssistant(AIAssistant):\n    id = \"weather_assistant\"\n    name = \"Weather Assistant\"\n    instructions = \"You are a weather bot.\"\n    model = \"gpt-4o\"\n\n    def get_instructions(self):\n        return f\"{self.instructions} Today is {timezone.now().isoformat()}.\"\n\n    @method_tool\n    def get_weather(self, location: str) -> str:\n        \"\"\"Fetch the current weather data for a location\"\"\"\n        return json.dumps({\n            \"location\": location,\n            \"temperature\": \"25\u00b0C\",\n            \"weather\": \"sunny\"\n        })  # imagine some weather API here, this is just a placeholder\n

The get_weather method is a tool that the AI Assistant can use to get the current weather for a location, when the user asks for it. The tool method must be fully type-hinted (all parameters and return value), and it must include a descriptive docstring. This is necessary for the LLM model to understand the tool's purpose.

A conversation with this Weather Assistant looks like this:

User: What's the weather in New York City?\nAI: The weather in NYC is sunny with a temperature of 25\u00b0C.\n

Note

State-of-the-art models such as gpt-4o can process JSON well. You can return a json.dumps(api_output) from a tool method and the model will be able to process it before responding to the user.

","boost":2},{"location":"tutorial/#tool-parameters","title":"Tool parameters","text":"

It's possible to define more complex parameters for tools. As long as they're JSON serializable, the underlying LLM model should be able to call tools with the right arguments.

In the MovieRecommendationAIAssistant from the example project, we have a reorder_backlog tool method that receives a list of IMDb URLs that represent the user's movie backlog order. Note the Sequence[str] parameter:

example/movies/ai_assistants.py
from django_ai_assistant import AIAssistant, method_tool\n\nclass MovieRecommendationAIAssistant(AIAssistant):\n    ...\n\n    @method_tool\n    def reorder_backlog(self, imdb_url_list: Sequence[str]) -> str:\n        \"\"\"Reorder movies in user's backlog.\"\"\"\n        ...\n

In WeatherAIAssistant, another assistant from the example project, we have a fetch_forecast_weather method tool with an args_schema parameter that defines a JSON schema for the tool arguments:

example/weather/ai_assistants.py
from django_ai_assistant import AIAssistant, method_tool, BaseModel, Field\n\nclass WeatherAIAssistant(AIAssistant):\n    ...\n\n    class FetchForecastWeatherInput(BaseModel):\n        location: str = Field(description=\"Location to fetch the forecast weather for\")\n        forecast_date: date = Field(description=\"Date in the format 'YYYY-MM-DD'\")\n\n    @method_tool(args_schema=FetchForecastWeatherInput)\n    def fetch_forecast_weather(self, location, forecast_date) -> dict:\n        \"\"\"Fetch the forecast weather data for a location\"\"\"\n        # forecast_date is a `date` object here\n        ...\n

Note

It's important to provide a description for each field from args_schema. This improves the LLM's understanding of the tool's arguments.

","boost":2},{"location":"tutorial/#using-django-logic-in-tools","title":"Using Django logic in tools","text":"

You have access to the current request user in tools:

myapp/ai_assistants.py
from django_ai_assistant import AIAssistant, method_tool\n\nclass PersonalAIAssistant(AIAssistant):\n    id = \"personal_assistant\"\n    name = \"Personal Assistant\"\n    instructions = \"You are a personal assistant.\"\n    model = \"gpt-4o\"\n\n    @method_tool\n    def get_current_user_username(self) -> str:\n        \"\"\"Get the username of the current user\"\"\"\n        return self._user.username\n

You can also add any Django logic to tools, such as querying the database:

myapp/ai_assistants.py
from django_ai_assistant import AIAssistant, method_tool\nimport json\n\nclass IssueManagementAIAssistant(AIAssistant):\n    id = \"issue_mgmt_assistant\"\n    name = \"Issue Management Assistant\"\n    instructions = \"You are an issue management bot.\"\n    model = \"gpt-4o\"\n\n    @method_tool\n    def get_current_user_assigned_issues(self) -> str:\n        \"\"\"Get the issues assigned to the current user\"\"\"\n        return json.dumps({\n            \"issues\": list(Issue.objects.filter(assignee=self._user).values())\n        })\n

Warning

Make sure you only return to the LLM what the user can see, considering permissions and privacy. Code the tools as if they were Django views.

","boost":2},{"location":"tutorial/#using-pre-implemented-tools","title":"Using pre-implemented tools","text":"

Django AI Assistant works with any LangChain-compatible tool. Just override the get_tools method in your AI Assistant class to include the tools you want to use.

For example, you can use the TavilySearchResults tool to provide your AI Assistant with the ability to search the web for information about upcoming movies.

First install dependencies:

pip install -U langchain-community tavily-python\n

Then, set the TAVILY_API_KEY environment variable. You'll need to sign up at Tavily.

Finally, add the tool to your AI Assistant class by overriding the get_tools method:

myapp/ai_assistants.py
from django_ai_assistant import AIAssistant\nfrom langchain_community.tools.tavily_search import TavilySearchResults\n\nclass MovieSearchAIAssistant(AIAssistant):\n    id = \"movie_search_assistant\"  # noqa: A003\n    instructions = (\n        \"You're a helpful movie search assistant. \"\n        \"Help the user find more information about movies. \"\n        \"Use the provided tools to search the web for upcoming movies. \"\n    )\n    name = \"Movie Search Assistant\"\n    model = \"gpt-4o\"\n\n    def get_instructions(self):\n        return f\"{self.instructions} Today is {timezone.now().isoformat()}.\"\n\n    def get_tools(self):\n        return [\n            TavilySearchResults(),\n            *super().get_tools(),\n        ]\n

Note

As of now, Django AI Assistant is powered by LangChain and LangGraph, but knowledge of these tools is NOT necessary to use this library, at least for the main use cases.

","boost":2},{"location":"tutorial/#using-an-ai-assistant","title":"Using an AI Assistant","text":"","boost":2},{"location":"tutorial/#manually-calling-an-ai-assistant","title":"Manually calling an AI Assistant","text":"

You can manually call an AI Assistant from anywhere in your Django application:

from myapp.ai_assistants import WeatherAIAssistant\n\nassistant = WeatherAIAssistant()\noutput = assistant.run(\"What's the weather in New York City?\")\nassert output == \"The weather in NYC is sunny with a temperature of 25\u00b0C.\"\n

The constructor of AIAssistant receives user, request, and view as optional parameters, which can be used in the tools via self._user, self._request, and self._view. Also, any extra parameters passed to the constructor are stored in self._init_kwargs.
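
For instance, a sketch passing the current user plus a hypothetical extra units parameter:

from myapp.ai_assistants import WeatherAIAssistant\n\n# user and request would come from a Django view; units is a made-up extra kwarg:\nassistant = WeatherAIAssistant(user=request.user, request=request, units=\"celsius\")\n# Inside a @method_tool, self._user and self._init_kwargs[\"units\"] are then available.\n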

","boost":2},{"location":"tutorial/#threads-of-messages","title":"Threads of Messages","text":"

The django-ai-assistant app provides two models, Thread and Message, to store and retrieve conversations with AI Assistants. LLMs are stateless by design, meaning they don't hold any context between calls; all they know is the current input. But by using the AIAssistant class, the conversation state is stored in the database as multiple Message instances of a Thread, and automatically retrieved and passed to the LLM when calling the AI Assistant.

To create a Thread, you can use a helper from the django_ai_assistant.use_cases module. For example:

from django_ai_assistant.use_cases import create_thread, get_thread_messages\nfrom myapp.ai_assistants import WeatherAIAssistant\n\nthread = create_thread(name=\"Weather Chat\", user=user)\nassistant = WeatherAIAssistant()\nassistant.run(\"What's the weather in New York City?\", thread_id=thread.id)\n\nmessages = get_thread_messages(thread=thread, user=user)  # returns both user and AI messages\n

More CRUD helpers are available at django_ai_assistant.use_cases module. Check the Reference for more information.

","boost":2},{"location":"tutorial/#using-built-in-api-views","title":"Using built-in API views","text":"

You can use the built-in API views to interact with AI Assistants via HTTP requests from any frontend, such as a React application or a mobile app. Add the following to your Django project's urls.py:

myproject/urls.py
from django.urls import include, path\n\nurlpatterns = [\n    path(\"ai-assistant/\", include(\"django_ai_assistant.urls\")),\n    ...\n]\n

The built-in API supports retrieval of Assistants info, as well as CRUD for Threads and Messages. It has an OpenAPI schema that you can explore at http://localhost:8000/ai-assistant/docs when running your project locally.

","boost":2},{"location":"tutorial/#configuring-the-api","title":"Configuring the API","text":"

The built-in API is implemented using Django Ninja. By default, it is initialized with the following setting:

myproject/settings.py
AI_ASSISTANT_INIT_API_FN = \"django_ai_assistant.api.views.init_api\"\n

You can override this setting in your Django project's settings.py to customize the API, such as using a different authentication method or modifying other configurations.

The method signature for AI_ASSISTANT_INIT_API_FN is as follows:

from ninja import NinjaAPI\n\ndef init_api():\n    return NinjaAPI(...)\n

By providing your own implementation of init_api, you can tailor the API setup to better fit your project's requirements.
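
For instance, a sketch of a custom init_api that requires Django session authentication (django_auth is Django Ninja's built-in session auth; place the function anywhere importable and point AI_ASSISTANT_INIT_API_FN at it):

from ninja import NinjaAPI\nfrom ninja.security import django_auth\n\ndef init_api():\n    # Require an authenticated Django session on every endpoint:\n    return NinjaAPI(auth=django_auth)\n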

","boost":2},{"location":"tutorial/#configuring-permissions","title":"Configuring permissions","text":"

The API uses the helpers from the django_ai_assistant.use_cases module, which have permission checks to ensure the user can use a certain AI Assistant or do CRUD on Threads and Messages.

By default, any authenticated user can use any AI Assistant, and create a thread. Users can manage both their own threads and the messages on them. Therefore, the default permissions are:

myproject/settings.py
AI_ASSISTANT_CAN_CREATE_THREAD_FN = \"django_ai_assistant.permissions.allow_all\"\nAI_ASSISTANT_CAN_VIEW_THREAD_FN = \"django_ai_assistant.permissions.owns_thread\"\nAI_ASSISTANT_CAN_UPDATE_THREAD_FN = \"django_ai_assistant.permissions.owns_thread\"\nAI_ASSISTANT_CAN_DELETE_THREAD_FN = \"django_ai_assistant.permissions.owns_thread\"\nAI_ASSISTANT_CAN_CREATE_MESSAGE_FN = \"django_ai_assistant.permissions.owns_thread\"\nAI_ASSISTANT_CAN_UPDATE_MESSAGE_FN = \"django_ai_assistant.permissions.owns_thread\"\nAI_ASSISTANT_CAN_DELETE_MESSAGE_FN = \"django_ai_assistant.permissions.owns_thread\"\nAI_ASSISTANT_CAN_RUN_ASSISTANT_FN = \"django_ai_assistant.permissions.allow_all\"\n

You can override these settings in your Django project's settings.py to customize the permissions.

Thread permission signatures look like this:

from django_ai_assistant.models import Thread\nfrom django.http import HttpRequest\n\ndef check_custom_thread_permission(\n        thread: Thread,\n        user: Any,\n        request: HttpRequest | None = None) -> bool:\n    return ...\n

While Message permission signatures look like this:

from django_ai_assistant.models import Thread, Message\nfrom django.http import HttpRequest\n\ndef check_custom_message_permission(\n        message: Message,\n        thread: Thread,\n        user: Any,\n        request: HttpRequest | None = None) -> bool:\n    return ...\n
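
For example, a sketch of a custom Thread permission that restricts updates to staff users who own the thread (the staff_owns_thread name and module path are hypothetical):

from typing import Any\nfrom django.http import HttpRequest\nfrom django_ai_assistant.models import Thread\n\ndef staff_owns_thread(thread: Thread, user: Any, request: HttpRequest | None = None) -> bool:\n    # Staff-only access to threads the user created:\n    return bool(user.is_staff and thread.created_by == user)\n

myproject/settings.py
AI_ASSISTANT_CAN_UPDATE_THREAD_FN = \"myproject.permissions.staff_owns_thread\"\n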
","boost":2},{"location":"tutorial/#frontend-integration","title":"Frontend integration","text":"

You can integrate Django AI Assistant with frontend frameworks like React or Vue.js. Please check the frontend documentation.

If you want to use traditional Django templates, you can try using HTMX to avoid page refreshes. Check the example project, it includes a HTMX application.

","boost":2},{"location":"tutorial/#advanced-usage","title":"Advanced usage","text":"","boost":2},{"location":"tutorial/#using-other-ai-models","title":"Using other AI models","text":"

By default the supported models are OpenAI ones, but you can use any chat model from LangChain that supports Tool Calling by overriding get_llm:

myapp/ai_assistants.py
from django_ai_assistant import AIAssistant\nfrom langchain_anthropic import ChatAnthropic\n\nclass WeatherAIAssistant(AIAssistant):\n    id = \"weather_assistant\"\n    name = \"Weather Assistant\"\n    instructions = \"You are a weather bot.\"\n    model = \"claude-3-opus-20240229\"\n\n    def get_llm(self):\n        model = self.get_model()\n        temperature = self.get_temperature()\n        model_kwargs = self.get_model_kwargs()\n        return ChatAnthropic(\n            model_name=model,\n            temperature=temperature,\n            model_kwargs=model_kwargs,\n            timeout=None,\n            max_retries=2,\n        )\n
","boost":2},{"location":"tutorial/#composing-ai-assistants","title":"Composing AI Assistants","text":"

One AI Assistant can call another AI Assistant as a tool. This is useful for composing complex AI Assistants. Use the as_tool method for that:

myapp/ai_assistants.py
class SimpleAssistant(AIAssistant):\n    ...\n\nclass AnotherSimpleAssistant(AIAssistant):\n    ...\n\nclass ComplexAssistant(AIAssistant):\n    ...\n\n    def get_tools(self) -> Sequence[BaseTool]:\n        return [\n            SimpleAssistant().as_tool(\n                description=\"Tool to <...add a meaningful description here...>\"),\n            AnotherSimpleAssistant().as_tool(\n                description=\"Tool to <...add a meaningful description here...>\"),\n            *super().get_tools(),\n        ]\n

The movies/ai_assistants.py file in the example project shows an example of a composed AI Assistant that's able to recommend movies and manage the user's movie backlog.

","boost":2},{"location":"tutorial/#retrieval-augmented-generation-rag","title":"Retrieval Augmented Generation (RAG)","text":"

You can use RAG in your AI Assistants. RAG means using a retriever to fetch chunks of textual data from a pre-existing DB to give context to the LLM. This means the LLM will have access to a context your retriever logic provides when generating the response, thereby improving the quality of the response by avoiding generic or off-topic answers.

For this to work, you must do the following in your AI Assistant:

  1. Add has_rag = True as a class attribute;
  2. Override the get_retriever method to return a LangChain Retriever.

For example:

myapp/ai_assistants.py
from django_ai_assistant import AIAssistant\n\nclass DocsAssistant(AIAssistant):\n    id = \"docs_assistant\"  # noqa: A003\n    name = \"Docs Assistant\"\n    instructions = (\n        \"You are an assistant for answering questions related to the provided context. \"\n        \"Use the following pieces of retrieved context to answer the user's question. \"\n    )\n    model = \"gpt-4o\"\n    has_rag = True\n\n    def get_retriever(self) -> BaseRetriever:\n        return ...  # use a LangChain Retriever here\n
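
As a hedged illustration (not from the example project), get_retriever could return a retriever over a small in-memory vector store, assuming langchain-openai is installed:

from langchain_core.retrievers import BaseRetriever\nfrom langchain_core.vectorstores import InMemoryVectorStore\nfrom langchain_openai import OpenAIEmbeddings\n\ndef get_retriever(self) -> BaseRetriever:\n    # Toy corpus; a real app would index its own documents:\n    store = InMemoryVectorStore.from_texts(\n        [\"Django models map Python classes to database tables.\"],\n        embedding=OpenAIEmbeddings(),\n    )\n    return store.as_retriever()\n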

The rag/ai_assistants.py file in the example project shows an example of a RAG-powered AI Assistant that's able to answer questions about Django using the Django Documentation as context.

","boost":2},{"location":"tutorial/#support-for-other-types-of-primary-key-pk","title":"Support for other types of Primary Key (PK)","text":"

You can have Django AI Assistant models use other types of primary key, such as strings, UUIDs, etc. This is useful if you're concerned about leaking sequential IDs that expose thread count, message count, etc. to the frontend. Using UUIDs also prevents users from figuring out whether a thread or message exists at all (due to HTTP 404 vs 403 responses).

Here are the files you have to change if you need the IDs to be UUIDs:

myapp/fields.py
import uuid\nfrom django.db.backends.base.operations import BaseDatabaseOperations\nfrom django.db.models import AutoField, UUIDField\n\nBaseDatabaseOperations.integer_field_ranges['UUIDField'] = (0, 0)\n\nclass UUIDAutoField(UUIDField, AutoField):\n    def __init__(self, *args, **kwargs):\n        kwargs.setdefault('default', uuid.uuid4)\n        kwargs.setdefault('editable', False)\n        super().__init__(*args, **kwargs)\n
myapp/apps.py
from django_ai_assistant.apps import AIAssistantConfig\n\nclass AIAssistantConfigOverride(AIAssistantConfig):\n    default_auto_field = \"myapp.fields.UUIDAutoField\"\n
myproject/settings.py
INSTALLED_APPS = [\n    # \"django_ai_assistant\", remove this line and add the one below\n    \"myapp.apps.AIAssistantConfigOverride\",\n]\n

Make sure to run migrations after those changes:

python manage.py makemigrations\npython manage.py migrate\n

For more information, check Django docs on overriding AppConfig.

","boost":2},{"location":"tutorial/#further-configuration-of-ai-assistants","title":"Further configuration of AI Assistants","text":"

You can further configure the AIAssistant subclasses by overriding its public methods. Check the Reference for more information.

","boost":2},{"location":"reference/","title":"Reference","text":"

This is the reference documentation for the Django AI Assistant library.

"},{"location":"reference/#modules","title":"Modules","text":"
  • django_ai_assistant.helpers.use_cases
  • django_ai_assistant.helpers.assistants
  • django_ai_assistant.models
"},{"location":"reference/assistants-ref/","title":"django_ai_assistant.helpers.assistants","text":""},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant","title":"AIAssistant","text":"

Base class for AI Assistants. Subclasses must define at least the following attributes:

  • id: str
  • name: str
  • instructions: str
  • model: str

Subclasses can override the public methods to customize the behavior of the assistant.

Tools can be added to the assistant by decorating methods with @method_tool.

Check the docs Tutorial for more info on how to build an AI Assistant.
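
For instance, a minimal subclass sketch that also sets the structured_output attribute (the MovieInfo schema is hypothetical):

from django_ai_assistant import AIAssistant, BaseModel\n\nclass MovieInfo(BaseModel):\n    title: str\n    year: int\n\nclass MovieInfoAssistant(AIAssistant):\n    id = \"movie_info_assistant\"\n    name = \"Movie Info Assistant\"\n    instructions = \"Extract the movie title and year from the user message.\"\n    model = \"gpt-4o\"\n    structured_output = MovieInfo\n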

Source code in django_ai_assistant/helpers/assistants.py
class AIAssistant(abc.ABC):  # noqa: F821\n    \"\"\"Base class for AI Assistants. Subclasses must define at least the following attributes:\n\n    * id: str\n    * name: str\n    * instructions: str\n    * model: str\n\n    Subclasses can override the public methods to customize the behavior of the assistant.\\n\n    Tools can be added to the assistant by decorating methods with `@method_tool`.\\n\n    Check the docs Tutorial for more info on how to build an AI Assistant.\n    \"\"\"\n\n    id: ClassVar[str]  # noqa: A003\n    \"\"\"Class variable with the id of the assistant. Used to select the assistant to use.\\n\n    Must be unique across the whole Django project and match the pattern '^[a-zA-Z0-9_-]+$'.\"\"\"\n    name: ClassVar[str]\n    \"\"\"Class variable with the name of the assistant.\n    Should be a friendly name to optionally display to users.\"\"\"\n    instructions: str\n    \"\"\"Instructions for the AI assistant knowing what to do. This is the LLM system prompt.\"\"\"\n    model: str\n    \"\"\"LLM model name to use for the assistant.\\n\n    Should be a valid model name from OpenAI, because the default `get_llm` method uses OpenAI.\\n\n    `get_llm` can be overridden to use a different LLM implementation.\n    \"\"\"\n    temperature: float = 1.0\n    \"\"\"Temperature to use for the assistant LLM model.\\nDefaults to `1.0`.\"\"\"\n    tool_max_concurrency: int = 1\n    \"\"\"Maximum number of tools to run concurrently / in parallel.\\nDefaults to `1` (no concurrency).\"\"\"\n    has_rag: bool = False\n    \"\"\"Whether the assistant uses RAG (Retrieval-Augmented Generation) or not.\\n\n    Defaults to `False`.\n    When True, the assistant will use a retriever to get documents to provide as context to the LLM.\n    Additionally, the assistant class should implement the `get_retriever` method to return\n    the retriever to use.\"\"\"\n    structured_output: Dict[str, Any] | Type[BaseModel] | Type | None = None\n    \"\"\"Structured output to use for the assistant.\\n\n    Defaults to `None`.\n    When not `None`, the assistant will return a structured output in the provided format.\n    See https://python.langchain.com/v0.3/docs/how_to/structured_output/ for the available formats.\n    \"\"\"\n    _user: Any | None\n    \"\"\"The current user the assistant is helping. A model instance.\\n\n    Set by the constructor.\n    When API views are used, this is set to the current request user.\\n\n    Can be used in any `@method_tool` to customize behavior.\"\"\"\n    _request: Any | None\n    \"\"\"The current Django request the assistant was initialized with. A request instance.\\n\n    Set by the constructor.\\n\n    Can be used in any `@method_tool` to customize behavior.\"\"\"\n    _view: Any | None\n    \"\"\"The current Django view the assistant was initialized with. A view instance.\\n\n    Set by the constructor.\\n\n    Can be used in any `@method_tool` to customize behavior.\"\"\"\n    _init_kwargs: dict[str, Any]\n    \"\"\"Extra keyword arguments passed to the constructor.\\n\n    Set by the constructor.\\n\n    Can be used in any `@method_tool` to customize behavior.\"\"\"\n    _method_tools: Sequence[BaseTool]\n    \"\"\"List of `@method_tool` tools the assistant can use. 
Automatically set by the constructor.\"\"\"\n\n    _registry: ClassVar[dict[str, type[\"AIAssistant\"]]] = {}\n    \"\"\"Registry of all AIAssistant subclasses by their id.\\n\n    Automatically populated by when a subclass is declared.\\n\n    Use `get_cls_registry` and `get_cls` to access the registry.\"\"\"\n\n    def __init__(self, *, user=None, request=None, view=None, **kwargs: Any):\n        \"\"\"Initialize the AIAssistant instance.\\n\n        Optionally set the current user, request, and view for the assistant.\\n\n        Those can be used in any `@method_tool` to customize behavior.\\n\n\n        Args:\n            user (Any | None): The current user the assistant is helping. A model instance.\n                Defaults to `None`. Stored in `self._user`.\n            request (Any | None): The current Django request the assistant was initialized with.\n                A request instance. Defaults to `None`. Stored in `self._request`.\n            view (Any | None): The current Django view the assistant was initialized with.\n                A view instance. Defaults to `None`. Stored in `self._view`.\n            **kwargs: Extra keyword arguments passed to the constructor. Stored in `self._init_kwargs`.\n        \"\"\"\n\n        self._user = user\n        self._request = request\n        self._view = view\n        self._init_kwargs = kwargs\n\n        self._set_method_tools()\n\n    def __init_subclass__(cls, **kwargs: Any):\n        \"\"\"Called when a class is subclassed from AIAssistant.\n\n        This method is automatically invoked when a new subclass of AIAssistant\n        is created. It allows AIAssistant to perform additional setup or configuration\n        for the subclass, such as registering the subclass in a registry.\n\n        Args:\n            cls (type): The newly created subclass.\n            **kwargs: Additional keyword arguments passed during subclass creation.\n        \"\"\"\n        super().__init_subclass__(**kwargs)\n\n        if not hasattr(cls, \"id\"):\n            raise AIAssistantMisconfiguredError(f\"Assistant id is not defined at {cls.__name__}\")\n        if cls.id is None:\n            raise AIAssistantMisconfiguredError(f\"Assistant id is None at {cls.__name__}\")\n        if not re.match(r\"^[a-zA-Z0-9_-]+$\", cls.id):\n            # id should match the pattern '^[a-zA-Z0-9_-]+$ to support as_tool in OpenAI\n            raise AIAssistantMisconfiguredError(\n                f\"Assistant id '{cls.id}' does not match the pattern '^[a-zA-Z0-9_-]+$'\"\n                f\"at {cls.__name__}\"\n            )\n\n        cls._registry[cls.id] = cls\n\n    def _set_method_tools(self):\n        # Find tool methods (decorated with `@method_tool` from django_ai_assistant/tools.py):\n        members = inspect.getmembers(\n            self,\n            predicate=lambda m: inspect.ismethod(m) and getattr(m, \"_is_tool\", False),\n        )\n        tool_methods = [m for _, m in members]\n\n        # Sort tool methods by the order they appear in the source code,\n        # since this can be meaningful:\n        tool_methods.sort(key=lambda m: inspect.getsourcelines(m)[1])\n\n        # Transform tool methods into tool objects:\n        tools = []\n        for method in tool_methods:\n            if hasattr(method, \"_tool_maker_args\"):\n                tool = tool_decorator(\n                    *method._tool_maker_args,\n                    **method._tool_maker_kwargs,\n                )(method)\n            else:\n                tool = 
tool_decorator(method)\n            tools.append(cast(BaseTool, tool))\n\n        # Remove self from each tool args_schema:\n        for tool in tools:\n            if tool.args_schema:\n                if isinstance(tool.args_schema.__fields_set__, set):\n                    tool.args_schema.__fields_set__.remove(\"self\")\n                tool.args_schema.__fields__.pop(\"self\", None)\n\n        self._method_tools = tools\n\n    @classmethod\n    def get_cls_registry(cls) -> dict[str, type[\"AIAssistant\"]]:\n        \"\"\"Get the registry of AIAssistant classes.\n\n        Returns:\n            dict[str, type[AIAssistant]]: A dictionary mapping assistant ids to their classes.\n        \"\"\"\n        return cls._registry\n\n    @classmethod\n    def get_cls(cls, assistant_id: str) -> type[\"AIAssistant\"]:\n        \"\"\"Get the AIAssistant class for the given assistant ID.\n\n        Args:\n            assistant_id (str): The ID of the assistant to get.\n        Returns:\n            type[AIAssistant]: The AIAssistant subclass for the given ID.\n        \"\"\"\n        return cls.get_cls_registry()[assistant_id]\n\n    @classmethod\n    def clear_cls_registry(cls: type[\"AIAssistant\"]) -> None:\n        \"\"\"Clear the registry of AIAssistant classes.\"\"\"\n\n        cls._registry.clear()\n\n    def get_instructions(self) -> str:\n        \"\"\"Get the instructions for the assistant. By default, this is the `instructions` attribute.\\n\n        Override the `instructions` attribute or this method to use different instructions.\n\n        Returns:\n            str: The instructions for the assistant, i.e., the LLM system prompt.\n        \"\"\"\n        return self.instructions\n\n    def get_model(self) -> str:\n        \"\"\"Get the LLM model name for the assistant. 
By default, this is the `model` attribute.\\n\n        Used by the `get_llm` method to create the LLM instance.\\n\n        Override the `model` attribute or this method to use a different LLM model.\n\n        Returns:\n            str: The LLM model name for the assistant.\n        \"\"\"\n        return self.model\n\n    def get_temperature(self) -> float:\n        \"\"\"Get the temperature to use for the assistant LLM model.\n        By default, this is the `temperature` attribute, which is `1.0` by default.\\n\n        Used by the `get_llm` method to create the LLM instance.\\n\n        Override the `temperature` attribute or this method to use a different temperature.\n\n        Returns:\n            float: The temperature to use for the assistant LLM model.\n        \"\"\"\n        return self.temperature\n\n    def get_model_kwargs(self) -> dict[str, Any]:\n        \"\"\"Get additional keyword arguments to pass to the LLM model constructor.\\n\n        Used by the `get_llm` method to create the LLM instance.\\n\n        Override this method to pass additional keyword arguments to the LLM model constructor.\n\n        Returns:\n            dict[str, Any]: Additional keyword arguments to pass to the LLM model constructor.\n        \"\"\"\n        return {}\n\n    def get_llm(self) -> BaseChatModel:\n        \"\"\"Get the LangChain LLM instance for the assistant.\n        By default, this uses the OpenAI implementation.\\n\n        `get_model`, `get_temperature`, and `get_model_kwargs` are used to create the LLM instance.\\n\n        Override this method to use a different LLM implementation.\n\n        Returns:\n            BaseChatModel: The LLM instance for the assistant.\n        \"\"\"\n        model = self.get_model()\n        temperature = self.get_temperature()\n        model_kwargs = self.get_model_kwargs()\n        return ChatOpenAI(\n            model=model,\n            temperature=temperature,\n            model_kwargs=model_kwargs,\n        )\n\n    def get_structured_output_llm(self) -> Runnable:\n        \"\"\"Get the LLM model to use for the structured output.\n\n        Returns:\n            BaseChatModel: The LLM model to use for the structured output.\n        \"\"\"\n        if not self.structured_output:\n            raise ValueError(\"structured_output is not defined\")\n\n        llm = self.get_llm()\n\n        method = \"json_mode\"\n        if isinstance(llm, ChatOpenAI):\n            # When using ChatOpenAI, it's better to use json_schema method\n            # because it enables strict mode.\n            # https://platform.openai.com/docs/guides/structured-outputs\n            method = \"json_schema\"\n\n        return llm.with_structured_output(self.structured_output, method=method)\n\n    def get_tools(self) -> Sequence[BaseTool]:\n        \"\"\"Get the list of method tools the assistant can use.\n        By default, this is the `_method_tools` attribute, which are all `@method_tool`s.\\n\n        Override and call super to add additional tools,\n        such as [any langchain_community tools](https://python.langchain.com/v0.3/docs/integrations/tools/).\n\n        Returns:\n            Sequence[BaseTool]: The list of tools the assistant can use.\n        \"\"\"\n        return self._method_tools\n\n    def get_document_separator(self) -> str:\n        \"\"\"Get the RAG document separator to use in the prompt. 
Only used when `has_rag=True`.\\n\n        Defaults to `\"\\\\n\\\\n\"`, which is the LangChain default.\\n\n        Override this method to use a different separator.\n\n        Returns:\n            str: a separator for documents in the prompt.\n        \"\"\"\n        return DEFAULT_DOCUMENT_SEPARATOR\n\n    def get_document_prompt(self) -> PromptTemplate:\n        \"\"\"Get the PromptTemplate template to use when rendering RAG documents in the prompt.\n        Only used when `has_rag=True`.\\n\n        Defaults to `PromptTemplate.from_template(\"{page_content}\")`, which is the LangChain default.\\n\n        Override this method to use a different template.\n\n        Returns:\n            PromptTemplate: a prompt template for RAG documents.\n        \"\"\"\n        return DEFAULT_DOCUMENT_PROMPT\n\n    def get_retriever(self) -> BaseRetriever:\n        \"\"\"Get the RAG retriever to use for fetching documents.\\n\n        Must be implemented by subclasses when `has_rag=True`.\\n\n\n        Returns:\n            BaseRetriever: the RAG retriever to use for fetching documents.\n        \"\"\"\n        raise NotImplementedError(\n            f\"Override the get_retriever with your implementation at {self.__class__.__name__}\"\n        )\n\n    def get_contextualize_prompt(self) -> ChatPromptTemplate:\n        \"\"\"Get the contextualize prompt template for the assistant.\\n\n        This is used when `has_rag=True` and there are previous messages in the thread.\n        Since the latest user question might reference the chat history,\n        the LLM needs to generate a new standalone question,\n        and use that question to query the retriever for relevant documents.\\n\n        By default, this is a prompt that asks the LLM to\n        reformulate the latest user question without the chat history.\\n\n        Override this method to use a different contextualize prompt.\\n\n        See `get_history_aware_retriever` for how this prompt is used.\\n\n\n        Returns:\n            ChatPromptTemplate: The contextualize prompt template for the assistant.\n        \"\"\"\n        contextualize_q_system_prompt = (\n            \"Given a chat history and the latest user question \"\n            \"which might reference context in the chat history, \"\n            \"formulate a standalone question which can be understood \"\n            \"without the chat history. 
Do NOT answer the question, \"\n            \"just reformulate it if needed and otherwise return it as is.\"\n        )\n        return ChatPromptTemplate.from_messages(\n            [\n                (\"system\", contextualize_q_system_prompt),\n                # TODO: make history key configurable?\n                MessagesPlaceholder(\"history\"),\n                # TODO: make input key configurable?\n                (\"human\", \"{input}\"),\n            ]\n        )\n\n    def get_history_aware_retriever(self) -> Runnable[dict, RetrieverOutput]:\n        \"\"\"Get the history-aware retriever LangChain chain for the assistant.\\n\n        This is used when `has_rag=True` to fetch documents based on the chat history.\\n\n        By default, this is a chain that checks if there is chat history,\n        and if so, it uses the chat history to generate a new standalone question\n        to query the retriever for relevant documents.\\n\n        When there is no chat history, it just passes the input to the retriever.\\n\n        Override this method to use a different history-aware retriever chain.\n\n        Read more about the history-aware retriever in the\n        [LangChain docs](https://python.langchain.com/v0.2/docs/how_to/qa_chat_history_how_to/).\n\n        Returns:\n            Runnable[dict, RetrieverOutput]: a history-aware retriever LangChain chain.\n        \"\"\"\n        llm = self.get_llm()\n        retriever = self.get_retriever()\n        prompt = self.get_contextualize_prompt()\n\n        # Based on create_history_aware_retriever:\n        return RunnableBranch(\n            (\n                lambda x: not x.get(\"history\", False),  # pyright: ignore[reportAttributeAccessIssue]\n                # If no chat history, then we just pass input to retriever\n                (lambda x: x[\"input\"]) | retriever,\n            ),\n            # If chat history, then we pass inputs to LLM chain, then to retriever\n            prompt | llm | StrOutputParser() | retriever,\n        )\n\n    @with_cast_id\n    def as_graph(self, thread_id: Any | None = None) -> Runnable[dict, dict]:\n        \"\"\"Create the LangGraph graph for the assistant.\\n\n        This graph is an agent that supports chat history, tool calling, and RAG (if `has_rag=True`).\\n\n        `as_graph` uses many other methods to create the graph for the assistant.\n        Prefer to override the other methods to customize the graph for the assistant.\n        Only override this method if you need to customize the graph at a lower level.\n\n        Args:\n            thread_id (Any | None): The thread ID for the chat message history.\n                If `None`, an in-memory chat message history is used.\n\n        Returns:\n            the compiled graph\n        \"\"\"\n        from django_ai_assistant.models import Thread\n\n        llm = self.get_llm()\n        tools = self.get_tools()\n        llm_with_tools = llm.bind_tools(tools) if tools else llm\n        if thread_id:\n            thread = Thread.objects.get(id=thread_id)\n        else:\n            thread = None\n\n        def custom_add_messages(left: list[BaseMessage], right: list[BaseMessage]):\n            result = add_messages(left, right)  # type: ignore\n            if thread:\n                # Save all messages, except the initial system message:\n                thread_messages = [m for m in result if not isinstance(m, SystemMessage)]\n                save_django_messages(cast(list[BaseMessage], thread_messages), thread=thread)\n            
return result\n\n        class AgentState(TypedDict):\n            messages: Annotated[list[AnyMessage], custom_add_messages]\n            input: str | None  # noqa: A003\n            output: Any\n\n        def setup(state: AgentState):\n            system_prompt = self.get_instructions()\n            return {\"messages\": [SystemMessage(content=system_prompt)]}\n\n        def history(state: AgentState):\n            messages = thread.get_messages(include_extra_messages=True) if thread else []\n            if state[\"input\"]:\n                messages.append(HumanMessage(content=state[\"input\"]))\n\n            return {\"messages\": messages}\n\n        def retriever(state: AgentState):\n            if not self.has_rag:\n                return\n\n            retriever = self.get_history_aware_retriever()\n            # Remove the initial instructions to prevent having two SystemMessages\n            # This is necessary for compatibility with Anthropic\n            messages_to_summarize = state[\"messages\"][1:-1]\n            input_message = state[\"messages\"][-1]\n            docs = retriever.invoke(\n                {\"input\": input_message.content, \"history\": messages_to_summarize}\n            )\n\n            document_separator = self.get_document_separator()\n            document_prompt = self.get_document_prompt()\n\n            formatted_docs = document_separator.join(\n                format_document(doc, document_prompt) for doc in docs\n            )\n\n            system_message = state[\"messages\"][0]\n            system_message.content += (\n                f\"\\n\\n---START OF CONTEXT---\\n{formatted_docs}---END OF CONTEXT---\\n\\n\"\n            )\n\n        def agent(state: AgentState):\n            response = llm_with_tools.invoke(state[\"messages\"])\n\n            return {\"messages\": [response]}\n\n        def tool_selector(state: AgentState):\n            last_message = state[\"messages\"][-1]\n\n            if isinstance(last_message, AIMessage) and last_message.tool_calls:\n                return \"call_tool\"\n\n            return \"continue\"\n\n        def record_response(state: AgentState):\n            # Structured output must happen in the end, to avoid disabling tool calling.\n            # Tool calling + structured output is not supported by OpenAI:\n            if self.structured_output:\n                messages = state[\"messages\"]\n\n                # Change the original system prompt:\n                if isinstance(messages[0], SystemMessage):\n                    messages[0].content += \"\\nUse the chat history to produce a JSON output.\"\n\n                # Add a final message asking for JSON generation / structured output:\n                json_request_message = HumanMessage(\n                    content=\"Use the chat history to produce a JSON output.\"\n                )\n                messages.append(json_request_message)\n\n                llm_with_structured_output = self.get_structured_output_llm()\n                response = llm_with_structured_output.invoke(messages)\n            else:\n                response = state[\"messages\"][-1].content\n\n            return {\"output\": response}\n\n        workflow = StateGraph(AgentState)\n\n        workflow.add_node(\"setup\", setup)\n        workflow.add_node(\"history\", history)\n        workflow.add_node(\"retriever\", retriever)\n        workflow.add_node(\"agent\", agent)\n        workflow.add_node(\"tools\", ToolNode(tools))\n        workflow.add_node(\"respond\", 
record_response)\n\n        workflow.set_entry_point(\"setup\")\n        workflow.add_edge(\"setup\", \"history\")\n        workflow.add_edge(\"history\", \"retriever\")\n        workflow.add_edge(\"retriever\", \"agent\")\n        workflow.add_conditional_edges(\n            \"agent\",\n            tool_selector,\n            {\n                \"call_tool\": \"tools\",\n                \"continue\": \"respond\",\n            },\n        )\n        workflow.add_edge(\"tools\", \"agent\")\n        workflow.add_edge(\"respond\", END)\n\n        return workflow.compile()\n\n    @with_cast_id\n    def invoke(self, *args: Any, thread_id: Any | None, **kwargs: Any) -> dict:\n        \"\"\"Invoke the assistant LangChain graph with the given arguments and keyword arguments.\\n\n        This is the lower-level method to run the assistant.\\n\n        The graph is created by the `as_graph` method.\\n\n\n        Args:\n            *args: Positional arguments to pass to the graph.\n                To add a new message, use a dict like `{\"input\": \"user message\"}`.\n                If thread already has a `HumanMessage` in the end, you can invoke without args.\n            thread_id (Any | None): The thread ID for the chat message history.\n                If `None`, an in-memory chat message history is used.\n            **kwargs: Keyword arguments to pass to the graph.\n\n        Returns:\n            dict: The output of the assistant graph,\n                structured like `{\"output\": \"assistant response\", \"history\": ...}`.\n        \"\"\"\n        graph = self.as_graph(thread_id)\n        config = kwargs.pop(\"config\", {})\n        config[\"max_concurrency\"] = config.pop(\"max_concurrency\", self.tool_max_concurrency)\n        return graph.invoke(*args, config=config, **kwargs)\n\n    @with_cast_id\n    def run(self, message: str, thread_id: Any | None = None, **kwargs: Any) -> Any:\n        \"\"\"Run the assistant with the given message and thread ID.\\n\n        This is the higher-level method to run the assistant.\\n\n\n        Args:\n            message (str): The user message to pass to the assistant.\n            thread_id (Any | None): The thread ID for the chat message history.\n                If `None`, an in-memory chat message history is used.\n            **kwargs: Additional keyword arguments to pass to the graph.\n\n        Returns:\n            Any: The assistant response to the user message.\n        \"\"\"\n        return self.invoke(\n            {\n                \"input\": message,\n            },\n            thread_id=thread_id,\n            **kwargs,\n        )[\"output\"]\n\n    def _run_as_tool(self, message: str, **kwargs: Any) -> Any:\n        return self.run(message, thread_id=None, **kwargs)\n\n    def as_tool(self, description: str) -> BaseTool:\n        \"\"\"Create a tool from the assistant.\\n\n        This is useful to compose assistants.\\n\n\n        Args:\n            description (str): The description for the tool.\n\n        Returns:\n            BaseTool: A tool that runs the assistant. The tool name is this assistant's id.\n        \"\"\"\n        return StructuredTool.from_function(\n            func=self._run_as_tool,\n            name=self.id,\n            description=description,\n        )\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.id","title":"id: str class-attribute","text":"

Class variable with the id of the assistant. Used to select the assistant to use.

Must be unique across the whole Django project and match the pattern '^[a-zA-Z0-9_-]+$'.

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.name","title":"name: str class-attribute","text":"

Class variable with the name of the assistant. Should be a friendly name to optionally display to users.

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.instructions","title":"instructions: str instance-attribute","text":"

Instructions that tell the AI assistant what to do. This is the LLM system prompt.
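For illustration, a minimal subclass setting these class attributes might look like the sketch below (the id, name, and model value are hypothetical, not part of the library):

from django_ai_assistant import AIAssistant

class WeatherAIAssistant(AIAssistant):
    id = "weather_assistant"
    name = "Weather Assistant"
    instructions = "You are a weather bot, answer only questions about the weather."
    model = "gpt-4o"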

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.model","title":"model: str instance-attribute","text":"

LLM model name to use for the assistant.

Should be a valid model name from OpenAI, because the default get_llm method uses OpenAI.

get_llm can be overridden to use a different LLM implementation.

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.temperature","title":"temperature: float = 1.0 class-attribute instance-attribute","text":"

Temperature to use for the assistant LLM model. Defaults to 1.0.

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.tool_max_concurrency","title":"tool_max_concurrency: int = 1 class-attribute instance-attribute","text":"

Maximum number of tools to run concurrently / in parallel. Defaults to 1 (no concurrency).

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.has_rag","title":"has_rag: bool = False class-attribute instance-attribute","text":"

Whether the assistant uses RAG (Retrieval-Augmented Generation) or not.

Defaults to False. When True, the assistant will use a retriever to get documents to provide as context to the LLM. Additionally, the assistant class should implement the get_retriever method to return the retriever to use.

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.structured_output","title":"structured_output: Dict[str, Any] | Type[BaseModel] | Type | None = None class-attribute instance-attribute","text":"

Structured output to use for the assistant.

Defaults to None. When not None, the assistant will return a structured output in the provided format. See https://python.langchain.com/v0.3/docs/how_to/structured_output/ for the available formats.
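A minimal sketch of structured output with a Pydantic model (the MovieInfo schema and assistant names are hypothetical):

from pydantic import BaseModel
from django_ai_assistant import AIAssistant

class MovieInfo(BaseModel):
    title: str
    year: int

class MovieAssistant(AIAssistant):
    id = "movie_assistant"
    name = "Movie Assistant"
    instructions = "Extract the movie title and release year from the user message."
    model = "gpt-4o"
    structured_output = MovieInfo

With structured_output set, run() should return a MovieInfo instance rather than a plain string.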

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant._method_tools","title":"_method_tools: Sequence[BaseTool] instance-attribute","text":"

List of @method_tool tools the assistant can use. Automatically set by the constructor.

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant._registry","title":"_registry: dict[str, type[AIAssistant]] = {} class-attribute","text":"

Registry of all AIAssistant subclasses by their id.

Automatically populated when a subclass is declared.

Use get_cls_registry and get_cls to access the registry.

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant._user","title":"_user: Any | None = user instance-attribute","text":"

The current user the assistant is helping. A model instance.

Set by the constructor. When API views are used, this is set to the current request user.

Can be used in any @method_tool to customize behavior.

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant._request","title":"_request: Any | None = request instance-attribute","text":"

The current Django request the assistant was initialized with. A request instance.

Set by the constructor.

Can be used in any @method_tool to customize behavior.

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant._view","title":"_view: Any | None = view instance-attribute","text":"

The current Django view the assistant was initialized with. A view instance.

Set by the constructor.

Can be used in any @method_tool to customize behavior.

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant._init_kwargs","title":"_init_kwargs: dict[str, Any] = kwargs instance-attribute","text":"

Extra keyword arguments passed to the constructor.

Set by the constructor.

Can be used in any @method_tool to customize behavior.

"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.__init__","title":"__init__(*, user=None, request=None, view=None, **kwargs)","text":"

Initialize the AIAssistant instance.

Optionally set the current user, request, and view for the assistant.

Those can be used in any @method_tool to customize behavior.

Parameters:

Name Type Description Default user Any | None

The current user the assistant is helping. A model instance. Defaults to None. Stored in self._user.

None request Any | None

The current Django request the assistant was initialized with. A request instance. Defaults to None. Stored in self._request.

None view Any | None

The current Django view the assistant was initialized with. A view instance. Defaults to None. Stored in self._view.

None **kwargs Any

Extra keyword arguments passed to the constructor. Stored in self._init_kwargs.

{} Source code in django_ai_assistant/helpers/assistants.py
def __init__(self, *, user=None, request=None, view=None, **kwargs: Any):\n    \"\"\"Initialize the AIAssistant instance.\\n\n    Optionally set the current user, request, and view for the assistant.\\n\n    Those can be used in any `@method_tool` to customize behavior.\\n\n\n    Args:\n        user (Any | None): The current user the assistant is helping. A model instance.\n            Defaults to `None`. Stored in `self._user`.\n        request (Any | None): The current Django request the assistant was initialized with.\n            A request instance. Defaults to `None`. Stored in `self._request`.\n        view (Any | None): The current Django view the assistant was initialized with.\n            A view instance. Defaults to `None`. Stored in `self._view`.\n        **kwargs: Extra keyword arguments passed to the constructor. Stored in `self._init_kwargs`.\n    \"\"\"\n\n    self._user = user\n    self._request = request\n    self._view = view\n    self._init_kwargs = kwargs\n\n    self._set_method_tools()\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_cls_registry","title":"get_cls_registry() classmethod","text":"

Get the registry of AIAssistant classes.

Returns:

Type Description dict[str, type[AIAssistant]]

dict[str, type[AIAssistant]]: A dictionary mapping assistant ids to their classes.

Source code in django_ai_assistant/helpers/assistants.py
@classmethod\ndef get_cls_registry(cls) -> dict[str, type[\"AIAssistant\"]]:\n    \"\"\"Get the registry of AIAssistant classes.\n\n    Returns:\n        dict[str, type[AIAssistant]]: A dictionary mapping assistant ids to their classes.\n    \"\"\"\n    return cls._registry\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_cls","title":"get_cls(assistant_id) classmethod","text":"

Get the AIAssistant class for the given assistant ID.

Parameters:

Name Type Description Default assistant_id str

The ID of the assistant to get.

required

Returns: type[AIAssistant]: The AIAssistant subclass for the given ID.

Source code in django_ai_assistant/helpers/assistants.py
@classmethod\ndef get_cls(cls, assistant_id: str) -> type[\"AIAssistant\"]:\n    \"\"\"Get the AIAssistant class for the given assistant ID.\n\n    Args:\n        assistant_id (str): The ID of the assistant to get.\n    Returns:\n        type[AIAssistant]: The AIAssistant subclass for the given ID.\n    \"\"\"\n    return cls.get_cls_registry()[assistant_id]\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.clear_cls_registry","title":"clear_cls_registry() classmethod","text":"

Clear the registry of AIAssistant classes.

Source code in django_ai_assistant/helpers/assistants.py
@classmethod\ndef clear_cls_registry(cls: type[\"AIAssistant\"]) -> None:\n    \"\"\"Clear the registry of AIAssistant classes.\"\"\"\n\n    cls._registry.clear()\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_instructions","title":"get_instructions()","text":"

Get the instructions for the assistant. By default, this is the instructions attribute.

Override the instructions attribute or this method to use different instructions.

Returns:

Name Type Description str str

The instructions for the assistant, i.e., the LLM system prompt.

Source code in django_ai_assistant/helpers/assistants.py
def get_instructions(self) -> str:\n    \"\"\"Get the instructions for the assistant. By default, this is the `instructions` attribute.\\n\n    Override the `instructions` attribute or this method to use different instructions.\n\n    Returns:\n        str: The instructions for the assistant, i.e., the LLM system prompt.\n    \"\"\"\n    return self.instructions\n
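One common reason to override this method is to make the system prompt dynamic. A sketch that appends the current date (assistant names are illustrative):

from django.utils import timezone
from django_ai_assistant import AIAssistant

class TimeAwareAssistant(AIAssistant):
    id = "time_aware_assistant"
    name = "Time-Aware Assistant"
    instructions = "You are a helpful assistant."
    model = "gpt-4o"

    def get_instructions(self) -> str:
        # Append the current date so the system prompt always knows "today"
        return f"{self.instructions} Today is {timezone.now().date().isoformat()}."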
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_model","title":"get_model()","text":"

Get the LLM model name for the assistant. By default, this is the model attribute.

Used by the get_llm method to create the LLM instance.

Override the model attribute or this method to use a different LLM model.

Returns:

Name Type Description str str

The LLM model name for the assistant.

Source code in django_ai_assistant/helpers/assistants.py
def get_model(self) -> str:\n    \"\"\"Get the LLM model name for the assistant. By default, this is the `model` attribute.\\n\n    Used by the `get_llm` method to create the LLM instance.\\n\n    Override the `model` attribute or this method to use a different LLM model.\n\n    Returns:\n        str: The LLM model name for the assistant.\n    \"\"\"\n    return self.model\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_temperature","title":"get_temperature()","text":"

Get the temperature to use for the assistant LLM model. By default, this is the temperature attribute, which is 1.0 by default.

Used by the get_llm method to create the LLM instance.

Override the temperature attribute or this method to use a different temperature.

Returns:

Name Type Description float float

The temperature to use for the assistant LLM model.

Source code in django_ai_assistant/helpers/assistants.py
def get_temperature(self) -> float:\n    \"\"\"Get the temperature to use for the assistant LLM model.\n    By default, this is the `temperature` attribute, which is `1.0` by default.\\n\n    Used by the `get_llm` method to create the LLM instance.\\n\n    Override the `temperature` attribute or this method to use a different temperature.\n\n    Returns:\n        float: The temperature to use for the assistant LLM model.\n    \"\"\"\n    return self.temperature\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_model_kwargs","title":"get_model_kwargs()","text":"

Get additional keyword arguments to pass to the LLM model constructor.

Used by the get_llm method to create the LLM instance.

Override this method to pass additional keyword arguments to the LLM model constructor.

Returns:

Type Description dict[str, Any]

dict[str, Any]: Additional keyword arguments to pass to the LLM model constructor.

Source code in django_ai_assistant/helpers/assistants.py
def get_model_kwargs(self) -> dict[str, Any]:\n    \"\"\"Get additional keyword arguments to pass to the LLM model constructor.\\n\n    Used by the `get_llm` method to create the LLM instance.\\n\n    Override this method to pass additional keyword arguments to the LLM model constructor.\n\n    Returns:\n        dict[str, Any]: Additional keyword arguments to pass to the LLM model constructor.\n    \"\"\"\n    return {}\n
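A sketch of such an override, assuming the kwargs are forwarded to the default ChatOpenAI constructor as model_kwargs (the "seed" parameter is an OpenAI sampling option for more reproducible outputs):

from typing import Any

from django_ai_assistant import AIAssistant

class DeterministicAssistant(AIAssistant):
    id = "deterministic_assistant"
    name = "Deterministic Assistant"
    instructions = "You are a helpful assistant."
    model = "gpt-4o"

    def get_model_kwargs(self) -> dict[str, Any]:
        # Forwarded to the LLM constructor; "seed" is illustrative
        return {"seed": 42}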
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_llm","title":"get_llm()","text":"

Get the LangChain LLM instance for the assistant. By default, this uses the OpenAI implementation.

get_model, get_temperature, and get_model_kwargs are used to create the LLM instance.

Override this method to use a different LLM implementation.

Returns:

Name Type Description BaseChatModel BaseChatModel

The LLM instance for the assistant.

Source code in django_ai_assistant/helpers/assistants.py
def get_llm(self) -> BaseChatModel:\n    \"\"\"Get the LangChain LLM instance for the assistant.\n    By default, this uses the OpenAI implementation.\\n\n    `get_model`, `get_temperature`, and `get_model_kwargs` are used to create the LLM instance.\\n\n    Override this method to use a different LLM implementation.\n\n    Returns:\n        BaseChatModel: The LLM instance for the assistant.\n    \"\"\"\n    model = self.get_model()\n    temperature = self.get_temperature()\n    model_kwargs = self.get_model_kwargs()\n    return ChatOpenAI(\n        model=model,\n        temperature=temperature,\n        model_kwargs=model_kwargs,\n    )\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_structured_output_llm","title":"get_structured_output_llm()","text":"

Get the LLM model to use for the structured output.

Returns:

Name Type Description BaseChatModel Runnable

The LLM model to use for the structured output.

Source code in django_ai_assistant/helpers/assistants.py
def get_structured_output_llm(self) -> Runnable:\n    \"\"\"Get the LLM model to use for the structured output.\n\n    Returns:\n        BaseChatModel: The LLM model to use for the structured output.\n    \"\"\"\n    if not self.structured_output:\n        raise ValueError(\"structured_output is not defined\")\n\n    llm = self.get_llm()\n\n    method = \"json_mode\"\n    if isinstance(llm, ChatOpenAI):\n        # When using ChatOpenAI, it's better to use json_schema method\n        # because it enables strict mode.\n        # https://platform.openai.com/docs/guides/structured-outputs\n        method = \"json_schema\"\n\n    return llm.with_structured_output(self.structured_output, method=method)\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_tools","title":"get_tools()","text":"

Get the list of method tools the assistant can use. By default, this is the _method_tools attribute, which are all @method_tools.

Override and call super to add additional tools, such as any langchain_community tools.

Returns:

Type Description Sequence[BaseTool]

Sequence[BaseTool]: The list of tools the assistant can use.

Source code in django_ai_assistant/helpers/assistants.py
def get_tools(self) -> Sequence[BaseTool]:\n    \"\"\"Get the list of method tools the assistant can use.\n    By default, this is the `_method_tools` attribute, which are all `@method_tool`s.\\n\n    Override and call super to add additional tools,\n    such as [any langchain_community tools](https://python.langchain.com/v0.3/docs/integrations/tools/).\n\n    Returns:\n        Sequence[BaseTool]: The list of tools the assistant can use.\n    \"\"\"\n    return self._method_tools\n
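A sketch of extending the tool list with a langchain_community tool (assumes the tavily-python integration is installed and TAVILY_API_KEY is set; assistant names are hypothetical):

from django_ai_assistant import AIAssistant
from langchain_community.tools.tavily_search import TavilySearchResults

class SearchAssistant(AIAssistant):
    id = "search_assistant"
    name = "Search Assistant"
    instructions = "You are a research assistant that can search the web."
    model = "gpt-4o"

    def get_tools(self):
        # Keep all @method_tool tools and prepend a community tool
        return [TavilySearchResults(), *super().get_tools()]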
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_document_separator","title":"get_document_separator()","text":"

Get the RAG document separator to use in the prompt. Only used when has_rag=True.

Defaults to \"\\n\\n\", which is the LangChain default.

Override this method to use a different separator.

Returns:

Name Type Description str str

a separator for documents in the prompt.

Source code in django_ai_assistant/helpers/assistants.py
def get_document_separator(self) -> str:\n    \"\"\"Get the RAG document separator to use in the prompt. Only used when `has_rag=True`.\\n\n    Defaults to `\"\\\\n\\\\n\"`, which is the LangChain default.\\n\n    Override this method to use a different separator.\n\n    Returns:\n        str: a separator for documents in the prompt.\n    \"\"\"\n    return DEFAULT_DOCUMENT_SEPARATOR\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_document_prompt","title":"get_document_prompt()","text":"

Get the PromptTemplate template to use when rendering RAG documents in the prompt. Only used when has_rag=True.

Defaults to PromptTemplate.from_template(\"{page_content}\"), which is the LangChain default.

Override this method to use a different template.

Returns:

Name Type Description PromptTemplate PromptTemplate

a prompt template for RAG documents.

Source code in django_ai_assistant/helpers/assistants.py
def get_document_prompt(self) -> PromptTemplate:\n    \"\"\"Get the PromptTemplate template to use when rendering RAG documents in the prompt.\n    Only used when `has_rag=True`.\\n\n    Defaults to `PromptTemplate.from_template(\"{page_content}\")`, which is the LangChain default.\\n\n    Override this method to use a different template.\n\n    Returns:\n        PromptTemplate: a prompt template for RAG documents.\n    \"\"\"\n    return DEFAULT_DOCUMENT_PROMPT\n
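A sketch of overriding the document prompt to render metadata alongside the content (this assumes each retrieved Document carries a "source" metadata key, which is a convention, not a library guarantee):

from django_ai_assistant import AIAssistant
from langchain_core.prompts import PromptTemplate

class CitingAssistant(AIAssistant):
    id = "citing_assistant"
    name = "Citing Assistant"
    instructions = "Answer using the provided context and cite sources."
    model = "gpt-4o"
    has_rag = True

    def get_document_prompt(self) -> PromptTemplate:
        # {page_content} comes from the Document body, {source} from its metadata
        return PromptTemplate.from_template("Source: {source}\n{page_content}")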
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_retriever","title":"get_retriever()","text":"

Get the RAG retriever to use for fetching documents.

Must be implemented by subclasses when has_rag=True.

Returns:

Name Type Description BaseRetriever BaseRetriever

the RAG retriever to use for fetching documents.

Source code in django_ai_assistant/helpers/assistants.py
def get_retriever(self) -> BaseRetriever:\n    \"\"\"Get the RAG retriever to use for fetching documents.\\n\n    Must be implemented by subclasses when `has_rag=True`.\\n\n\n    Returns:\n        BaseRetriever: the RAG retriever to use for fetching documents.\n    \"\"\"\n    raise NotImplementedError(\n        f\"Override the get_retriever with your implementation at {self.__class__.__name__}\"\n    )\n
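A sketch of one possible implementation, assuming a FAISS index was previously persisted to a hypothetical "docs_index/" directory and that langchain-openai is installed:

from django_ai_assistant import AIAssistant
from langchain_community.vectorstores import FAISS
from langchain_core.retrievers import BaseRetriever
from langchain_openai import OpenAIEmbeddings

class DocsAssistant(AIAssistant):
    id = "docs_assistant"
    name = "Docs Assistant"
    instructions = "Answer questions about the project documentation."
    model = "gpt-4o"
    has_rag = True

    def get_retriever(self) -> BaseRetriever:
        # Load the prebuilt index and expose it as a LangChain retriever
        vectorstore = FAISS.load_local(
            "docs_index",
            OpenAIEmbeddings(),
            allow_dangerous_deserialization=True,
        )
        return vectorstore.as_retriever(search_kwargs={"k": 4})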
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_contextualize_prompt","title":"get_contextualize_prompt()","text":"

Get the contextualize prompt template for the assistant.

This is used when has_rag=True and there are previous messages in the thread. Since the latest user question might reference the chat history, the LLM needs to generate a new standalone question, and use that question to query the retriever for relevant documents.

By default, this is a prompt that asks the LLM to reformulate the latest user question without the chat history.

Override this method to use a different contextualize prompt.

See get_history_aware_retriever for how this prompt is used.

Returns:

Name Type Description ChatPromptTemplate ChatPromptTemplate

The contextualize prompt template for the assistant.

Source code in django_ai_assistant/helpers/assistants.py
def get_contextualize_prompt(self) -> ChatPromptTemplate:\n    \"\"\"Get the contextualize prompt template for the assistant.\\n\n    This is used when `has_rag=True` and there are previous messages in the thread.\n    Since the latest user question might reference the chat history,\n    the LLM needs to generate a new standalone question,\n    and use that question to query the retriever for relevant documents.\\n\n    By default, this is a prompt that asks the LLM to\n    reformulate the latest user question without the chat history.\\n\n    Override this method to use a different contextualize prompt.\\n\n    See `get_history_aware_retriever` for how this prompt is used.\\n\n\n    Returns:\n        ChatPromptTemplate: The contextualize prompt template for the assistant.\n    \"\"\"\n    contextualize_q_system_prompt = (\n        \"Given a chat history and the latest user question \"\n        \"which might reference context in the chat history, \"\n        \"formulate a standalone question which can be understood \"\n        \"without the chat history. Do NOT answer the question, \"\n        \"just reformulate it if needed and otherwise return it as is.\"\n    )\n    return ChatPromptTemplate.from_messages(\n        [\n            (\"system\", contextualize_q_system_prompt),\n            # TODO: make history key configurable?\n            MessagesPlaceholder(\"history\"),\n            # TODO: make input key configurable?\n            (\"human\", \"{input}\"),\n        ]\n    )\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.get_history_aware_retriever","title":"get_history_aware_retriever()","text":"

Get the history-aware retriever LangChain chain for the assistant.

This is used when has_rag=True to fetch documents based on the chat history.

By default, this is a chain that checks if there is chat history, and if so, it uses the chat history to generate a new standalone question to query the retriever for relevant documents.

When there is no chat history, it just passes the input to the retriever.

Override this method to use a different history-aware retriever chain.

Read more about the history-aware retriever in the LangChain docs.

Returns:

Type Description Runnable[dict, RetrieverOutput]

Runnable[dict, RetrieverOutput]: a history-aware retriever LangChain chain.

Source code in django_ai_assistant/helpers/assistants.py
def get_history_aware_retriever(self) -> Runnable[dict, RetrieverOutput]:\n    \"\"\"Get the history-aware retriever LangChain chain for the assistant.\\n\n    This is used when `has_rag=True` to fetch documents based on the chat history.\\n\n    By default, this is a chain that checks if there is chat history,\n    and if so, it uses the chat history to generate a new standalone question\n    to query the retriever for relevant documents.\\n\n    When there is no chat history, it just passes the input to the retriever.\\n\n    Override this method to use a different history-aware retriever chain.\n\n    Read more about the history-aware retriever in the\n    [LangChain docs](https://python.langchain.com/v0.2/docs/how_to/qa_chat_history_how_to/).\n\n    Returns:\n        Runnable[dict, RetrieverOutput]: a history-aware retriever LangChain chain.\n    \"\"\"\n    llm = self.get_llm()\n    retriever = self.get_retriever()\n    prompt = self.get_contextualize_prompt()\n\n    # Based on create_history_aware_retriever:\n    return RunnableBranch(\n        (\n            lambda x: not x.get(\"history\", False),  # pyright: ignore[reportAttributeAccessIssue]\n            # If no chat history, then we just pass input to retriever\n            (lambda x: x[\"input\"]) | retriever,\n        ),\n        # If chat history, then we pass inputs to LLM chain, then to retriever\n        prompt | llm | StrOutputParser() | retriever,\n    )\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.as_graph","title":"as_graph(thread_id=None)","text":"

Create the LangGraph graph for the assistant.

This graph is an agent that supports chat history, tool calling, and RAG (if has_rag=True).

as_graph uses many other methods to create the graph for the assistant. Prefer to override the other methods to customize the graph for the assistant. Only override this method if you need to customize the graph at a lower level.

Parameters:

Name Type Description Default thread_id Any | None

The thread ID for the chat message history. If None, an in-memory chat message history is used.

None

Returns:

Type Description Runnable[dict, dict]

the compiled graph

Source code in django_ai_assistant/helpers/assistants.py
@with_cast_id\ndef as_graph(self, thread_id: Any | None = None) -> Runnable[dict, dict]:\n    \"\"\"Create the LangGraph graph for the assistant.\\n\n    This graph is an agent that supports chat history, tool calling, and RAG (if `has_rag=True`).\\n\n    `as_graph` uses many other methods to create the graph for the assistant.\n    Prefer to override the other methods to customize the graph for the assistant.\n    Only override this method if you need to customize the graph at a lower level.\n\n    Args:\n        thread_id (Any | None): The thread ID for the chat message history.\n            If `None`, an in-memory chat message history is used.\n\n    Returns:\n        the compiled graph\n    \"\"\"\n    from django_ai_assistant.models import Thread\n\n    llm = self.get_llm()\n    tools = self.get_tools()\n    llm_with_tools = llm.bind_tools(tools) if tools else llm\n    if thread_id:\n        thread = Thread.objects.get(id=thread_id)\n    else:\n        thread = None\n\n    def custom_add_messages(left: list[BaseMessage], right: list[BaseMessage]):\n        result = add_messages(left, right)  # type: ignore\n        if thread:\n            # Save all messages, except the initial system message:\n            thread_messages = [m for m in result if not isinstance(m, SystemMessage)]\n            save_django_messages(cast(list[BaseMessage], thread_messages), thread=thread)\n        return result\n\n    class AgentState(TypedDict):\n        messages: Annotated[list[AnyMessage], custom_add_messages]\n        input: str | None  # noqa: A003\n        output: Any\n\n    def setup(state: AgentState):\n        system_prompt = self.get_instructions()\n        return {\"messages\": [SystemMessage(content=system_prompt)]}\n\n    def history(state: AgentState):\n        messages = thread.get_messages(include_extra_messages=True) if thread else []\n        if state[\"input\"]:\n            messages.append(HumanMessage(content=state[\"input\"]))\n\n        return {\"messages\": messages}\n\n    def retriever(state: AgentState):\n        if not self.has_rag:\n            return\n\n        retriever = self.get_history_aware_retriever()\n        # Remove the initial instructions to prevent having two SystemMessages\n        # This is necessary for compatibility with Anthropic\n        messages_to_summarize = state[\"messages\"][1:-1]\n        input_message = state[\"messages\"][-1]\n        docs = retriever.invoke(\n            {\"input\": input_message.content, \"history\": messages_to_summarize}\n        )\n\n        document_separator = self.get_document_separator()\n        document_prompt = self.get_document_prompt()\n\n        formatted_docs = document_separator.join(\n            format_document(doc, document_prompt) for doc in docs\n        )\n\n        system_message = state[\"messages\"][0]\n        system_message.content += (\n            f\"\\n\\n---START OF CONTEXT---\\n{formatted_docs}---END OF CONTEXT---\\n\\n\"\n        )\n\n    def agent(state: AgentState):\n        response = llm_with_tools.invoke(state[\"messages\"])\n\n        return {\"messages\": [response]}\n\n    def tool_selector(state: AgentState):\n        last_message = state[\"messages\"][-1]\n\n        if isinstance(last_message, AIMessage) and last_message.tool_calls:\n            return \"call_tool\"\n\n        return \"continue\"\n\n    def record_response(state: AgentState):\n        # Structured output must happen in the end, to avoid disabling tool calling.\n        # Tool calling + structured output is not supported by 
OpenAI:\n        if self.structured_output:\n            messages = state[\"messages\"]\n\n            # Change the original system prompt:\n            if isinstance(messages[0], SystemMessage):\n                messages[0].content += \"\\nUse the chat history to produce a JSON output.\"\n\n            # Add a final message asking for JSON generation / structured output:\n            json_request_message = HumanMessage(\n                content=\"Use the chat history to produce a JSON output.\"\n            )\n            messages.append(json_request_message)\n\n            llm_with_structured_output = self.get_structured_output_llm()\n            response = llm_with_structured_output.invoke(messages)\n        else:\n            response = state[\"messages\"][-1].content\n\n        return {\"output\": response}\n\n    workflow = StateGraph(AgentState)\n\n    workflow.add_node(\"setup\", setup)\n    workflow.add_node(\"history\", history)\n    workflow.add_node(\"retriever\", retriever)\n    workflow.add_node(\"agent\", agent)\n    workflow.add_node(\"tools\", ToolNode(tools))\n    workflow.add_node(\"respond\", record_response)\n\n    workflow.set_entry_point(\"setup\")\n    workflow.add_edge(\"setup\", \"history\")\n    workflow.add_edge(\"history\", \"retriever\")\n    workflow.add_edge(\"retriever\", \"agent\")\n    workflow.add_conditional_edges(\n        \"agent\",\n        tool_selector,\n        {\n            \"call_tool\": \"tools\",\n            \"continue\": \"respond\",\n        },\n    )\n    workflow.add_edge(\"tools\", \"agent\")\n    workflow.add_edge(\"respond\", END)\n\n    return workflow.compile()\n
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.invoke","title":"invoke(*args, thread_id, **kwargs)","text":"

Invoke the assistant LangChain graph with the given arguments and keyword arguments.

This is the lower-level method to run the assistant.

The graph is created by the as_graph method.

Parameters:

Name Type Description Default *args Any

Positional arguments to pass to the graph. To add a new message, use a dict like {\"input\": \"user message\"}. If the thread already has a HumanMessage at the end, you can invoke without args.

() thread_id Any | None

The thread ID for the chat message history. If None, an in-memory chat message history is used.

required **kwargs Any

Keyword arguments to pass to the graph.

{}

Returns:

Name Type Description dict dict

The output of the assistant graph, structured like {\"output\": \"assistant response\", \"history\": ...}.

Source code in django_ai_assistant/helpers/assistants.py
@with_cast_id\ndef invoke(self, *args: Any, thread_id: Any | None, **kwargs: Any) -> dict:\n    \"\"\"Invoke the assistant LangChain graph with the given arguments and keyword arguments.\\n\n    This is the lower-level method to run the assistant.\\n\n    The graph is created by the `as_graph` method.\\n\n\n    Args:\n        *args: Positional arguments to pass to the graph.\n            To add a new message, use a dict like `{\"input\": \"user message\"}`.\n            If thread already has a `HumanMessage` in the end, you can invoke without args.\n        thread_id (Any | None): The thread ID for the chat message history.\n            If `None`, an in-memory chat message history is used.\n        **kwargs: Keyword arguments to pass to the graph.\n\n    Returns:\n        dict: The output of the assistant graph,\n            structured like `{\"output\": \"assistant response\", \"history\": ...}`.\n    \"\"\"\n    graph = self.as_graph(thread_id)\n    config = kwargs.pop(\"config\", {})\n    config[\"max_concurrency\"] = config.pop(\"max_concurrency\", self.tool_max_concurrency)\n    return graph.invoke(*args, config=config, **kwargs)\n
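A usage sketch, assuming a Django view context where request and a Thread instance are available, and reusing the hypothetical DocsAssistant from above; the config dict entries are standard LangChain RunnableConfig keys:

assistant = DocsAssistant(user=request.user)
result = assistant.invoke(
    {"input": "How do I configure RAG?"},
    thread_id=thread.id,
    config={"max_concurrency": 4},  # optional; defaults to tool_max_concurrency
)
print(result["output"])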
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.run","title":"run(message, thread_id=None, **kwargs)","text":"

Run the assistant with the given message and thread ID.

This is the higher-level method to run the assistant.

Parameters:

Name Type Description Default message str

The user message to pass to the assistant.

required thread_id Any | None

The thread ID for the chat message history. If None, an in-memory chat message history is used.

None **kwargs Any

Additional keyword arguments to pass to the graph.

{}

Returns:

Name Type Description Any Any

The assistant response to the user message.

Source code in django_ai_assistant/helpers/assistants.py
@with_cast_id\ndef run(self, message: str, thread_id: Any | None = None, **kwargs: Any) -> Any:\n    \"\"\"Run the assistant with the given message and thread ID.\\n\n    This is the higher-level method to run the assistant.\\n\n\n    Args:\n        message (str): The user message to pass to the assistant.\n        thread_id (Any | None): The thread ID for the chat message history.\n            If `None`, an in-memory chat message history is used.\n        **kwargs: Additional keyword arguments to pass to the graph.\n\n    Returns:\n        Any: The assistant response to the user message.\n    \"\"\"\n    return self.invoke(\n        {\n            \"input\": message,\n        },\n        thread_id=thread_id,\n        **kwargs,\n    )[\"output\"]\n
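The same call via the higher-level API, which returns only the "output" value of the graph result (again assuming the hypothetical DocsAssistant and a Django view context):

assistant = DocsAssistant(user=request.user)
answer = assistant.run("How do I configure RAG?", thread_id=thread.id)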
"},{"location":"reference/assistants-ref/#django_ai_assistant.helpers.assistants.AIAssistant.as_tool","title":"as_tool(description)","text":"

Create a tool from the assistant.

This is useful to compose assistants.

Parameters:

Name Type Description Default description str

The description for the tool.

required

Returns:

Name Type Description BaseTool BaseTool

A tool that runs the assistant. The tool name is this assistant's id.

Source code in django_ai_assistant/helpers/assistants.py
def as_tool(self, description: str) -> BaseTool:\n    \"\"\"Create a tool from the assistant.\\n\n    This is useful to compose assistants.\\n\n\n    Args:\n        description (str): The description for the tool.\n\n    Returns:\n        BaseTool: A tool that runs the assistant. The tool name is this assistant's id.\n    \"\"\"\n    return StructuredTool.from_function(\n        func=self._run_as_tool,\n        name=self.id,\n        description=description,\n    )\n
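A sketch of composing assistants this way (both assistant classes are hypothetical); the outer assistant exposes the inner one as a tool the LLM can call:

from django_ai_assistant import AIAssistant

class WeatherAssistant(AIAssistant):
    id = "weather_assistant"
    name = "Weather Assistant"
    instructions = "You answer questions about the weather."
    model = "gpt-4o"

class ConciergeAssistant(AIAssistant):
    id = "concierge_assistant"
    name = "Concierge Assistant"
    instructions = "You route user requests to the right specialist."
    model = "gpt-4o"

    def get_tools(self):
        # The tool name will be "weather_assistant" (the inner assistant's id)
        return [
            WeatherAssistant(user=self._user).as_tool(
                description="Answers questions about the weather."
            ),
            *super().get_tools(),
        ]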
"},{"location":"reference/models-ref/","title":"django_ai_assistant.models","text":""},{"location":"reference/models-ref/#django_ai_assistant.models.Thread","title":"Thread","text":"

Bases: Model

Thread model. A thread is a collection of messages between a user and the AI assistant. Also called conversation or session.

Source code in django_ai_assistant/models.py
class Thread(models.Model):\n    \"\"\"Thread model. A thread is a collection of messages between a user and the AI assistant.\n    Also called conversation or session.\"\"\"\n\n    id: Any  # noqa: A003\n    messages: Manager[\"Message\"]\n    name = models.CharField(max_length=255, blank=True)\n    \"\"\"Name of the thread. Can be blank.\"\"\"\n    created_by = models.ForeignKey(\n        settings.AUTH_USER_MODEL,\n        on_delete=models.SET_NULL,\n        related_name=\"ai_assistant_threads\",\n        null=True,\n    )\n    \"\"\"User who created the thread. Can be null. Set to null/None when user is deleted.\"\"\"\n    assistant_id = models.CharField(max_length=255, blank=True)\n    \"\"\"Associated assistant ID. Can be empty.\"\"\"\n    created_at = models.DateTimeField(auto_now_add=True)\n    \"\"\"Date and time when the thread was created.\n    Automatically set when the thread is created.\"\"\"\n    updated_at = models.DateTimeField(auto_now=True)\n    \"\"\"Date and time when the thread was last updated.\n    Automatically set when the thread is updated.\"\"\"\n\n    class Meta:\n        verbose_name = \"Thread\"\n        verbose_name_plural = \"Threads\"\n        ordering = (\"-created_at\",)\n        indexes = (Index(F(\"created_at\").desc(), name=\"thread_created_at_desc\"),)\n\n    def __str__(self) -> str:\n        \"\"\"Return the name of the thread as the string representation of the thread.\"\"\"\n        return self.name\n\n    def __repr__(self) -> str:\n        \"\"\"Return the string representation of the thread like '<Thread name>'\"\"\"\n        return f\"<Thread {self.name}>\"\n\n    def get_messages(self, include_extra_messages: bool = False) -> list[BaseMessage]:\n        \"\"\"\n        Get LangChain messages objects from the thread.\n\n        Args:\n            include_extra_messages (bool): Whether to include non-chat messages (like tool calls).\n\n        Returns:\n            list[BaseMessage]: List of messages\n        \"\"\"\n\n        messages = messages_from_dict(\n            cast(\n                Sequence[dict[str, BaseMessage]],\n                Message.objects.filter(thread=self)\n                .order_by(\"created_at\")\n                .values_list(\"message\", flat=True),\n            )\n        )\n        if not include_extra_messages:\n            messages = [\n                m\n                for m in messages\n                if isinstance(m, HumanMessage | ChatMessage)\n                or (isinstance(m, AIMessage) and not m.tool_calls)\n            ]\n        return cast(list[BaseMessage], messages)\n
"},{"location":"reference/models-ref/#django_ai_assistant.models.Thread.name","title":"name = models.CharField(max_length=255, blank=True) class-attribute instance-attribute","text":"

Name of the thread. Can be blank.

"},{"location":"reference/models-ref/#django_ai_assistant.models.Thread.created_by","title":"created_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, related_name='ai_assistant_threads', null=True) class-attribute instance-attribute","text":"

User who created the thread. Can be null. Set to null/None when user is deleted.

"},{"location":"reference/models-ref/#django_ai_assistant.models.Thread.assistant_id","title":"assistant_id = models.CharField(max_length=255, blank=True) class-attribute instance-attribute","text":"

Associated assistant ID. Can be empty.

"},{"location":"reference/models-ref/#django_ai_assistant.models.Thread.created_at","title":"created_at = models.DateTimeField(auto_now_add=True) class-attribute instance-attribute","text":"

Date and time when the thread was created. Automatically set when the thread is created.

"},{"location":"reference/models-ref/#django_ai_assistant.models.Thread.updated_at","title":"updated_at = models.DateTimeField(auto_now=True) class-attribute instance-attribute","text":"

Date and time when the thread was last updated. Automatically set when the thread is updated.

"},{"location":"reference/models-ref/#django_ai_assistant.models.Thread.__str__","title":"__str__()","text":"

Return the name of the thread as the string representation of the thread.

Source code in django_ai_assistant/models.py
def __str__(self) -> str:\n    \"\"\"Return the name of the thread as the string representation of the thread.\"\"\"\n    return self.name\n
"},{"location":"reference/models-ref/#django_ai_assistant.models.Thread.__repr__","title":"__repr__()","text":"

Return the string representation of the thread like '<Thread name>' Source code in django_ai_assistant/models.py

def __repr__(self) -> str:\n    \"\"\"Return the string representation of the thread like '<Thread name>'\"\"\"\n    return f\"<Thread {self.name}>\"\n
"},{"location":"reference/models-ref/#django_ai_assistant.models.Thread.get_messages","title":"get_messages(include_extra_messages=False)","text":"

Get LangChain messages objects from the thread.

Parameters:

Name Type Description Default include_extra_messages bool

Whether to include non-chat messages (like tool calls).

False

Returns:

Type Description list[BaseMessage]

list[BaseMessage]: List of messages

Source code in django_ai_assistant/models.py
def get_messages(self, include_extra_messages: bool = False) -> list[BaseMessage]:\n    \"\"\"\n    Get LangChain messages objects from the thread.\n\n    Args:\n        include_extra_messages (bool): Whether to include non-chat messages (like tool calls).\n\n    Returns:\n        list[BaseMessage]: List of messages\n    \"\"\"\n\n    messages = messages_from_dict(\n        cast(\n            Sequence[dict[str, BaseMessage]],\n            Message.objects.filter(thread=self)\n            .order_by(\"created_at\")\n            .values_list(\"message\", flat=True),\n        )\n    )\n    if not include_extra_messages:\n        messages = [\n            m\n            for m in messages\n            if isinstance(m, HumanMessage | ChatMessage)\n            or (isinstance(m, AIMessage) and not m.tool_calls)\n        ]\n    return cast(list[BaseMessage], messages)\n
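A short usage sketch (thread_id is assumed to come from the surrounding context):

from django_ai_assistant.models import Thread

thread = Thread.objects.get(id=thread_id)
for message in thread.get_messages():
    # Each item is a LangChain BaseMessage (HumanMessage, AIMessage, ...)
    print(type(message).__name__, message.content)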
"},{"location":"reference/models-ref/#django_ai_assistant.models.Message","title":"Message","text":"

Bases: Model

Message model. A message is a text that is part of a thread. A message can be sent by a user or the AI assistant.

The message data is stored as a JSON field called message.

Source code in django_ai_assistant/models.py
class Message(models.Model):\n    \"\"\"Message model. A message is a text that is part of a thread.\n    A message can be sent by a user or the AI assistant.\\n\n    The message data is stored as a JSON field called `message`.\"\"\"\n\n    id: Any  # noqa: A003\n    thread = models.ForeignKey(Thread, on_delete=models.CASCADE, related_name=\"messages\")\n    \"\"\"Thread to which the message belongs.\"\"\"\n    thread_id: Any\n    message = models.JSONField()\n    \"\"\"Message content. This is a serialized LangChain `BaseMessage` that was serialized\n    with `message_to_dict` and can be deserialized with `messages_from_dict`.\"\"\"\n    created_at = models.DateTimeField(auto_now_add=True)\n    \"\"\"Date and time when the message was created.\n    Automatically set when the message is created.\"\"\"\n\n    class Meta:\n        verbose_name = \"Message\"\n        verbose_name_plural = \"Messages\"\n        ordering = (\"created_at\",)\n        indexes = (Index(F(\"created_at\"), name=\"message_created_at\"),)\n\n    def __str__(self) -> str:\n        \"\"\"Return internal message data from `message` attribute\n        as the string representation of the message.\"\"\"\n        return json.dumps(self.message)\n\n    def __repr__(self) -> str:\n        \"\"\"Return the string representation of the message like '<Message id at thread_id>'\"\"\"\n        return f\"<Message {self.id} at {self.thread_id}>\"\n
"},{"location":"reference/models-ref/#django_ai_assistant.models.Message.thread","title":"thread = models.ForeignKey(Thread, on_delete=models.CASCADE, related_name='messages') class-attribute instance-attribute","text":"

Thread to which the message belongs.

"},{"location":"reference/models-ref/#django_ai_assistant.models.Message.message","title":"message = models.JSONField() class-attribute instance-attribute","text":"

Message content. This is a serialized LangChain BaseMessage that was serialized with message_to_dict and can be deserialized with messages_from_dict.

"},{"location":"reference/models-ref/#django_ai_assistant.models.Message.created_at","title":"created_at = models.DateTimeField(auto_now_add=True) class-attribute instance-attribute","text":"

Date and time when the message was created. Automatically set when the message is created.

"},{"location":"reference/models-ref/#django_ai_assistant.models.Message.__str__","title":"__str__()","text":"

Return internal message data from message attribute as the string representation of the message.

Source code in django_ai_assistant/models.py
def __str__(self) -> str:\n    \"\"\"Return internal message data from `message` attribute\n    as the string representation of the message.\"\"\"\n    return json.dumps(self.message)\n
"},{"location":"reference/models-ref/#django_ai_assistant.models.Message.__repr__","title":"__repr__()","text":"

Return the string representation of the message like '<Message id at thread_id>' Source code in django_ai_assistant/models.py

def __repr__(self) -> str:\n    \"\"\"Return the string representation of the message like '<Message id at thread_id>'\"\"\"\n    return f\"<Message {self.id} at {self.thread_id}>\"\n
"},{"location":"reference/use-cases-ref/","title":"django_ai_assistant.helpers.use_cases","text":""},{"location":"reference/use-cases-ref/#django_ai_assistant.helpers.use_cases.get_assistant_cls","title":"get_assistant_cls(assistant_id, user, request=None)","text":"

Get assistant class by id.

Uses AI_ASSISTANT_CAN_RUN_ASSISTANT_FN permission to check if user can run the assistant.

Parameters:

Name Type Description Default assistant_id str

Assistant id to get

required user Any

Current user

required request HttpRequest | None

Current request, if any

None

Returns: type[AIAssistant]: Assistant class with the given id Raises: AIAssistantNotDefinedError: If assistant with the given id is not found AIUserNotAllowedError: If user is not allowed to use the assistant

Source code in django_ai_assistant/helpers/use_cases.py
def get_assistant_cls(\n    assistant_id: str,\n    user: Any,\n    request: HttpRequest | None = None,\n) -> type[AIAssistant]:\n    \"\"\"Get assistant class by id.\\n\n    Uses `AI_ASSISTANT_CAN_RUN_ASSISTANT_FN` permission to check if user can run the assistant.\n\n    Args:\n        assistant_id (str): Assistant id to get\n        user (Any): Current user\n        request (HttpRequest | None): Current request, if any\n    Returns:\n        type[AIAssistant]: Assistant class with the given id\n    Raises:\n        AIAssistantNotDefinedError: If assistant with the given id is not found\n        AIUserNotAllowedError: If user is not allowed to use the assistant\n    \"\"\"\n    if assistant_id not in AIAssistant.get_cls_registry():\n        raise AIAssistantNotDefinedError(f\"Assistant with id={assistant_id} not found\")\n    assistant_cls = AIAssistant.get_cls(assistant_id)\n    if not can_run_assistant(\n        assistant_cls=assistant_cls,\n        user=user,\n        request=request,\n    ):\n        raise AIUserNotAllowedError(\"User is not allowed to use this assistant\")\n    return assistant_cls\n
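For illustration, a view-level sketch that maps this helper's two failure modes to HTTP errors; the exception import path is an assumption, since this page only names the exception classes:

```python
from django.core.exceptions import PermissionDenied
from django.http import Http404

from django_ai_assistant.exceptions import (  # assumed import path
    AIAssistantNotDefinedError,
    AIUserNotAllowedError,
)
from django_ai_assistant.helpers.use_cases import get_assistant_cls


def resolve_assistant(request, assistant_id: str):
    try:
        return get_assistant_cls(assistant_id, user=request.user, request=request)
    except AIAssistantNotDefinedError:
        raise Http404("No such assistant")  # unknown id -> 404
    except AIUserNotAllowedError:
        raise PermissionDenied  # known id, but this user cannot run it -> 403
```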
"},{"location":"reference/use-cases-ref/#django_ai_assistant.helpers.use_cases.get_single_assistant_info","title":"get_single_assistant_info(assistant_id, user, request=None)","text":"

Get assistant info by id. Returns a dictionary with the assistant id and name.

Uses AI_ASSISTANT_CAN_RUN_ASSISTANT_FN permission to check if user can see the assistant.

Parameters:

Name Type Description Default assistant_id str

Assistant id to get

required user Any

Current user

required request HttpRequest | None

Current request, if any

None

Returns: dict[str, str]: dict like {\"id\": \"personal_ai\", \"name\": \"Personal AI\"} Raises: AIAssistantNotDefinedError: If assistant with the given id is not found AIUserNotAllowedError: If user is not allowed to see the assistant

Source code in django_ai_assistant/helpers/use_cases.py
def get_single_assistant_info(\n    assistant_id: str,\n    user: Any,\n    request: HttpRequest | None = None,\n) -> dict[str, str]:\n    \"\"\"Get assistant info by id. Returns a dictionary with the assistant id and name.\\n\n    Uses `AI_ASSISTANT_CAN_RUN_ASSISTANT_FN` permission to check if user can see the assistant.\n\n    Args:\n        assistant_id (str): Assistant id to get\n        user (Any): Current user\n        request (HttpRequest | None): Current request, if any\n    Returns:\n        dict[str, str]: dict like `{\"id\": \"personal_ai\", \"name\": \"Personal AI\"}`\n    Raises:\n        AIAssistantNotDefinedError: If assistant with the given id is not found\n        AIUserNotAllowedError: If user is not allowed to see the assistant\n    \"\"\"\n    assistant_cls = get_assistant_cls(assistant_id, user, request)\n\n    return {\n        \"id\": assistant_id,\n        \"name\": assistant_cls.name,\n    }\n
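A short usage sketch; the "personal_ai" id is this page's own example value:

```python
from django.http import JsonResponse

from django_ai_assistant.helpers.use_cases import get_single_assistant_info


def assistant_info_view(request):
    # Raises if the id is unknown or if this user cannot run the assistant.
    info = get_single_assistant_info("personal_ai", user=request.user, request=request)
    return JsonResponse(info)  # e.g. {"id": "personal_ai", "name": "Personal AI"}
```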
"},{"location":"reference/use-cases-ref/#django_ai_assistant.helpers.use_cases.get_assistants_info","title":"get_assistants_info(user, request=None)","text":"

Get all assistants info. Returns a list of dictionaries with the assistant id and name.

Uses AI_ASSISTANT_CAN_RUN_ASSISTANT_FN permission to check which assistants the user can see, and returns only those.

Parameters:

Name Type Description Default user Any

Current user

required request HttpRequest | None

Current request, if any

None

Returns: list[dict[str, str]]: List of dicts like [{\"id\": \"personal_ai\", \"name\": \"Personal AI\"}, ...]

Source code in django_ai_assistant/helpers/use_cases.py
def get_assistants_info(\n    user: Any,\n    request: HttpRequest | None = None,\n) -> list[dict[str, str]]:\n    \"\"\"Get all assistants info. Returns a list of dictionaries with the assistant id and name.\\n\n    Uses `AI_ASSISTANT_CAN_RUN_ASSISTANT_FN` permission to check which assistants the user can see,\n    and returns only those.\n\n    Args:\n        user (Any): Current user\n        request (HttpRequest | None): Current request, if any\n    Returns:\n        list[dict[str, str]]: List of dicts like `[{\"id\": \"personal_ai\", \"name\": \"Personal AI\"}, ...]`\n    \"\"\"\n    assistant_info_list = []\n    for assistant_id in AIAssistant.get_cls_registry().keys():\n        try:\n            info = get_single_assistant_info(assistant_id, user, request)\n            assistant_info_list.append(info)\n        except AIUserNotAllowedError:\n            continue\n    return assistant_info_list\n
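Because assistants the user cannot run are silently filtered out rather than raising, the result can be returned as-is. A minimal sketch:

```python
from django.http import JsonResponse

from django_ai_assistant.helpers.use_cases import get_assistants_info


def assistant_list_view(request):
    # Only assistants that pass the AI_ASSISTANT_CAN_RUN_ASSISTANT_FN check are listed.
    return JsonResponse({"assistants": get_assistants_info(request.user, request=request)})
```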
"},{"location":"reference/use-cases-ref/#django_ai_assistant.helpers.use_cases.create_message","title":"create_message(assistant_id, thread, user, content, request=None)","text":"

Create a message in a thread, then immediately run the assistant to get the AI response.

Uses AI_ASSISTANT_CAN_RUN_ASSISTANT_FN permission to check if user can run the assistant.

Uses AI_ASSISTANT_CAN_CREATE_MESSAGE_FN permission to check if user can create a message in the thread.

Parameters:

Name Type Description Default assistant_id str

Assistant id used to get the AI response

required thread Thread

Thread in which to create the message

required user Any

Current user

required content Any

Message content, usually a string

required request HttpRequest | None

Current request, if any

None

Returns: dict: The output of the assistant, structured like {\"output\": \"assistant response\", \"history\": ...} Raises: AIUserNotAllowedError: If user is not allowed to create messages in the thread

Source code in django_ai_assistant/helpers/use_cases.py
def create_message(\n    assistant_id: str,\n    thread: Thread,\n    user: Any,\n    content: Any,\n    request: HttpRequest | None = None,\n) -> dict:\n    \"\"\"Create a message in a thread, then immediately run the assistant to get the AI response.\\n\n    Uses `AI_ASSISTANT_CAN_RUN_ASSISTANT_FN` permission to check if user can run the assistant.\\n\n    Uses `AI_ASSISTANT_CAN_CREATE_MESSAGE_FN` permission to check if user can create a message in the thread.\n\n    Args:\n        assistant_id (str): Assistant id used to get the AI response\n        thread (Thread): Thread in which to create the message\n        user (Any): Current user\n        content (Any): Message content, usually a string\n        request (HttpRequest | None): Current request, if any\n    Returns:\n        dict: The output of the assistant,\n            structured like `{\"output\": \"assistant response\", \"history\": ...}`\n    Raises:\n        AIUserNotAllowedError: If user is not allowed to create messages in the thread\n    \"\"\"\n    assistant_cls = get_assistant_cls(assistant_id, user, request)\n\n    if not can_create_message(thread=thread, user=user, request=request):\n        raise AIUserNotAllowedError(\"User is not allowed to create messages in this thread\")\n\n    # TODO: Check if we can separate the message creation from the invoke\n    assistant = assistant_cls(user=user, request=request)\n    assistant_message = assistant.invoke(\n        {\"input\": content},\n        thread_id=thread.id,\n    )\n    return assistant_message\n
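A sketch of the send-and-respond flow inside a view; the assistant id and the form field name are illustrative:

```python
from django.http import JsonResponse
from django.views.decorators.http import require_POST

from django_ai_assistant.helpers.use_cases import create_message
from django_ai_assistant.models import Thread


@require_POST
def send_message_view(request, thread_id):
    thread = Thread.objects.get(id=thread_id)
    result = create_message(
        assistant_id="personal_ai",       # illustrative assistant id
        thread=thread,
        user=request.user,
        content=request.POST["content"],  # usually a plain string
        request=request,
    )
    return JsonResponse({"output": result["output"]})
```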
"},{"location":"reference/use-cases-ref/#django_ai_assistant.helpers.use_cases.create_thread","title":"create_thread(name, user, assistant_id=None, request=None)","text":"

Create a thread.

Uses AI_ASSISTANT_CAN_CREATE_THREAD_FN permission to check if user can create a thread.

Parameters:

Name Type Description Default name str

Thread name

required assistant_id str | None

Assistant ID to associate the thread with. If empty or None, the thread is not associated with any assistant.

None user Any

Current user

required request HttpRequest | None

Current request, if any

None

Returns: Thread: Created thread model instance Raises: AIUserNotAllowedError: If user is not allowed to create threads

Source code in django_ai_assistant/helpers/use_cases.py
def create_thread(\n    name: str,\n    user: Any,\n    assistant_id: str | None = None,\n    request: HttpRequest | None = None,\n) -> Thread:\n    \"\"\"Create a thread.\\n\n    Uses `AI_ASSISTANT_CAN_CREATE_THREAD_FN` permission to check if user can create a thread.\n\n    Args:\n        name (str): Thread name\n        assistant_id (str | None): Assistant ID to associate the thread with.\n            If empty or None, the thread is not associated with any assistant.\n        user (Any): Current user\n        request (HttpRequest | None): Current request, if any\n    Returns:\n        Thread: Created thread model instance\n    Raises:\n        AIUserNotAllowedError: If user is not allowed to create threads\n    \"\"\"\n    if not can_create_thread(user=user, request=request):\n        raise AIUserNotAllowedError(\"User is not allowed to create threads\")\n\n    thread = Thread.objects.create(name=name, created_by=user, assistant_id=assistant_id or \"\")\n    return thread\n
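A minimal sketch; `assistant_id` is optional, and passing None leaves the thread unassociated:

```python
from django_ai_assistant.helpers.use_cases import create_thread


def start_thread(request):
    # Raises AIUserNotAllowedError if the AI_ASSISTANT_CAN_CREATE_THREAD_FN check fails.
    return create_thread(
        name="Trip planning",        # illustrative name
        user=request.user,
        assistant_id="personal_ai",  # or None for a thread not tied to an assistant
        request=request,
    )
```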
"},{"location":"reference/use-cases-ref/#django_ai_assistant.helpers.use_cases.get_single_thread","title":"get_single_thread(thread_id, user, request=None)","text":"

Get a single thread by id.

Uses AI_ASSISTANT_CAN_VIEW_THREAD_FN permission to check if user can view the thread.

Parameters:

Name Type Description Default thread_id Any

Thread id to get

required user Any

Current user

required request HttpRequest | None

Current request, if any

None

Returns: Thread: Thread model instance Raises: AIUserNotAllowedError: If user is not allowed to view the thread

Source code in django_ai_assistant/helpers/use_cases.py
def get_single_thread(\n    thread_id: Any,\n    user: Any,\n    request: HttpRequest | None = None,\n) -> Thread:\n    \"\"\"Get a single thread by id.\\n\n    Uses `AI_ASSISTANT_CAN_VIEW_THREAD_FN` permission to check if user can view the thread.\n\n    Args:\n        thread_id (Any): Thread id to get\n        user (Any): Current user\n        request (HttpRequest | None): Current request, if any\n    Returns:\n        Thread: Thread model instance\n    Raises:\n        AIUserNotAllowedError: If user is not allowed to view the thread\n    \"\"\"\n    thread = Thread.objects.get(id=thread_id)\n\n    if not can_view_thread(thread=thread, user=user, request=request):\n        raise AIUserNotAllowedError(\"User is not allowed to view this thread\")\n\n    return thread\n
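A view-level sketch. Note the helper fetches the thread before the permission check, so it can also raise `Thread.DoesNotExist`; the exception import path is assumed:

```python
from django.http import Http404

from django_ai_assistant.exceptions import AIUserNotAllowedError  # assumed import path
from django_ai_assistant.helpers.use_cases import get_single_thread
from django_ai_assistant.models import Thread


def thread_detail_view(request, thread_id):
    try:
        thread = get_single_thread(thread_id, user=request.user, request=request)
    except (Thread.DoesNotExist, AIUserNotAllowedError):
        raise Http404  # hide other users' threads instead of revealing they exist
    ...  # render the thread
```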
"},{"location":"reference/use-cases-ref/#django_ai_assistant.helpers.use_cases.get_threads","title":"get_threads(user, assistant_id=None, request=None)","text":"

Get all threads for the user.

Uses AI_ASSISTANT_CAN_VIEW_THREAD_FN permission to check which threads the user can see, and returns only those.

Parameters:

Name Type Description Default user Any

Current user

required assistant_id str | None

Assistant ID to filter threads by. If empty or None, all threads for the user are returned.

None request HttpRequest | None

Current request, if any

None

Returns: list[Thread]: List of thread model instances

Source code in django_ai_assistant/helpers/use_cases.py
def get_threads(\n    user: Any,\n    assistant_id: str | None = None,\n    request: HttpRequest | None = None,\n) -> list[Thread]:\n    \"\"\"Get all threads for the user.\\n\n    Uses `AI_ASSISTANT_CAN_VIEW_THREAD_FN` permission to check which threads the user can see,\n    and returns only those.\n\n    Args:\n        user (Any): Current user\n        assistant_id (str | None): Assistant ID to filter threads by.\n            If empty or None, all threads for the user are returned.\n        request (HttpRequest | None): Current request, if any\n    Returns:\n        list[Thread]: List of thread model instances\n    \"\"\"\n    threads = Thread.objects.filter(created_by=user)\n\n    if assistant_id:\n        threads = threads.filter(assistant_id=assistant_id)\n\n    return list(\n        threads.filter(\n            id__in=[\n                thread.id\n                for thread in threads\n                if can_view_thread(thread=thread, user=user, request=request)\n            ]\n        )\n    )\n
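Both call shapes, with and without the assistant filter, in a minimal sketch:

```python
from django_ai_assistant.helpers.use_cases import get_threads


def list_user_threads(request):
    # All of the user's threads:
    all_threads = get_threads(user=request.user, request=request)
    # Only the threads tied to one assistant:
    personal = get_threads(user=request.user, assistant_id="personal_ai", request=request)
    return all_threads, personal
```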
"},{"location":"reference/use-cases-ref/#django_ai_assistant.helpers.use_cases.update_thread","title":"update_thread(thread, name, user, request=None)","text":"

Update thread name.

Uses AI_ASSISTANT_CAN_UPDATE_THREAD_FN permission to check if user can update the thread.

Parameters:

Name Type Description Default thread Thread

Thread model instance to update

required name str

New thread name

required user Any

Current user

required request HttpRequest | None

Current request, if any

None

Returns: Thread: Updated thread model instance Raises: AIUserNotAllowedError: If user is not allowed to update the thread

Source code in django_ai_assistant/helpers/use_cases.py
def update_thread(\n    thread: Thread,\n    name: str,\n    user: Any,\n    request: HttpRequest | None = None,\n) -> Thread:\n    \"\"\"Update thread name.\\n\n    Uses `AI_ASSISTANT_CAN_UPDATE_THREAD_FN` permission to check if user can update the thread.\n\n    Args:\n        thread (Thread): Thread model instance to update\n        name (str): New thread name\n        user (Any): Current user\n        request (HttpRequest | None): Current request, if any\n    Returns:\n        Thread: Updated thread model instance\n    Raises:\n        AIUserNotAllowedError: If user is not allowed to update the thread\n    \"\"\"\n    if not can_update_thread(thread=thread, user=user, request=request):\n        raise AIUserNotAllowedError(\"User is not allowed to update this thread\")\n\n    thread.name = name\n    thread.save()\n    return thread\n
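A rename sketch; the permission check happens inside the helper, so the caller only has to handle the denial:

```python
from django_ai_assistant.helpers.use_cases import update_thread


def rename_thread(request, thread, new_name: str):
    # Raises AIUserNotAllowedError if the user cannot update this thread.
    return update_thread(thread, name=new_name, user=request.user, request=request)
```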
"},{"location":"reference/use-cases-ref/#django_ai_assistant.helpers.use_cases.delete_thread","title":"delete_thread(thread, user, request=None)","text":"

Delete a thread.

Uses AI_ASSISTANT_CAN_DELETE_THREAD_FN permission to check if user can delete the thread.

Parameters:

Name Type Description Default thread Thread

Thread model instance to delete

required user Any

Current user

required request HttpRequest | None

Current request, if any

None

Raises: AIUserNotAllowedError: If user is not allowed to delete the thread

Source code in django_ai_assistant/helpers/use_cases.py
def delete_thread(\n    thread: Thread,\n    user: Any,\n    request: HttpRequest | None = None,\n) -> None:\n    \"\"\"Delete a thread.\\n\n    Uses `AI_ASSISTANT_CAN_DELETE_THREAD_FN` permission to check if user can delete the thread.\n\n    Args:\n        thread (Thread): Thread model instance to delete\n        user (Any): Current user\n        request (HttpRequest | None): Current request, if any\n    Raises:\n        AIUserNotAllowedError: If user is not allowed to delete the thread\n    \"\"\"\n    if not can_delete_thread(thread=thread, user=user, request=request):\n        raise AIUserNotAllowedError(\"User is not allowed to delete this thread\")\n\n    thread.delete()\n
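A deletion endpoint sketch, POST-only so the destructive call cannot be triggered by a stray GET:

```python
from django.http import HttpResponse
from django.views.decorators.http import require_POST

from django_ai_assistant.helpers.use_cases import delete_thread
from django_ai_assistant.models import Thread


@require_POST
def thread_delete_view(request, thread_id):
    thread = Thread.objects.get(id=thread_id)
    delete_thread(thread, user=request.user, request=request)  # permission-checked
    return HttpResponse(status=204)
```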
"},{"location":"reference/use-cases-ref/#django_ai_assistant.helpers.use_cases.get_thread_messages","title":"get_thread_messages(thread, user, request=None)","text":"

Get all messages in a thread.

Checks that the user is the thread's creator before returning messages (a TODO in the source suggests moving this to the AI_ASSISTANT_CAN_VIEW_THREAD_FN permission).

Parameters:

Name Type Description Default thread Thread

Thread model instance to get messages from

required user Any

Current user

required request HttpRequest | None

Current request, if any

None

Returns: list[BaseMessage]: List of message instances Raises: AIUserNotAllowedError: If user is not allowed to view messages in this thread

Source code in django_ai_assistant/helpers/use_cases.py
def get_thread_messages(\n    thread: Thread,\n    user: Any,\n    request: HttpRequest | None = None,\n) -> list[BaseMessage]:\n    \"\"\"Get all messages in a thread.\\n\n    Checks that the user is the thread's creator before returning messages.\n\n    Args:\n        thread (Thread): Thread model instance to get messages from\n        user (Any): Current user\n        request (HttpRequest | None): Current request, if any\n    Returns:\n        list[BaseMessage]: List of message instances\n    Raises:\n        AIUserNotAllowedError: If user is not allowed to view messages in this thread\n    \"\"\"\n    # TODO: have more permissions for threads? View thread permission?\n    if user != thread.created_by:\n        raise AIUserNotAllowedError(\"User is not allowed to view messages in this thread\")\n\n    return thread.get_messages(include_extra_messages=False)\n
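The returned items are LangChain `BaseMessage` instances, so the role and text live on `.type` and `.content`. A minimal sketch:

```python
from django_ai_assistant.helpers.use_cases import get_thread_messages


def print_transcript(request, thread):
    # Raises AIUserNotAllowedError if the user is not the thread's creator.
    for message in get_thread_messages(thread, user=request.user, request=request):
        print(message.type, message.content)  # e.g. "human" / "ai" plus the text
```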
"},{"location":"reference/use-cases-ref/#django_ai_assistant.helpers.use_cases.delete_message","title":"delete_message(message, user, request=None)","text":"

Delete a message.

Uses AI_ASSISTANT_CAN_DELETE_MESSAGE_FN permission to check if user can delete the message.

Parameters:

Name Type Description Default message Message

Message model instance to delete

required user Any

Current user

required request HttpRequest | None

Current request, if any

None

Raises: AIUserNotAllowedError: If user is not allowed to delete the message

Source code in django_ai_assistant/helpers/use_cases.py
def delete_message(\n    message: Message,\n    user: Any,\n    request: HttpRequest | None = None,\n):\n    \"\"\"Delete a message.\\n\n    Uses `AI_ASSISTANT_CAN_DELETE_MESSAGE_FN` permission to check if user can delete the message.\n\n    Args:\n        message (Message): Message model instance to delete\n        user (Any): Current user\n        request (HttpRequest | None): Current request, if any\n    Raises:\n        AIUserNotAllowedError: If user is not allowed to delete the message\n    \"\"\"\n    if not can_delete_message(message=message, user=user, request=request):\n        raise AIUserNotAllowedError(\"User is not allowed to delete this message\")\n\n    return message.delete()\n
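A sketch of removing a single message; the lookup by id is illustrative:

```python
from django_ai_assistant.helpers.use_cases import delete_message
from django_ai_assistant.models import Message


def remove_message(request, message_id):
    message = Message.objects.get(id=message_id)
    # Raises AIUserNotAllowedError if the user cannot delete this message.
    delete_message(message, user=request.user, request=request)
```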
"}]} \ No newline at end of file diff --git a/dev/sitemap.xml b/dev/sitemap.xml index 1d1b27a..d428866 100644 --- a/dev/sitemap.xml +++ b/dev/sitemap.xml @@ -2,46 +2,46 @@ https://vintasoftware.github.io/django-ai-assistant/dev/ - 2024-10-11 + 2024-11-19 https://vintasoftware.github.io/django-ai-assistant/dev/changelog/ - 2024-10-11 + 2024-11-19 https://vintasoftware.github.io/django-ai-assistant/dev/contributing/ - 2024-10-11 + 2024-11-19 https://vintasoftware.github.io/django-ai-assistant/dev/frontend/ - 2024-10-11 + 2024-11-19 https://vintasoftware.github.io/django-ai-assistant/dev/get-started/ - 2024-10-11 + 2024-11-19 https://vintasoftware.github.io/django-ai-assistant/dev/support/ - 2024-10-11 + 2024-11-19 https://vintasoftware.github.io/django-ai-assistant/dev/tutorial/ - 2024-10-11 + 2024-11-19 https://vintasoftware.github.io/django-ai-assistant/dev/reference/ - 2024-10-11 + 2024-11-19 https://vintasoftware.github.io/django-ai-assistant/dev/reference/assistants-ref/ - 2024-10-11 + 2024-11-19 https://vintasoftware.github.io/django-ai-assistant/dev/reference/models-ref/ - 2024-10-11 + 2024-11-19 https://vintasoftware.github.io/django-ai-assistant/dev/reference/use-cases-ref/ - 2024-10-11 + 2024-11-19 \ No newline at end of file diff --git a/dev/sitemap.xml.gz b/dev/sitemap.xml.gz index 0c3bc1ac1d442d46ebe04fdef5580717f589281f..625b5f2543cdf95eec2e3384236370c7df3c85a6 100644 GIT binary patch delta 273 zcmV+s0q*{z0;2*eABzYG0M$EZ0{?SqbY*Q}a4vXlYyi!bL2kn!5JmSng~c9-qb{1t zc6L3HD;$4ZzGq*pkkG}@idr2-1)-0F3$_)XAMf&2T)IP(lSiP6hXb}^B3j3JfP!5p=lH4VNf}k)jOH5P_5M_$V8D9=jf)NrwP*5V$Q%A{SiG-X4zF4}9 XAbW%GN5I>{zg71Gm|Tj4-39;vRcn2I