Skip to content

Commit

Permalink
Doc fixes
Browse files Browse the repository at this point in the history
  • Loading branch information
fjsj committed Oct 11, 2024
1 parent ba3d4ec commit 99e3abc
Show file tree
Hide file tree
Showing 4 changed files with 20 additions and 19 deletions.
20 changes: 10 additions & 10 deletions django_ai_assistant/helpers/assistants.py
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ class AIAssistant(abc.ABC): # noqa: F821
"""Structured output to use for the assistant.\n
Defaults to `None`.
When not `None`, the assistant will return a structured output in the provided format.
See https://python.langchain.com/v0.2/docs/how_to/structured_output/ for the available formats.
See https://python.langchain.com/v0.3/docs/how_to/structured_output/ for the available formats.
"""
_user: Any | None
"""The current user the assistant is helping. A model instance.\n
Expand Down Expand Up @@ -260,7 +260,7 @@ def get_model_kwargs(self) -> dict[str, Any]:
return {}

def get_llm(self) -> BaseChatModel:
"""Get the Langchain LLM instance for the assistant.
"""Get the LangChain LLM instance for the assistant.
By default, this uses the OpenAI implementation.\n
`get_model`, `get_temperature`, and `get_model_kwargs` are used to create the LLM instance.\n
Override this method to use a different LLM implementation.
Expand Down Expand Up @@ -301,7 +301,7 @@ def get_tools(self) -> Sequence[BaseTool]:
"""Get the list of method tools the assistant can use.
By default, this is the `_method_tools` attribute, which are all `@method_tool`s.\n
Override and call super to add additional tools,
such as [any langchain_community tools](https://python.langchain.com/v0.2/docs/integrations/tools/).
such as [any langchain_community tools](https://python.langchain.com/v0.3/docs/integrations/tools/).
Returns:
Sequence[BaseTool]: The list of tools the assistant can use.
Expand All @@ -310,7 +310,7 @@ def get_tools(self) -> Sequence[BaseTool]:

def get_document_separator(self) -> str:
"""Get the RAG document separator to use in the prompt. Only used when `has_rag=True`.\n
Defaults to `"\\n\\n"`, which is the Langchain default.\n
Defaults to `"\\n\\n"`, which is the LangChain default.\n
Override this method to use a different separator.
Returns:
Expand All @@ -321,7 +321,7 @@ def get_document_separator(self) -> str:
def get_document_prompt(self) -> PromptTemplate:
"""Get the PromptTemplate template to use when rendering RAG documents in the prompt.
Only used when `has_rag=True`.\n
Defaults to `PromptTemplate.from_template("{page_content}")`, which is the Langchain default.\n
Defaults to `PromptTemplate.from_template("{page_content}")`, which is the LangChain default.\n
Override this method to use a different template.
Returns:
Expand Down Expand Up @@ -372,7 +372,7 @@ def get_contextualize_prompt(self) -> ChatPromptTemplate:
)

def get_history_aware_retriever(self) -> Runnable[dict, RetrieverOutput]:
"""Get the history-aware retriever Langchain chain for the assistant.\n
"""Get the history-aware retriever LangChain chain for the assistant.\n
This is used when `has_rag=True` to fetch documents based on the chat history.\n
By default, this is a chain that checks if there is chat history,
and if so, it uses the chat history to generate a new standalone question
Expand All @@ -381,10 +381,10 @@ def get_history_aware_retriever(self) -> Runnable[dict, RetrieverOutput]:
Override this method to use a different history-aware retriever chain.
Read more about the history-aware retriever in the
[Langchain docs](https://python.langchain.com/v0.2/docs/how_to/qa_chat_history_how_to/).
[LangChain docs](https://python.langchain.com/v0.3/docs/how_to/qa_chat_history_how_to/).
Returns:
Runnable[dict, RetrieverOutput]: a history-aware retriever Langchain chain.
Runnable[dict, RetrieverOutput]: a history-aware retriever LangChain chain.
"""
llm = self.get_llm()
retriever = self.get_retriever()
Expand All @@ -403,7 +403,7 @@ def get_history_aware_retriever(self) -> Runnable[dict, RetrieverOutput]:

@with_cast_id
def as_graph(self, thread_id: Any | None = None) -> Runnable[dict, dict]:
"""Create the Langchain graph for the assistant.\n
"""Create the LangGraph graph for the assistant.\n
This graph is an agent that supports chat history, tool calling, and RAG (if `has_rag=True`).\n
`as_graph` uses many other methods to create the graph for the assistant.
Prefer to override the other methods to customize the graph for the assistant.
Expand Down Expand Up @@ -537,7 +537,7 @@ def record_response(state: AgentState):

@with_cast_id
def invoke(self, *args: Any, thread_id: Any | None, **kwargs: Any) -> dict:
"""Invoke the assistant Langchain graph with the given arguments and keyword arguments.\n
"""Invoke the assistant LangChain graph with the given arguments and keyword arguments.\n
This is the lower-level method to run the assistant.\n
The graph is created by the `as_graph` method.\n
Expand Down
2 changes: 1 addition & 1 deletion django_ai_assistant/helpers/use_cases.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,7 @@ def create_message(
if not can_create_message(thread=thread, user=user, request=request):
raise AIUserNotAllowedError("User is not allowed to create messages in this thread")

# TODO: Check if we can separate the message creation from the chain invoke
# TODO: Check if we can separate the message creation from the invoke
assistant = assistant_cls(user=user, request=request)
assistant_message = assistant.invoke(
{"input": content},
Expand Down
4 changes: 2 additions & 2 deletions django_ai_assistant/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ def __repr__(self) -> str:

def get_messages(self, include_extra_messages: bool = False) -> list[BaseMessage]:
"""
Get Langchain messages objects from the thread.
Get LangChain message objects from the thread.
Args:
include_extra_messages (bool): Whether to include non-chat messages (like tool calls).
Expand Down Expand Up @@ -91,7 +91,7 @@ class Message(models.Model):
"""Thread to which the message belongs."""
thread_id: Any
message = models.JSONField()
"""Message content. This is a serialized Langchain `BaseMessage` that was serialized
"""Message content. This is a serialized LangChain `BaseMessage` that was serialized
with `message_to_dict` and can be deserialized with `messages_from_dict`."""
created_at = models.DateTimeField(auto_now_add=True)
"""Date and time when the message was created.
Expand Down
13 changes: 7 additions & 6 deletions docs/tutorial.md
Original file line number Diff line number Diff line change
Expand Up @@ -178,7 +178,7 @@ class IssueManagementAIAssistant(AIAssistant):

### Using pre-implemented tools

Django AI Assistant works with [any LangChain-compatible tool](https://python.langchain.com/v0.2/docs/integrations/tools/).
Django AI Assistant works with [any LangChain-compatible tool](https://python.langchain.com/v0.3/docs/integrations/tools/).
Just override the `get_tools` method in your AI Assistant class to include the tools you want to use.

For example, you can use the `TavilySearch` tool to provide your AI Assistant with the ability to search the web
Expand Down Expand Up @@ -219,8 +219,9 @@ class MovieSearchAIAssistant(AIAssistant):
```

!!! note
As of now, Django AI Assistant is powered by [LangChain](https://python.langchain.com/v0.2/docs/introduction/),
but previous knowledge on LangChain is NOT necessary to use this library, at least for the main use cases.
As of now, Django AI Assistant is powered by [LangChain](https://python.langchain.com/v0.3/docs/introduction/)
and [LangGraph](https://langchain-ai.github.io/langgraph/tutorials/introduction/),
but knowledge of these tools is NOT necessary to use this library, at least for the main use cases.

## Using an AI Assistant

Expand Down Expand Up @@ -359,7 +360,7 @@ If you want to use traditional Django templates, you can try using HTMX to avoid
### Using other AI models

By default the supported models are OpenAI ones,
but you can use [any chat model from Langchain that supports Tool Calling](https://python.langchain.com/v0.2/docs/integrations/chat/#advanced-features) by overriding `get_llm`:
but you can use [any chat model from LangChain that supports Tool Calling](https://python.langchain.com/docs/integrations/chat/#featured-providers) by overriding `get_llm`:

```python title="myapp/ai_assistants.py"
from django_ai_assistant import AIAssistant
Expand Down Expand Up @@ -421,7 +422,7 @@ thereby improving the quality of the response by avoiding generic or off-topic a
For this to work, you must do the following in your AI Assistant:

1. Add `has_rag = True` as a class attribute;
2. Override the `get_retriever` method to return a [Langchain Retriever](https://python.langchain.com/v0.2/docs/how_to/#retrievers).
2. Override the `get_retriever` method to return a [LangChain Retriever](https://python.langchain.com/v0.3/docs/how_to/#retrievers).

For example:

Expand All @@ -439,7 +440,7 @@ class DocsAssistant(AIAssistant):
has_rag = True
def get_retriever(self) -> BaseRetriever:
return ... # use a Langchain Retriever here
return ... # use a LangChain Retriever here
```

The `rag/ai_assistants.py` file in the [example project](https://github.com/vintasoftware/django-ai-assistant/tree/main/example#readme)
Expand Down

0 comments on commit 99e3abc

Please sign in to comment.