diff --git a/backend/app/api/admin/route.py b/backend/app/api/admin/route.py
index 2ae3ba0..b5dad85 100644
--- a/backend/app/api/admin/route.py
+++ b/backend/app/api/admin/route.py
@@ -17,7 +17,7 @@
 from llama_index.core.ingestion import IngestionPipeline
 from llama_index.core.node_parser import SentenceSplitter
 from llama_index.core.settings import Settings
-from app.engine.vectordb import get_vector_store
+from app.api.chat.engine.vectordb import get_vector_store
 from llama_index.core import SimpleDirectoryReader
 from llama_index.core.schema import Document
diff --git a/backend/app/api/chat/chat_config.py b/backend/app/api/chat/chat_config.py
index 5f29c86..18d3ea2 100644
--- a/backend/app/api/chat/chat_config.py
+++ b/backend/app/api/chat/chat_config.py
@@ -11,16 +11,6 @@
 logger = logging.getLogger("uvicorn")
 
 
-# @r.get("")
-# async def chat_config() -> ChatConfig:
-#     starter_questions = None
-#     # conversation_starters = os.getenv("CONVERSATION_STARTERS")
-#     conversation_starters = "Tell me about CognitiveLab \n What are some open source projects \n Tell me about Open Source AI \n What are AI Agents"
-#     if conversation_starters and conversation_starters.strip():
-#         starter_questions = conversation_starters.strip().split("\n")
-#     return ChatConfig(starter_questions=starter_questions)
-
-
 @r.get("")
 async def chat_config() -> ChatConfig:
     return await config_service.get_chat_config()
diff --git a/backend/app/engine/__init__.py b/backend/app/api/chat/engine/__init__.py
similarity index 100%
rename from backend/app/engine/__init__.py
rename to backend/app/api/chat/engine/__init__.py
diff --git a/backend/app/engine/engine.py b/backend/app/api/chat/engine/engine.py
similarity index 92%
rename from backend/app/engine/engine.py
rename to backend/app/api/chat/engine/engine.py
index cc9f8c9..05ffc9a 100644
--- a/backend/app/engine/engine.py
+++ b/backend/app/api/chat/engine/engine.py
@@ -1,7 +1,7 @@
 import os
 
-from app.engine.index import get_index
-from app.engine.node_postprocessors import NodeCitationProcessor
+from app.api.chat.engine.index import get_index
+from app.api.chat.engine.node_postprocessors import NodeCitationProcessor
 from fastapi import HTTPException
 from llama_index.core.chat_engine import CondensePlusContextChatEngine
diff --git a/backend/app/engine/generate.py b/backend/app/api/chat/engine/generate.py
similarity index 95%
rename from backend/app/engine/generate.py
rename to backend/app/api/chat/engine/generate.py
index 1bca2e2..232d737 100644
--- a/backend/app/engine/generate.py
+++ b/backend/app/api/chat/engine/generate.py
@@ -6,8 +6,8 @@
 import logging
 import os
 
-from app.engine.loaders import get_documents
-from app.engine.vectordb import get_vector_store
+from app.api.chat.engine.loaders import get_documents
+from app.api.chat.engine.vectordb import get_vector_store
 from app.settings import init_settings
 from llama_index.core.ingestion import IngestionPipeline
 from llama_index.core.node_parser import SentenceSplitter
diff --git a/backend/app/engine/index.py b/backend/app/api/chat/engine/index.py
similarity index 90%
rename from backend/app/engine/index.py
rename to backend/app/api/chat/engine/index.py
index e1adcb8..4f91838 100644
--- a/backend/app/engine/index.py
+++ b/backend/app/api/chat/engine/index.py
@@ -1,6 +1,6 @@
 import logging
 
 from llama_index.core.indices import VectorStoreIndex
-from app.engine.vectordb import get_vector_store
+from app.api.chat.engine.vectordb import get_vector_store
 
 logger = logging.getLogger("uvicorn")
diff --git a/backend/app/engine/loaders/__init__.py b/backend/app/api/chat/engine/loaders/__init__.py
similarity index 80%
rename from backend/app/engine/loaders/__init__.py
rename to backend/app/api/chat/engine/loaders/__init__.py
index 4a278a4..4c3f062 100644
--- a/backend/app/engine/loaders/__init__.py
+++ b/backend/app/api/chat/engine/loaders/__init__.py
@@ -1,9 +1,9 @@
 import logging
 
 import yaml
 
-from app.engine.loaders.db import DBLoaderConfig, get_db_documents
-from app.engine.loaders.file import FileLoaderConfig, get_file_documents
-from app.engine.loaders.web import WebLoaderConfig, get_web_documents
+from app.api.chat.engine.loaders.db import DBLoaderConfig, get_db_documents
+from app.api.chat.engine.loaders.file import FileLoaderConfig, get_file_documents
+from app.api.chat.engine.loaders.web import WebLoaderConfig, get_web_documents
 
 logger = logging.getLogger(__name__)
diff --git a/backend/app/engine/loaders/db.py b/backend/app/api/chat/engine/loaders/db.py
similarity index 100%
rename from backend/app/engine/loaders/db.py
rename to backend/app/api/chat/engine/loaders/db.py
diff --git a/backend/app/engine/loaders/file.py b/backend/app/api/chat/engine/loaders/file.py
similarity index 100%
rename from backend/app/engine/loaders/file.py
rename to backend/app/api/chat/engine/loaders/file.py
diff --git a/backend/app/engine/loaders/web.py b/backend/app/api/chat/engine/loaders/web.py
similarity index 100%
rename from backend/app/engine/loaders/web.py
rename to backend/app/api/chat/engine/loaders/web.py
diff --git a/backend/app/engine/node_postprocessors.py b/backend/app/api/chat/engine/node_postprocessors.py
similarity index 100%
rename from backend/app/engine/node_postprocessors.py
rename to backend/app/api/chat/engine/node_postprocessors.py
diff --git a/backend/app/engine/query_filter.py b/backend/app/api/chat/engine/query_filter.py
similarity index 100%
rename from backend/app/engine/query_filter.py
rename to backend/app/api/chat/engine/query_filter.py
diff --git a/backend/app/engine/vectordb.py b/backend/app/api/chat/engine/vectordb.py
similarity index 100%
rename from backend/app/engine/vectordb.py
rename to backend/app/api/chat/engine/vectordb.py
diff --git a/backend/app/api/chat/route.py b/backend/app/api/chat/route.py
index b955145..cadad06 100644
--- a/backend/app/api/chat/route.py
+++ b/backend/app/api/chat/route.py
@@ -20,8 +20,8 @@
     SourceNodes,
 )
 from app.api.chat.vercel_response import VercelStreamResponse
-from app.engine import get_chat_engine
-from app.engine.query_filter import generate_filters
+from app.api.chat.engine import get_chat_engine
+from app.api.chat.engine.query_filter import generate_filters
 from app.models.user_model import User
 from app.core.user import get_current_user
 from app.api.chat.summary import summary_generator
@@ -184,7 +184,7 @@ async def enhanced_content_generator():
 #     ):
 #         try:
 #             # Start background tasks to download documents from LlamaCloud if needed
-#             from app.engine.service import LLamaCloudFileService
+#             from app.api.chat.engine.service import LLamaCloudFileService
 
 #             LLamaCloudFileService.download_files_from_nodes(nodes, background_tasks)
 #         except ImportError:
diff --git a/backend/app/api/chat/services/file.py b/backend/app/api/chat/services/file.py
index 72107f8..1ac0d14 100644
--- a/backend/app/api/chat/services/file.py
+++ b/backend/app/api/chat/services/file.py
@@ -6,7 +6,7 @@
 from typing import Any, List, Tuple
 
-from app.engine.index import get_index
+from app.api.chat.engine.index import get_index
 from llama_index.core import VectorStoreIndex
 from llama_index.core.ingestion import IngestionPipeline
 from llama_index.core.readers.file.base import (
@@ -18,8 +18,8 @@
 def get_llamaparse_parser():
-    from app.engine.loaders import load_configs
-    from app.engine.loaders.file import FileLoaderConfig, llama_parse_parser
+    from app.api.chat.engine.loaders import load_configs
+    from app.api.chat.engine.loaders.file import FileLoaderConfig, llama_parse_parser
 
     config = load_configs()
     file_loader_config = FileLoaderConfig(**config["file"])
@@ -81,7 +81,7 @@ def process_file(file_name: str, base64_content: str, params: Any) -> List[str]:
     # Insert the documents into the index
     if isinstance(current_index, LlamaCloudIndex):
-        from app.engine.service import LLamaCloudFileService
+        from app.api.chat.engine.service import LLamaCloudFileService
 
         project_id = current_index._get_project_id()
         pipeline_id = current_index._get_pipeline_id()
diff --git a/backend/app/core/config.py b/backend/app/core/config.py
index 983ef6d..3706134 100644
--- a/backend/app/core/config.py
+++ b/backend/app/core/config.py
@@ -10,10 +10,10 @@ class Settings(BaseSettings):
     JWT_SECRET_KEY: str = config("JWT_SECRET_KEY", cast=str)
    JWT_REFRESH_SECRET_KEY: str = config("JWT_REFRESH_SECRET_KEY", cast=str)
     ALGORITHM: ClassVar[str] = "HS256"
-    ACCESS_TOKEN_EXPIRE_MINUTES: int = 60
+    ACCESS_TOKEN_EXPIRE_MINUTES: int = 60 * 24
     REFRESH_TOKEN_EXPIRE_MINUTES: int = 60 * 24 * 7  # 7 days
     BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = ["http://localhost:3000"]
-    PROJECT_NAME: str = "FODOIST"
+    PROJECT_NAME: str = "RAGSAAS"
     COOKIE_SECURE: bool = False
 
     # Database
diff --git a/backend/pyproject.toml b/backend/pyproject.toml
index a6e3244..c7a32cc 100644
--- a/backend/pyproject.toml
+++ b/backend/pyproject.toml
@@ -7,7 +7,7 @@ authors = [ "Adithya S K " ]
 readme = "README.md"
 
 [tool.poetry.scripts]
-generate = "app.engine.generate:generate_datasource"
+generate = "app.api.chat.engine.generate:generate_datasource"
 
 [tool.poetry.dependencies]
 python = ">=3.11,<3.12"
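A rename like this makes it easy to leave a stale `app.engine` import in a file the diff doesn't touch. The sketch below is a hypothetical sanity check, not part of this change: run from `backend/` with the project's dependencies installed, it imports each relocated module shown above and confirms the old package path no longer resolves. A plain `grep -rn "app\.engine" backend` works just as well if you'd rather not import anything.

```python
# check_engine_rename.py -- hypothetical helper, not included in this diff.
# Assumes it is run from backend/ with the project's dependencies installed.
import importlib
import importlib.util

RENAMED_MODULES = [
    "app.api.chat.engine",               # re-exports get_chat_engine
    "app.api.chat.engine.index",         # get_index
    "app.api.chat.engine.vectordb",      # get_vector_store
    "app.api.chat.engine.query_filter",  # generate_filters
    "app.api.chat.engine.loaders",       # get_documents / load_configs
    "app.api.chat.engine.generate",      # generate_datasource (poetry script target)
]

for name in RENAMED_MODULES:
    # Raises ModuleNotFoundError if any intra-package import still points at app.engine.
    importlib.import_module(name)

# The old package should be gone entirely after the rename.
assert importlib.util.find_spec("app.engine") is None, "stale app.engine package still present"
print("engine rename looks consistent")
```

With the `[tool.poetry.scripts]` entry updated as well, `poetry run generate` still resolves to `generate_datasource` at its new location, so the ingestion command itself does not change.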