diff --git a/.github/workflows/cicd.yml b/.github/workflows/cicd.yml
index bf7b3a2..0680568 100644
--- a/.github/workflows/cicd.yml
+++ b/.github/workflows/cicd.yml
@@ -77,7 +77,7 @@ jobs:
 
       - name: Remove old Docker image
-        run: docker rmi -f $(docker images -aq) || true
+        run: docker rmi -f $(docker images -aq) || true
 
       - name: Docker run
         run: |
diff --git a/app/main.py b/app/main.py
index 78505ba..c6bf025 100644
--- a/app/main.py
+++ b/app/main.py
@@ -3,21 +3,7 @@ from dotenv import load_dotenv
 
 # BACKEND
-from fastapi import FastAPI
 from fastapi import FastAPI, HTTPException
-import asyncio
-
-# VECTOR DB Module
-import app.database.chroma_db as vectordb
-
-# AI
-from langchain.chat_models import ChatOpenAI
-from langchain.prompts import PromptTemplate
-
-# DTO
-from app.dto import openai_dto
-from app.prompt import openai_prompt
-from app.dto.db_dto import AddScheduleDTO
 
 # ETC
 import os
diff --git a/app/routers/chat.py b/app/routers/chat.py
index 8e32c10..0e9d66f 100644
--- a/app/routers/chat.py
+++ b/app/routers/chat.py
@@ -43,7 +43,7 @@ async def get_langchain_case(data: openai_dto.PromptRequest):
     my_template = openai_prompt.Template.case_classify_template
 
     prompt = PromptTemplate.from_template(my_template)
-    case = chat_model.predict(prompt.format(question=question))
+    case = await chat_model.apredict(prompt.format(question=question))
     print(case)
     case = int(case)
@@ -73,14 +73,12 @@ async def get_langchain_normal(data: openai_dto.PromptRequest): # case 1 : norma
                            openai_api_key=OPENAI_API_KEY  # API key
                            )
     question = data.prompt
-    # the previous model had an issue
-    # chat_model = LangchainOpenAI(openai_api_key=OPENAI_API_KEY)
 
     # description: give NESS's ideal instruction as template
     my_template = openai_prompt.Template.case1_template
 
     prompt = PromptTemplate.from_template(my_template)
-    response = chat_model.predict(prompt.format(output_language="Korean", question=question))
+    response = await chat_model.apredict(prompt.format(output_language="Korean", question=question))
     print(response)
     return response
@@ -98,7 +96,7 @@ async def get_langchain_schedule(data: openai_dto.PromptRequest):
     case2_template = openai_prompt.Template.case2_template
 
     prompt = PromptTemplate.from_template(case2_template)
-    response = chat_model.predict(prompt.format(output_language="Korean", question=question))
+    response = await chat_model.apredict(prompt.format(output_language="Korean", question=question))
     print(response)
     return response
@@ -124,6 +122,6 @@ async def get_langchain_rag(data: openai_dto.PromptRequest):
     # Handling here differs depending on whether chat_model.predict is an async or a sync function.
     # If it is async, await must be used. The code below was written assuming a sync function.
     # If it is async, it should be changed to e.g. response = await chat_model.predict(...).
-    response = chat_model.predict(prompt.format(output_language="Korean", question=question, schedule=schedule))
+    response = await chat_model.apredict(prompt.format(output_language="Korean", question=question, schedule=schedule))
     print(response)
     return response
diff --git a/app/routers/recommendation.py b/app/routers/recommendation.py
index 0e8ed28..3d133d3 100644
--- a/app/routers/recommendation.py
+++ b/app/routers/recommendation.py
@@ -23,7 +23,7 @@ config.read(CONFIG_FILE_PATH)
 
 
 @router.get("/main")
-def get_recommendation():
+async def get_recommendation():
 
     # model
     chat_model = ChatOpenAI(temperature=0,  # creativity (0.0 ~ 2.0)
@@ -37,4 +37,4 @@ def get_recommendation():
     recommendation_template = openai_prompt.Template.recommendation_template
     prompt = PromptTemplate.from_template(recommendation_template)
 
-    return chat_model.predict(prompt.format())
\ No newline at end of file
+    return await chat_model.apredict(prompt.format())
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index d73debe..e6ace11 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,10 +1,9 @@
 chromadb==0.4.22
 fastapi==0.109.0
 uvicorn==0.26.0
-langchain==0.1.1
 langchain-community==0.0.13
 openai==1.8.0
 python-dotenv==1.0.0
 starlette==0.35.1
 pydantic==2.5.3
-sentence-transformers==2.5.1
\ No newline at end of file
+sentence-transformers==2.5.1
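The hunks above move the route handlers from synchronous chat-model calls to awaited ones inside async def endpoints. Below is a minimal sketch of that pattern, assuming langchain-community 0.0.13 together with langchain-core, an OPENAI_API_KEY in the environment, and a hypothetical /example route; it illustrates the async call pattern, not the project's actual handlers.

import os

from fastapi import FastAPI
from langchain_community.chat_models import ChatOpenAI
from langchain_core.prompts import PromptTemplate

app = FastAPI()

# predict() is synchronous and returns a str, so `await chat_model.predict(...)`
# fails at runtime; apredict() is the awaitable counterpart on the same model.
chat_model = ChatOpenAI(temperature=0, openai_api_key=os.environ["OPENAI_API_KEY"])
prompt = PromptTemplate.from_template("Answer in {output_language}: {question}")

@app.get("/example")  # hypothetical route, for illustration only
async def example(question: str) -> str:
    # Awaiting apredict() keeps the event loop free while the OpenAI call runs.
    return await chat_model.apredict(
        prompt.format(output_language="Korean", question=question)
    )

Using apredict() (rather than await on predict()) is what makes an async def handler worthwhile here: a blocking predict() call inside an async endpoint would stall FastAPI's event loop for the duration of each OpenAI request.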