feat: Added cometml LLM log chain
Joywalker committed May 25, 2024
1 parent ea4ca77 commit e0c6fcf
Showing 2 changed files with 25 additions and 0 deletions.
course/module-5/inference_pipeline.py (3 additions, 0 deletions)

@@ -63,5 +63,8 @@ def generate(
             output=answer,
             metadata=metadata,
         )
+        self.prompt_monitoring_manager.log_chain(
+            query=query, response=answer, eval_output=evaluation_result
+        )
 
         return {"answer": answer, "llm_evaluation_result": evaluation_result}
course/module-5/monitoring/prompt_monitoring.py (22 additions, 0 deletions)

@@ -30,3 +30,25 @@ def log(
             output=output,
             metadata=metadata,
         )
+
+    @classmethod
+    def log_chain(cls, query: str, response: str, eval_output: str):
+        comet_llm.init(project=f"{settings.COMET_PROJECT}-monitoring")
+        comet_llm.start_chain(
+            inputs={"user_query": query},
+            project=f"{settings.COMET_PROJECT}-monitoring",
+            api_key=settings.COMET_API_KEY,
+            workspace=settings.COMET_WORKSPACE,
+        )
+        with comet_llm.Span(
+            category="twin_response",
+            inputs={"user_query": query},
+        ) as span:
+            span.set_outputs(outputs={"response": response})
+
+        with comet_llm.Span(
+            category="gpt3.5-eval",
+            inputs={"eval_result": eval_output},
+        ) as span:
+            span.set_outputs(outputs={"eval_output": eval_output})
+        comet_llm.end_chain(outputs={"response": response, "eval_output": eval_output})
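
Since `log_chain` is a classmethod, it can be exercised directly; a usage sketch, assuming `COMET_API_KEY`, `COMET_WORKSPACE`, and `COMET_PROJECT` are configured in `settings` (the argument values below are illustrative, not from this commit):

from monitoring.prompt_monitoring import PromptMonitoringManager

# Logs one chain with two spans (twin_response, gpt3.5-eval) to the
# "<COMET_PROJECT>-monitoring" project in Comet.
PromptMonitoringManager.log_chain(
    query="Who is Paul Graham?",
    response="Paul Graham is a programmer, essayist, and co-founder of Y Combinator.",
    eval_output="Relevant and factually accurate.",
)

Passing both the response and its evaluation to `end_chain` keeps the chain-level summary in Comet complete, while the two spans remain individually inspectable in the trace view.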
