
Commit b2e1173

WIP
1 parent de9a583 commit b2e1173


3 files changed: 12 additions, 3 deletions


langdspy/model.py

Lines changed: 4 additions & 0 deletions
@@ -88,6 +88,10 @@ def get_prompt_history(self):
             prompt_history.append((runner_name, entry))
         return prompt_history

+    def clear_prompt_history(self):
+        for runner_name, runner in self.prompt_runners:
+            runner.clear_prompt_history()
+
     def get_failed_prompts(self):
         failed_prompts = []
         prompt_history = self.get_prompt_history()
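
The new method mirrors get_prompt_history(): it walks self.prompt_runners as (runner_name, runner) pairs and asks each runner to clear its own history. A minimal, self-contained sketch of that delegation, using stand-in classes rather than the real langdspy types (the pair-iteration over prompt_runners is taken from the diff; everything else is assumed):

class _Runner:
    """Stand-in for a prompt runner that records its own history."""
    def __init__(self):
        self.history = []

    def clear_prompt_history(self):
        self.history = []


class _Model:
    """Stand-in for the model class in langdspy/model.py."""
    def __init__(self, runners):
        # Assumed shape: a list of (runner_name, runner) pairs,
        # matching the iteration in the diff above.
        self.prompt_runners = runners

    def clear_prompt_history(self):
        for runner_name, runner in self.prompt_runners:
            runner.clear_prompt_history()


runners = [("qa", _Runner()), ("summarize", _Runner())]
model = _Model(runners)
runners[0][1].history.append({"prompt": "..."})
model.clear_prompt_history()
print(all(len(r.history) == 0 for _, r in runners))  # True
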

langdspy/prompt_runners.py

Lines changed: 8 additions & 0 deletions
@@ -50,6 +50,9 @@ def add_entry(self, llm, prompt, llm_response, parsed_output, error, start_time,
             "timestamp": end_time,
         })

+    def reset(self):
+        self.history = []
+


 class PromptRunner(RunnableSerializable):
@@ -100,6 +103,9 @@ def _determine_llm_model(self, llm):

     def get_prompt_history(self):
         return self.prompt_history.history
+
+    def clear_prompt_history(self):
+        self.prompt_history.reset()

     def _invoke_with_retries(self, chain, input, max_tries=1, config: Optional[RunnableConfig] = {}):
         total_max_tries = max_tries
@@ -143,6 +149,8 @@ def _invoke_with_retries(self, chain, input, max_tries=1, config: Optional[RunnableConfig] = {}):
             invoke_args = {**input, 'print_prompt': print_prompt, **kwargs, 'trained_state': trained_state, 'use_training': config.get('use_training', True), 'llm_type': llm_type}
             formatted_prompt = self.template.format_prompt(**invoke_args)

+            if print_prompt:
+                print(formatted_prompt)

             # logger.debug(f"Invoke args: {invoke_args}")
             res = chain.invoke(invoke_args, config=config)
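
reset() rebinds self.history to a fresh list rather than clearing it in place, and get_prompt_history() hands that list out directly. A small stand-in sketch of what that means for callers that captured the list earlier (not the real langdspy PromptHistory, just the two lines from the diff wrapped in a shell):

class _History:
    def __init__(self):
        self.history = []

    def reset(self):
        # Rebinds to a new list; references captured earlier keep the old one.
        self.history = []


h = _History()
h.history.append({"prompt": "...", "error": None})
snapshot = h.history   # e.g. what get_prompt_history() returned before the reset
h.reset()
print(len(h.history))  # 0: the runner now reports an empty history
print(len(snapshot))   # 1: a previously captured reference still sees the old entries

The last hunk also moves the print_prompt echo into the runner, so the formatted prompt is printed once per retry attempt just before chain.invoke, rather than inside the formatting code; the matching removal is in prompt_strategies.py below.
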

langdspy/prompt_strategies.py

Lines changed: 0 additions & 3 deletions
@@ -106,9 +106,6 @@ def format_prompt(self, **kwargs: Any) -> str:
             elif llm_type == 'anthropic':
                 prompt = self._format_anthropic_prompt(trained_state, use_training, examples, **kwargs)

-            if print_prompt:
-                print(prompt)
-
             return prompt
         except Exception as e:
             logger.error(f"Failed to format prompt with kwargs: {kwargs}")
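
With the print dropped here, format_prompt() only builds and returns the prompt string; whether it gets echoed is now the runner's decision. A rough sketch of that division of labor after this commit, using hypothetical stand-in classes (the real langdspy signatures take more arguments):

class _Strategy:
    def format_prompt(self, **kwargs):
        # Builds and returns the prompt; no longer prints it.
        return "formatted prompt"


class _Runner:
    def __init__(self, template):
        self.template = template

    def run(self, print_prompt=False, **kwargs):
        formatted_prompt = self.template.format_prompt(print_prompt=print_prompt, **kwargs)
        if print_prompt:  # printing now lives in the runner
            print(formatted_prompt)
        return formatted_prompt


_Runner(_Strategy()).run(print_prompt=True)  # prints "formatted prompt"
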
