05_hf.py
from dotenv import load_dotenv
from langchain import HuggingFaceHub, LLMChain
from langchain.prompts import PromptTemplate

# Load environment variables from .env; HuggingFaceHub expects
# HUGGINGFACEHUB_API_TOKEN to be set there.
load_dotenv()
# First example (kept for reference): translate English questions to SQL
# with a T5 model fine-tuned on WikiSQL.
# hub_llm = HuggingFaceHub(repo_id="mrm8488/t5-base-finetuned-wikiSQL")
# prompt = PromptTemplate(
#     input_variables=["question"],
#     template="Translate English to SQL: {question}"
# )
# hub_chain = LLMChain(prompt=prompt, llm=hub_llm, verbose=True)
# print(hub_chain.run("What is the average age of the respondents using a mobile device?"))

# Second example: use GPT-2 to continue a complaint addressed to a given profession.
hub_llm = HuggingFaceHub(
    repo_id='gpt2',
    model_kwargs={'temperature': 0.7, 'max_length': 100}
)

prompt = PromptTemplate(
    input_variables=["profession"],
    template="You had one job 😡! You're the {profession} and you didn't have to be sarcastic"
)

# Chain the prompt and the model; verbose=True prints the formatted prompt on each run.
hub_chain = LLMChain(prompt=prompt, llm=hub_llm, verbose=True)

print(hub_chain.run("customer service agent"))
print(hub_chain.run("politician"))
print(hub_chain.run("Fintech CEO"))
print(hub_chain.run("insurance agent"))