"""FastAPI server exposing tactic generators and a premise retriever over
HTTP (server.py, forked from lean-dojo/LeanCopilot)."""

from typing import List, Optional

from fastapi import FastAPI
from pydantic import BaseModel

from models import *
from external_models import *

app = FastAPI()

# Models served by this app, keyed by the name that clients send in the
# "name" field of each request.
models = {
    # "EleutherAI/llemma_7b": DecoderOnlyTransformer(
    #     "EleutherAI/llemma_7b", num_return_sequences=2, max_length=64, device="auto"
    # ),
    "gpt4": OpenAIRunner(
        model="gpt-4-turbo-preview",
        temperature=0.9,
        max_tokens=1024,
        top_p=0.9,
        frequency_penalty=0,
        presence_penalty=0,
        num_return_sequences=16,
        openai_timeout=45,
    ),
    "InternLM": VLLMTacticGenerator(
        model="internlm/internlm2-math-plus-1_8b",
        tensor_parallel_size=2,
        temperature=0.6,
        max_tokens=1024,
        top_p=0.9,
        length_penalty=0,
        n=32,
        do_sample=True,
        output_scores=True,
        output_logits=False,
        return_dict_in_generate=True,
        device="auto",
    ),
    "wellecks/llmstep-mathlib4-pythia2.8b": PythiaTacticGenerator(
        num_return_sequences=32, max_length=1024, device="auto"
    ),
    "t5-small": EncoderDecoderTransformer(
        "t5-small", num_return_sequences=3, max_length=1024
    ),
    "kaiyuy/leandojo-lean4-tacgen-byt5-small": EncoderDecoderTransformer(
        "kaiyuy/leandojo-lean4-tacgen-byt5-small",
        num_return_sequences=32,
        max_length=1024,
    ),
    "kaiyuy/leandojo-lean4-retriever-byt5-small": EncoderOnlyTransformer(
        "kaiyuy/leandojo-lean4-retriever-byt5-small"
    ),
}

class GeneratorRequest(BaseModel):
    """Request for /generate: which model to use, the input text, and an
    optional prefix that generated outputs must start with."""

    name: str
    input: str
    prefix: Optional[str] = None


class Generation(BaseModel):
    output: str
    score: float


class GeneratorResponse(BaseModel):
    outputs: List[Generation]


class EncoderRequest(BaseModel):
    name: str
    input: str


class EncoderResponse(BaseModel):
    outputs: List[float]

@app.post("/generate")
async def generate(req: GeneratorRequest) -> GeneratorResponse:
model = models[req.name]
target_prefix = req.prefix if req.prefix is not None else ""
outputs = model.generate(req.input, target_prefix)
return GeneratorResponse(
outputs=[Generation(output=out[0], score=out[1]) for out in outputs]
)
@app.post("/encode")
async def encode(req: EncoderRequest) -> EncoderResponse:
model = models[req.name]
feature = model.encode(req.input)
return EncoderResponse(outputs=feature.tolist())
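

# Usage sketch (an assumption, not part of the original file): FastAPI apps
# are typically served with uvicorn, e.g.
#
#     uvicorn server:app --host 0.0.0.0 --port 23337
#
# after which the endpoints can be exercised with curl. The "name" field must
# match a key in `models` above; the port here is illustrative.
#
#     curl -X POST http://localhost:23337/generate \
#         -H "Content-Type: application/json" \
#         -d '{"name": "t5-small", "input": "theorem foo : 1 + 1 = 2", "prefix": null}'
if __name__ == "__main__":
    # Allows `python server.py` for local testing; assumes uvicorn is installed.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=23337)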