-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathserver.py
More file actions
95 lines (72 loc) · 3.06 KB
/
server.py
File metadata and controls
95 lines (72 loc) · 3.06 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
import base64
import logging
import sys
from fastapi import FastAPI, Body
from pydantic import BaseModel
from typing import Any, List, Dict
from llm_factory import get_llm
# If an expected INFO message is not logged, check whether some other code
# calls logging at startup (for example, a global VectorStoreCache instance).
# Setting force=True in basicConfig would work around that, but any logs
# emitted before the config call would be lost — better to fix the offending
# initialization order instead.
# NOTE: renamed from `format` to LOG_FORMAT — the old name shadowed the
# builtin format().
LOG_FORMAT = ('%(asctime)s [%(levelname)s] %(process)d %(threadName)s '
              '%(filename)s:%(lineno)d - %(message)s')
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=LOG_FORMAT)

app = FastAPI()
llm = get_llm()
# The embedding texts and chat messages (question/answer) may include arbitrary
# string. encode them to make sure FastAPI can process it.
class EmbeddingTexts(BaseModel):
    """A batch of texts to embed, transportable as a base64-encoded JSON blob."""
    texts: List[str]

    def encode_to_str(self) -> str:
        """Serialize this model to JSON and wrap the payload in base64."""
        payload = self.model_dump_json().encode('utf-8')
        return base64.b64encode(payload).decode('utf-8')

    @staticmethod
    def decode_from_str(s: str) -> "EmbeddingTexts":
        """Inverse of encode_to_str: strip the base64 layer, then parse JSON."""
        raw_json = base64.b64decode(s.encode('utf-8')).decode('utf-8')
        return EmbeddingTexts.model_validate_json(raw_json)
class GetEmbeddingsRequest(BaseModel):
    """Request body for POST /get_embeddings."""
    # Base64-encoded JSON produced by EmbeddingTexts.encode_to_str().
    encoded_embedding_texts: str
class GetEmbeddingsResponse(BaseModel):
    """Response body for POST /get_embeddings."""
    # One embedding vector per input text, in the same order as the request.
    embeddings: List[List[float]]
class ChatMessages(BaseModel):
    """A chat history as a list of dicts, transportable as base64-encoded JSON."""
    messages: List[Dict[str, str]]

    def encode_to_str(self) -> str:
        """JSON-serialize the model, then base64-encode it for safe transport."""
        as_json = self.model_dump_json()
        return base64.b64encode(as_json.encode('utf-8')).decode('utf-8')

    @staticmethod
    def decode_from_str(s: str) -> "ChatMessages":
        """Reverse encode_to_str: undo the base64 layer and validate the JSON."""
        decoded = base64.b64decode(s.encode('utf-8')).decode('utf-8')
        return ChatMessages.model_validate_json(decoded)
class ChatAnswer(BaseModel):
    """An LLM answer plus token accounting, transportable as base64 JSON."""
    answer: str
    input_tokens: int
    output_tokens: int

    def encode_to_str(self) -> str:
        """Dump the model to JSON and base64-encode the result."""
        blob = self.model_dump_json().encode('utf-8')
        return base64.b64encode(blob).decode('utf-8')

    @staticmethod
    def decode_from_str(s: str) -> "ChatAnswer":
        """Inverse of encode_to_str: base64-decode, then validate as JSON."""
        text = base64.b64decode(s.encode('utf-8')).decode('utf-8')
        return ChatAnswer.model_validate_json(text)
class GetChatCompletionRequest(BaseModel):
    """Request body for POST /get_chat_completion."""
    # Base64-encoded JSON produced by ChatMessages.encode_to_str().
    encoded_messages: str
class GetChatCompletionResponse(BaseModel):
    """Response body for POST /get_chat_completion."""
    # Base64-encoded JSON produced by ChatAnswer.encode_to_str(); decode with
    # ChatAnswer.decode_from_str().
    encoded_answer: str
@app.post(
    "/get_embeddings",
    response_model=GetEmbeddingsResponse,
)
def get_embeddings(request: GetEmbeddingsRequest = Body(...)):
    """Decode the base64 text batch and return one embedding per text."""
    decoded = EmbeddingTexts.decode_from_str(request.encoded_embedding_texts)
    vectors = llm.get_embeddings(decoded.texts)
    return GetEmbeddingsResponse(embeddings=vectors)
@app.post(
    "/get_chat_completion",
    response_model=GetChatCompletionResponse,
)
def get_chat_completion(request: GetChatCompletionRequest = Body(...)):
    """Decode the chat history, run the LLM, and return the base64-encoded answer.

    Declared as a plain ``def`` (was ``async def``): llm.get_chat_completion is
    a blocking call (its tuple result is unpacked directly, never awaited), so
    an ``async def`` handler would stall the event loop for the duration of the
    completion. A sync handler runs in FastAPI's threadpool instead, matching
    get_embeddings above.
    """
    chat_msgs = ChatMessages.decode_from_str(request.encoded_messages)
    answer, input_tokens, output_tokens = llm.get_chat_completion(
        chat_msgs.messages)
    chat_answer = ChatAnswer(answer=answer, input_tokens=input_tokens,
                             output_tokens=output_tokens)
    return GetChatCompletionResponse(encoded_answer=chat_answer.encode_to_str())