-
Notifications
You must be signed in to change notification settings - Fork 87
Expand file tree
/
Copy pathconfig.example.yaml
More file actions
256 lines (235 loc) · 6.06 KB
/
config.example.yaml
File metadata and controls
256 lines (235 loc) · 6.06 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
# AIProxy Configuration File
# Priority: Environment Variables > Config File > Database
# This file allows you to configure channels, model configs, and options without using the database

# Channels Configuration
# Note: Channels from YAML are assigned negative IDs automatically and are not
# persisted to the database; they are merged with database channels in memory.
channels:
  - name: "openai-channel-1"
    type_name: "openai"  # Can use type_name instead of numeric type
    key: "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
    base_url: "https://api.openai.com"
    models:
      - "gpt-4"
      - "gpt-3.5-turbo"
    model_mapping:
      "gpt-4": "gpt-4-0613"
    status: 1  # 1=Enabled, 2=Disabled
    priority: 0
    balance: 100.0
    balance_threshold: 10.0
    enabled_auto_balance_check: true
    sets:
      - "default"
    config:
      spec:
        organization: "org-xxxxx"

  - name: "azure-channel-1"
    type_name: "azure"  # Type name is case-insensitive
    key: "your-azure-api-key"
    base_url: "https://your-resource.openai.azure.com"
    models:
      - "gpt-4"
    status: 1
    priority: 1
    sets:
      - "default"

  - name: "claude-channel-1"
    type_name: "claude"  # "claude" is an alias for "anthropic"
    key: "sk-ant-xxxxx"
    base_url: "https://api.anthropic.com"
    models:
      - "claude-3-opus-20240229"
      - "claude-3-sonnet-20240229"
    status: 1
    priority: 0
    sets:
      - "default"
      - "premium"

  # You can also use numeric type if preferred
  - name: "gemini-channel-1"
    type: 24  # Google Gemini
    key: "your-gemini-api-key"
    models:
      - "gemini-pro"
    status: 1

  - name: "fake-channel-1"
    type_name: "fake"
    key: "fake-key"
    base_url: "https://fake.local/v1"
    models:
      - "fake-chat"
      - "fake-completion"
      - "fake-response"
      - "fake-anthropic"
      - "fake-gemini"
      - "fake-embedding"
      - "fake-image"
      - "fake-rerank"
    status: 1
    priority: 100
    sets:
      - "default"
      - "debug"
    # NOTE(review): this channel uses "configs" while the channels above use
    # "config" — confirm which key the loader actually reads.
    configs:
      static_text: "Fake adaptor says hello."
      response_prefix: "[debug] "
      response_suffix: " <eom>"
      reasoning_text: "This result is synthesized locally for testing."
      delay_ms: 0
      stream_chunks: 4
      stream_chunk_size: 8
      usage:
        input_tokens: 32
        output_tokens: 16
        cached_tokens: 4
        reasoning_tokens: 3
        image_input_tokens: 12
        image_output_tokens: 64
      embedding:
        dimensions: 12
        base: "fake-embedding-seed"
      image:
        url: "https://fake.local/assets/fake-image.png"
        b64_json: "ZmFrZS1pbWFnZQ=="
        revised_prompt: "fake revised prompt"
        image_tokens_in: 12
        image_tokens_out: 64
      rerank:
        base_score: 0.98
        step: 0.07
        return_documents: true
      response:
        store: true
        status: "completed"
        parallel_tool_calls: true
      anthropic:
        stop_reason: "end_turn"
        type: "message"
      gemini:
        finish_reason: "STOP"
        model_version: "fake-1.0"
      metadata:
        environment: "local"
        owner: "qa"
      openapi:
        spec_version: "3.1.0"
        info:
          title: "Fake Adaptor Template"
          version: "1.0.0"
          description: "OpenAPI-style config payload for the fake adaptor"
        components:
          examples:
            chat:
              summary: "Fake chat response"
# Model Configurations
modelconfigs:
  - model: "gpt-4"
    owner: "openai"
    type_name: "chat"  # Can use type_name instead of numeric type
    # OR use numeric type:
    # type: 1  # ChatCompletions
    rpm: 3500
    tpm: 80000
    retry_times: 3
    timeout_config:
      request_timeout: 300
      stream_request_timeout: 600
    warn_error_rate: 0.5
    max_error_rate: 0.8
    price:
      input: 0.03
      output: 0.06
    config:
      max_context_tokens: 8192
      max_output_tokens: 4096
      vision: false
      tool_choice: true

  - model: "gpt-3.5-turbo"
    owner: "openai"
    type_name: "chat"
    rpm: 3500
    tpm: 90000
    price:
      input: 0.0005
      output: 0.0015
    config:
      max_context_tokens: 16384
      max_output_tokens: 4096

  - model: "claude-3-opus-20240229"
    owner: "anthropic"
    type_name: "chat"
    rpm: 4000
    tpm: 400000
    price:
      input: 0.015
      output: 0.075
    config:
      max_context_tokens: 200000
      max_output_tokens: 4096
      vision: true

  - model: "claude-3-sonnet-20240229"
    owner: "anthropic"
    type_name: "chat"
    rpm: 4000
    tpm: 400000
    price:
      input: 0.003
      output: 0.015
    config:
      max_context_tokens: 200000
      max_output_tokens: 4096
      vision: true

  - model: "text-embedding-3-small"
    owner: "openai"
    type_name: "embedding"  # Embedding model
    rpm: 3000
    tpm: 1000000
    price:
      input: 0.00002
      output: 0
    config:
      max_context_tokens: 8191

  - model: "dall-e-3"
    owner: "openai"
    type_name: "image"  # Image generation
    rpm: 50
    # Per-image price keyed by resolution, then quality tier.
    image_quality_prices:
      "1024x1024":
        "standard": 0.040
        "hd": 0.080
      "1024x1792":
        "standard": 0.080
        "hd": 0.120
      "1792x1024":
        "standard": 0.080
        "hd": 0.120
# System Options Configuration
# All values are deliberately quoted strings (including numbers and booleans),
# so YAML does not retype them before the application parses them.
options:
  # Log retention settings (in hours)
  LogStorageHours: "168"  # 7 days
  RetryLogStorageHours: "72"  # 3 days
  LogDetailStorageHours: "24"  # 1 day
  # Clean log batch size
  CleanLogBatchSize: "1000"
  # IP rate limiting
  IPGroupsThreshold: "100"  # Requests per minute
  IPGroupsBanThreshold: "200"  # Ban threshold
  # Log detail settings
  SaveAllLogDetail: "false"
  LogDetailRequestBodyMaxSize: "10000"
  LogDetailResponseBodyMaxSize: "10000"
  # Retry settings
  RetryTimes: "3"
  # Group settings
  GroupMaxTokenNum: "0"  # 0 means unlimited
  GroupConsumeLevelRatio: '{"1":1,"2":0.9,"3":0.8}'
  # Error rate alerts
  DefaultWarnNotifyErrorRate: "0.5"
  # Usage alerts
  UsageAlertThreshold: "100"
  UsageAlertMinAvgThreshold: "10"
  # Fuzzy token threshold
  FuzzyTokenThreshold: "240000"
  # Disable serve (for maintenance)
  DisableServe: "false"