-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapi.mjs
More file actions
executable file
·211 lines (187 loc) · 6.85 KB
/
api.mjs
File metadata and controls
executable file
·211 lines (187 loc) · 6.85 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
#!/bin/env node
/**
 * api.mjs
 *
 * Purpose: Centralized API bridge for interacting with the local Ollama instance.
 * Provides a robust interface for agents to query LLMs via HTTP or UNIX socket.
 */
import http from 'http';
import { Buffer } from 'buffer';
// Transport defaults; all overridable via environment (see below).
const DEFAULT_HOST = '127.0.0.1';
const DEFAULT_PORT = 11434; // Ollama's standard listen port
const DEFAULT_SOCKET_PATH = '/run/ollama.sock';
const DEFAULT_MODEL = 'llama3'; // Adjust based on available models
// Configuration: Prefer Environment Variables
const OLLAMA_HOST = process.env.OLLAMA_HOST || DEFAULT_HOST;
const OLLAMA_PORT = process.env.OLLAMA_PORT || DEFAULT_PORT;
const USE_SOCKET = process.env.OLLAMA_USE_SOCKET === 'true'; // Set to true to force socket usage
/**
 * Sends a POST request to the Ollama API over HTTP or a UNIX socket.
 *
 * @param {string} endpoint - API endpoint (e.g., '/api/generate')
 * @param {object} payload - JSON payload, serialized as the request body
 * @param {function} [onChunk] - Optional callback receiving each raw response
 *   chunk (Buffer) while streaming; when omitted, the full body is buffered
 *   and parsed as a single JSON object.
 * @returns {Promise<object>} - Parsed JSON response, or `{ done: true }`
 *   after a streamed response completes.
 * @throws {Error} On network failure, non-2xx status, or unparsable JSON.
 */
function post(endpoint, payload, onChunk) {
return new Promise((resolve, reject) => {
const postData = JSON.stringify(payload);
const options = {
method: 'POST',
path: endpoint,
headers: {
'Content-Type': 'application/json',
'Content-Length': Buffer.byteLength(postData),
},
};
// UNIX socket takes precedence when explicitly enabled via env.
if (USE_SOCKET) {
options.socketPath = DEFAULT_SOCKET_PATH;
} else {
options.hostname = OLLAMA_HOST;
options.port = OLLAMA_PORT;
}
const req = http.request(options, (res) => {
const ok = res.statusCode >= 200 && res.statusCode < 300;
let data = '';
res.on('data', (chunk) => {
// Fix: only forward chunks to the streaming callback on a success
// status. Error bodies are buffered so they can be included in the
// rejection below instead of being silently passed to the consumer.
if (onChunk && ok) {
onChunk(chunk);
} else {
data += chunk;
}
});
res.on('end', () => {
if (!ok) {
reject(new Error(`API Error: ${res.statusCode} ${res.statusMessage} - ${data}`));
return;
}
if (onChunk) {
// Streaming consumers already received every chunk.
resolve({ done: true });
return;
}
try {
resolve(JSON.parse(data));
} catch (e) {
reject(new Error(`Failed to parse response: ${e.message}`));
}
});
});
req.on('error', (e) => {
reject(new Error(`Network Error: ${e.message}`));
});
// Write data to request body
req.write(postData);
req.end();
});
}
/**
 * Generates text completion using a specified model.
 * @param {string} prompt - The input prompt.
 * @param {string} [model] - The model to use (defaults to config).
 * @param {object} [options] - Additional options (temperature, system prompt, etc.)
 * @param {function} [onToken] - Callback invoked with each streamed token.
 * @returns {Promise<string>} - The generated text (full concatenation when streaming).
 * @throws {Error} Propagates transport/API errors after logging them.
 */
export async function generate(prompt, model = DEFAULT_MODEL, options = {}, onToken) {
try {
const payload = {
model,
prompt,
...options,
// Fix: stream iff a token callback was provided. Previously `...options`
// was spread last, so `options.stream: true` without a consumer would
// produce NDJSON that broke the buffered JSON.parse path in post().
stream: Boolean(onToken)
};
if (!onToken) {
// Fix: the original issued the request TWICE on this path — once
// with an undefined chunk handler (result discarded), then again.
const response = await post('/api/generate', payload);
return response.response;
}
let fullResponse = "";
await post('/api/generate', payload, (chunk) => {
const str = chunk.toString();
// Ollama streams multiple NDJSON objects in one chunk sometimes.
const lines = str.split('\n').filter(line => line.trim() !== '');
for (const line of lines) {
try {
const json = JSON.parse(line);
if (json.response) {
onToken(json.response);
fullResponse += json.response;
}
} catch (e) {
// Partial JSON line split across chunk boundaries; best-effort skip.
}
}
});
return fullResponse;
} catch (error) {
console.error(`[API] Generate failed: ${error.message}`);
throw error;
}
}
/**
 * Chats with the model; maintaining conversational context is the caller's
 * responsibility (pass the full message history on every call).
 * @param {Array} messages - Message objects, e.g. [{role: 'user', content: '...'}]
 * @param {string} [model] - Model name; defaults to the configured model.
 * @returns {Promise<object>} - The assistant message {role: 'assistant', content: '...'}
 * @throws {Error} Propagates transport/API errors after logging them.
 */
export async function chat(messages, model = DEFAULT_MODEL) {
try {
// Non-streaming request: the whole reply arrives as one JSON object.
const body = { model, messages, stream: false };
const { message } = await post('/api/chat', body);
return message;
} catch (error) {
console.error(`[API] Chat failed: ${error.message}`);
throw error;
}
}
/**
 * Checks whether the Ollama API is reachable (health probe against '/').
 * @returns {Promise<boolean>} true when the root endpoint answers 200; false otherwise.
 */
export async function checkHealth() {
// Wrap the callback-style http.request in a promise; any transport error
// or non-200 status maps to a `false` result rather than a thrown error.
const probe = () =>
new Promise((resolve, reject) => {
const options = {
method: 'GET',
path: '/', // Ollama root often returns "Ollama is running"
};
if (USE_SOCKET) {
options.socketPath = DEFAULT_SOCKET_PATH;
} else {
options.hostname = OLLAMA_HOST;
options.port = OLLAMA_PORT;
}
const req = http.request(options, (res) => {
if (res.statusCode === 200) {
resolve(true);
} else {
reject(new Error('Status not 200'));
}
});
req.on('error', reject);
req.end();
});
try {
await probe();
return true;
} catch (e) {
return false;
}
}
/**
 * Dispatches a control signal to the AI agent by rendering the payload as a
 * synthetic system prompt and asking the model to reason about its state.
 * @param {object} payload - The control payload (e.g. { agent: 'CORE', phase: 0 })
 * @param {function} [onToken] - Optional streaming callback
 * @returns {Promise<string>} - The generated response
 */
export async function dispatchControl(payload, onToken = null) {
const { agent, phase, angle } = payload;
const controlPrompt = `[SYSTEM: CONTROL_SIGNAL] Agent: ${agent}, Phase: ${phase}, Angle: ${angle}\nTASK: ${payload.prompt || 'Maintain Entropy'}`;
// Return the promise so the caller (nexus-control) can handle the data.
return generate(controlPrompt, DEFAULT_MODEL, { num_predict: 100 }, onToken);
}
// Aggregate default export for consumers preferring `import api from './api.mjs'`;
// each function is also available as a named export above.
export default {
generate,
chat,
checkHealth,
dispatchControl
};