@@ -11,21 +11,37 @@ import { generateErrorContext } from './constraintExtractor';
export class PromptOptimizer {
  private anthropic?: Anthropic;
  private openai?: OpenAI;
  private openrouter?: OpenAI;

  constructor() {
    // Each client is instantiated only when its API key is present and does
    // not look like an unconfigured placeholder copied from a sample .env.
    // All three providers share one guard so the placeholder patterns cannot
    // drift apart again (previously Anthropic rejected three patterns while
    // OpenAI/OpenRouter rejected only one).
    const anthropicKey = process.env.ANTHROPIC_API_KEY;
    if (PromptOptimizer.isUsableKey(anthropicKey)) {
      this.anthropic = new Anthropic({
        apiKey: anthropicKey
      });
    }

    const openaiKey = process.env.OPENAI_API_KEY;
    if (PromptOptimizer.isUsableKey(openaiKey)) {
      this.openai = new OpenAI({
        apiKey: openaiKey
      });
    }

    const openrouterKey = process.env.OPENROUTER_API_KEY;
    if (PromptOptimizer.isUsableKey(openrouterKey)) {
      // OpenRouter speaks the OpenAI wire protocol, so we reuse the OpenAI
      // SDK pointed at OpenRouter's base URL. The extra headers identify the
      // app per OpenRouter's attribution guidelines.
      this.openrouter = new OpenAI({
        baseURL: 'https://openrouter.ai/api/v1',
        apiKey: openrouterKey,
        defaultHeaders: {
          'HTTP-Referer': 'https://tuneprompt.xyz',
          'X-Title': 'TunePrompt CLI',
        },
      });
    }
  }

  /**
   * True when `key` is set and is not a known placeholder value left over
   * from a sample .env file ('your_key…', 'api_key…', or 'phc_xxxxx').
   * The type predicate lets callers pass the narrowed `string` straight
   * to the SDK constructors.
   */
  private static isUsableKey(key: string | undefined): key is string {
    return (
      !!key &&
      !key.includes('your_key') &&
      !key.startsWith('api_key') &&
      key !== 'phc_xxxxx'
    );
  }
3046
3147 /**
@@ -103,7 +119,7 @@ export class PromptOptimizer {
103119 if ( provider === 'anthropic' && this . anthropic ) {
104120 console . log ( `⚡ Using Anthropic for candidate generation...` ) ;
105121 const response = await this . anthropic . messages . create ( {
106- model : 'claude-sonnet-4-20250514 ' ,
122+ model : 'claude-3-5-sonnet-20240620 ' ,
107123 max_tokens : 4000 ,
108124 temperature : 0.7 , // Some creativity for prompt rewriting
109125 messages : [ {
@@ -163,16 +179,36 @@ export class PromptOptimizer {
163179 score : 0
164180 }
165181 ] ;
166- } else if ( provider === 'openrouter' ) {
167- // For OpenRouter, we'll use the shadowTester to get a response
182+ } else if ( provider === 'openrouter' && this . openrouter ) {
168183 console . log ( `⚡ Using OpenRouter for candidate generation...` ) ;
169- // Since OpenRouter is used in shadow testing, we'll use a different approach
170- // For now, we'll return a basic fallback since OpenRouter doesn't support structured outputs as well
171- return [ {
172- prompt : this . createFallbackPrompt ( failedTest ) ,
173- reasoning : 'Generated using fallback method' ,
174- score : 0
175- } ] ;
184+ const response = await this . openrouter . chat . completions . create ( {
185+ model : 'anthropic/claude-3-sonnet' , // Default robust model on OpenRouter
186+ messages : [ {
187+ role : 'user' ,
188+ content : metaPrompt
189+ } ] ,
190+ response_format : { type : 'json_object' }
191+ } ) ;
192+
193+ const content = response . choices [ 0 ] ?. message ?. content ;
194+ if ( ! content ) {
195+ // Fallback if model doesn't support JSON mode or returns empty
196+ throw new Error ( 'No content returned from OpenRouter' ) ;
197+ }
198+
199+ const parsed = JSON . parse ( content ) ;
200+ return [
201+ {
202+ prompt : parsed . candidateA . prompt ,
203+ reasoning : parsed . candidateA . reasoning ,
204+ score : 0
205+ } ,
206+ {
207+ prompt : parsed . candidateB . prompt ,
208+ reasoning : parsed . candidateB . reasoning ,
209+ score : 0
210+ }
211+ ] ;
176212 }
177213 } catch ( error : any ) {
178214 console . log ( `⚠️ ${ provider } provider failed for candidate generation: ${ error . message } ` ) ;
@@ -184,7 +220,7 @@ export class PromptOptimizer {
184220 console . error ( 'All providers failed for candidate generation' ) ;
185221 return [ {
186222 prompt : this . createFallbackPrompt ( failedTest ) ,
187- reasoning : 'Fallback prompt with basic improvements ' ,
223+ reasoning : 'Generated using fallback method ' ,
188224 score : 0
189225 } ] ;
190226 }
0 commit comments