|
1 | | -import asyncio |
2 | | -import os |
3 | | - |
4 | | -from agent_framework import ChatAgent |
5 | | -from agent_framework.openai import OpenAIChatClient |
6 | | -from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider |
7 | | -from dotenv import load_dotenv |
8 | | -from rich import print |
9 | | - |
10 | | -# Configure OpenAI client based on environment |
11 | | -load_dotenv(override=True) |
12 | | -API_HOST = os.getenv("API_HOST", "github") |
13 | | - |
14 | | -async_credential = None |
15 | | -if API_HOST == "azure": |
16 | | - async_credential = DefaultAzureCredential() |
17 | | - token_provider = get_bearer_token_provider(async_credential, "https://cognitiveservices.azure.com/.default") |
18 | | - client = OpenAIChatClient( |
19 | | - base_url=f"{os.environ['AZURE_OPENAI_ENDPOINT']}/openai/v1/", |
20 | | - api_key=token_provider, |
21 | | - model_id=os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"], |
22 | | - ) |
23 | | -elif API_HOST == "github": |
24 | | - client = OpenAIChatClient( |
25 | | - base_url="https://models.github.ai/inference", |
26 | | - api_key=os.environ["GITHUB_TOKEN"], |
27 | | - model_id=os.getenv("GITHUB_MODEL", "openai/gpt-5-mini"), |
28 | | - ) |
29 | | -elif API_HOST == "ollama": |
30 | | - client = OpenAIChatClient( |
31 | | - base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"), |
32 | | - api_key="none", |
33 | | - model_id=os.environ.get("OLLAMA_MODEL", "llama3.1:latest"), |
34 | | - ) |
35 | | -else: |
36 | | - client = OpenAIChatClient(api_key=os.environ["OPENAI_API_KEY"], model_id=os.environ.get("OPENAI_MODEL", "gpt-5-mini")) |
37 | | - |
38 | | -agent = ChatAgent(chat_client=client, instructions="You're an informational agent. Answer questions cheerfully.") |
39 | | - |
40 | | - |
41 | | -async def main(): |
42 | | - response = await agent.run("Whats weather today in San Francisco?") |
43 | | - print(response.text) |
44 | | - |
45 | | - if async_credential: |
46 | | - await async_credential.close() |
47 | | - |
48 | | - |
49 | | -if __name__ == "__main__": |
50 | | - asyncio.run(main()) |
| 1 | +import asyncio |
| 2 | +import os |
| 3 | + |
| 4 | +from agent_framework import ChatAgent |
| 5 | +from agent_framework.openai import OpenAIChatClient |
| 6 | +from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider |
| 7 | +from dotenv import load_dotenv |
| 8 | +from rich import print |
| 9 | + |
# Select and configure the chat backend from the environment.
load_dotenv(override=True)
API_HOST = os.environ.get("API_HOST", "github")

async_credential = None
if API_HOST == "azure":
    # Keyless Azure auth: a bearer-token provider stands in for the API key.
    async_credential = DefaultAzureCredential()
    token_provider = get_bearer_token_provider(async_credential, "https://cognitiveservices.azure.com/.default")
    azure_endpoint = os.environ["AZURE_OPENAI_ENDPOINT"]
    client = OpenAIChatClient(
        base_url=azure_endpoint + "/openai/v1/",
        api_key=token_provider,
        model_id=os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"],
    )
elif API_HOST == "github":
    # GitHub Models inference endpoint, authenticated with a GitHub token.
    client = OpenAIChatClient(
        base_url="https://models.github.ai/inference",
        api_key=os.environ["GITHUB_TOKEN"],
        model_id=os.environ.get("GITHUB_MODEL", "openai/gpt-5-mini"),
    )
else:
    # Fallback: the hosted OpenAI API with a conventional API key.
    client = OpenAIChatClient(
        api_key=os.environ["OPENAI_API_KEY"],
        model_id=os.environ.get("OPENAI_MODEL", "gpt-5-mini"),
    )

agent = ChatAgent(chat_client=client, instructions="You're an informational agent. Answer questions cheerfully.")
| 35 | + |
| 36 | + |
async def main():
    """Ask the agent one demo question, print the reply, then clean up.

    When API_HOST == "azure", the module-level ``async_credential`` owns an
    open async HTTP session that must be closed before the loop exits.
    """
    try:
        response = await agent.run("Whats weather today in San Francisco?")
        print(response.text)
    finally:
        # Fix: close the credential even when agent.run raises. Previously the
        # close only ran on the success path, leaking the async session (and
        # triggering "Unclosed client session" warnings) on any failure.
        if async_credential:
            await async_credential.close()


if __name__ == "__main__":
    asyncio.run(main())
0 commit comments