-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcryptoAi.py
More file actions
168 lines (146 loc) · 5.53 KB
/
cryptoAi.py
File metadata and controls
168 lines (146 loc) · 5.53 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
import os
import json
import requests
from crewai import Crew, Agent, Task, Process
from langchain_groq import ChatGroq
from google.colab import userdata
# --- Secrets and model setup ---
# Pull both API keys from Colab's secret store and fail fast with a clear
# message if either is missing. (The original only validated SERP_API_KEY;
# a missing GROQ_API_KEY would crash with an opaque TypeError from
# `os.environ[...] = None`.)
GROQ_API_KEY = userdata.get('GROQ_API_KEY')
if GROQ_API_KEY is None:
    raise ValueError("GROQ_API_KEY secret not found in Colab. Please add it in the secrets section.")
os.environ["GROQ_API_KEY"] = GROQ_API_KEY

SERP_API_KEY = userdata.get('SERP_API_KEY')
if SERP_API_KEY is None:
    raise ValueError("SERP_API_KEY secret not found in Colab. Please add it in the secrets section.")
os.environ["SERP_API_KEY"] = SERP_API_KEY

# Initialize the Groq language model shared by every agent below.
GROQ_LLM = ChatGroq(model="llama3-70b-8192")
# Define the function to fetch data from the API
def fetch_data():
    """Fetch 24h ticker data from the WazirX public API.

    Returns:
        list[dict]: The first five ticker entries (limited for simplicity).

    Raises:
        requests.HTTPError: If the API responds with a 4xx/5xx status.
        requests.RequestException: On network failure or timeout.
    """
    # Bound the request so a hung endpoint cannot block the whole script
    # (requests has NO default timeout), and fail loudly on an error status
    # instead of blindly slicing an error-payload JSON.
    response = requests.get("https://api.wazirx.com/sapi/v1/tickers/24hr", timeout=10)
    response.raise_for_status()
    return response.json()[:5]  # Limit to first 5 entries for simplicity
# Define the agents
class TestAgents:
    """Factory for the three agents that make up the crypto crew."""

    def _build(self, role, goal, backstory, max_iter):
        # Single construction point: every agent shares the same LLM,
        # verbosity, delegation policy, and memory settings.
        return Agent(
            role=role,
            goal=goal,
            backstory=backstory,
            llm=GROQ_LLM,
            verbose=True,
            allow_delegation=False,
            max_iter=max_iter,
            memory=True,
        )

    def make_data_fecher(self):
        """Agent responsible for pulling raw data from the API."""
        return self._build(
            role='Data Fecher Agent',
            goal="""Fetch all the data from the given API.""",
            backstory="""You are skilled at fetching data from APIs.""",
            max_iter=5,
        )

    def make_summary_agent(self):
        """Agent that summarizes a coin and rates it against Bitcoin."""
        return self._build(
            role='Data Summary Agent',
            goal="""Summarize the Crypto data and compare it with Bitcoin and give a rating from one to ten on if you should invest in this coin or not.""",
            backstory="""You are very good at making decisions on crypto money and analyzing it.""",
            max_iter=6,
        )

    def make_max_agent(self):
        """Agent that extracts the high price from the ticker data."""
        return self._build(
            role='Max Value Agent',
            goal="""Extract high price from crypto data.""",
            backstory="""You are very good at finding many statistical values.""",
            max_iter=6,
        )
# Define the tasks
class TestTasks:
    """Factory for the fetch, summarize, and max-price tasks."""

    @staticmethod
    def _context(description, expected_output, data):
        # Both per-item tasks wrap their payload in the same one-entry
        # context list; build it in one place.
        return [{
            "description": description,
            "expected_output": expected_output,
            "data": data,
        }]

    def data_feche(self):
        """Task: fetch ticker data and persist it to crypto_data.json."""
        return Task(
            description="""Fetch data from the API.""",
            expected_output="""Data in the following format:
{
"symbol": "trxinr",
"baseAsset": "trx",
"quoteAsset": "inr",
"openPrice": "9.8324",
"lowPrice": "9.7003",
"highPrice": "9.9861",
"lastPrice": "9.72",
"volume": "352842.0",
"bidPrice": "9.72",
"askPrice": "9.724",
"at": 1716804798000
}
""",
            output_file="crypto_data.json",
            agent=data_fecher_agent,
        )

    def summarize_data(self, data):
        """Task: produce a short summary of one ticker entry."""
        return Task(
            description="""Summarize the Crypto data that has been fetched.""",
            expected_output="""A summary of the Crypto data in two to three lines.""",
            context=self._context(
                "Context for summarizing data",
                "Expected summary output",
                data,
            ),
            agent=summary_agent,
        )

    def extract_max_value(self, data):
        """Task: pull the high price out of one ticker entry."""
        return Task(
            description="""Fetch the high price from the data that has been given to you.""",
            expected_output="""The highest price in the provided data.""",
            context=self._context(
                "Context for extracting max value",
                "Expected max value output",
                data,
            ),
            agent=max_agent,
        )
# Instantiate agents
agents = TestAgents()
data_fecher_agent = agents.make_data_fecher()
summary_agent = agents.make_summary_agent()
max_agent = agents.make_max_agent()

# Fetch data using the data fetcher agent
data = fetch_data()

# Persist the fetched data so the tasks (and a human) can inspect it.
with open('crypto_data.json', 'w') as f:
    json.dump(data, f, indent=4)

# Process the fetched data and create tasks
tasks = TestTasks()
data_fecher_task = tasks.data_feche()

# One summary task followed by one max-value task per ticker entry,
# interleaved in fetch order.
task_list = [
    task
    for item in data
    for task in (tasks.summarize_data(item), tasks.extract_max_value(item))
]
# Instantiate your crew with a sequential process
crew = Crew(
    agents=[data_fecher_agent, summary_agent, max_agent],
    tasks=task_list,
    verbose=2,
    process=Process.sequential,
    full_output=True,
    share_crew=False,
    step_callback=lambda x: print(f"Step completed: {x}")
)
results = crew.kickoff()
print("Crew Work Results:")
# On modern crewai, kickoff() returns a CrewOutput whose .raw is a plain
# string; older versions with full_output=True return a dict with no .raw
# attribute at all. Handle both instead of assuming one shape.
print(getattr(results, 'raw', results))


def _task_output(outputs, index, fallback):
    """Safely extract the exported text of task number *index*.

    Guards against fewer outputs than expected (IndexError in the
    original) and against entries being objects rather than dicts.
    """
    if index >= len(outputs):
        return fallback
    entry = outputs[index]
    if isinstance(entry, dict):
        return entry.get('exported_output', fallback)
    # CrewOutput-style task entries expose their text via attributes.
    return getattr(entry, 'exported_output', None) or getattr(entry, 'raw', fallback)


# The original called results.raw.get('tasks_outputs', []), which raises
# AttributeError: .raw is a string on CrewOutput, and a dict result keys
# task outputs at the top level. Prefer the structured attribute, then the
# dict key, then give up gracefully.
if hasattr(results, 'tasks_output'):
    tasks_outputs = results.tasks_output
elif isinstance(results, dict):
    tasks_outputs = results.get('tasks_outputs', [])
else:
    tasks_outputs = []
print(f"Tasks Outputs: {tasks_outputs}")

# Format the results and write to report.txt: for each ticker entry, the
# raw JSON followed by its summary task output and its max-value output
# (tasks were queued interleaved, so entry i maps to outputs 2i and 2i+1).
with open('report.txt', 'w') as report_file:
    for i, item in enumerate(data):
        report_file.write(json.dumps(item, indent=4))
        report_file.write("\nsummary:\n")
        summary_output = _task_output(tasks_outputs, i * 2, 'Summary not available')
        max_value_output = _task_output(tasks_outputs, i * 2 + 1, 'Max value not available')
        report_file.write(f"{summary_output}\n")
        report_file.write(f"{max_value_output}\n")
        report_file.write("\n")
print("Report generated: report.txt")