Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
4f7ed34
(chore): upgrade package newspaper3k -> newspaper4k [compatibility] p…
khushal1512 Dec 28, 2025
eec9a7e
(feat): Replace GoogleSearch Tool with LangGraph Native DuckDuckGo Se…
khushal1512 Dec 29, 2025
b48af96
docs: minor fix in env variables needed to setup
khushal1512 Dec 29, 2025
f963b97
(refactor): Now Graph runs asynchronously due to parallel executing n…
khushal1512 Dec 29, 2025
0bc84cc
fix: update chunk_rag_data to handle dynamic keys and prevent crashes
khushal1512 Dec 29, 2025
ef1f0bc
fix: updated schema and fact parsing in generate_perspective
khushal1512 Dec 29, 2025
32beeb8
chore(backend): update dependencies for NLTK and FastAPI compatibility
khushal1512 Jan 25, 2026
938853a
chore(config): update tailwind config and add static assets
khushal1512 Jan 25, 2026
fd52659
feat(ai): refine prompt templates for perspective generation
khushal1512 Jan 25, 2026
c15403c
fix(backend): optimize fact check utility functions for thread safety
khushal1512 Jan 25, 2026
27efddf
feat(ai): implement core perspective generation node logic
khushal1512 Jan 25, 2026
cad3477
feat(ai): update graph builder to include new perspective nodes
khushal1512 Jan 25, 2026
a568e48
feat(landing): implement navbar, footer, and hero components
khushal1512 Jan 26, 2026
8710410
feat(landing): add features showcase with isometric card design
khushal1512 Jan 26, 2026
71d432d
feat(landing): implement stats counter and final call-to-action section
khushal1512 Jan 26, 2026
c550c7e
feat(search): implement search bar validation and routing logic
khushal1512 Jan 26, 2026
47d5753
refactor(frontend): extract api logic to usePerspective custom hook
khushal1512 Jan 26, 2026
6cb458a
feat(ui): add modular bias gauge and sidebar components
khushal1512 Jan 26, 2026
3a526a4
fix(frontend): connect perspective page to backend and fix mobile res…
khushal1512 Jan 26, 2026
aa21615
Final Working Frontend with BYOK + citations and OmniChat
khushal1512 Mar 3, 2026
804304d
(fix); hide scrollbar in chat section
khushal1512 Mar 3, 2026
2dc6c16
(update): add .env.example for easy setup
khushal1512 Mar 3, 2026
3ec6771
(fix): minor bug fixes
khushal1512 Mar 3, 2026
feed492
(fix): Hide SideBar in Chat Section
khushal1512 Mar 3, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -167,6 +167,7 @@ GROQ_API_KEY= <groq_api_key>
PINECONE_API_KEY = <your_pinecone_API_KEY>
PORT = 8000
SEARCH_KEY = <your_Google_custom_search_engine_API_key>
HF_TOKEN = <your_huggingface_access_token>
```

*Run backend:*
Expand Down
7 changes: 7 additions & 0 deletions backend/.env.example
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
GROQ_API_KEY=
GROQ_MODEL_NAME=llama-3.3-70b-versatile
PINECONE_API_KEY =
PORT = 5555
HF_TOKEN=
GEMINI_MODEL_NAME=
GEMINI_API_KEY=
43 changes: 43 additions & 0 deletions backend/app/llm_config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
import os
from dotenv import load_dotenv

load_dotenv()

# Default Groq chat model for the plain Groq-SDK call sites (bias check, RAG
# answering).  Accept both the GROQ_MODEL and GROQ_MODEL_NAME spellings —
# the repo's .env.example and this module historically disagreed on the
# variable name, so honoring both keeps every existing setup working.
LLM_MODEL = (
    os.getenv("GROQ_MODEL")
    or os.getenv("GROQ_MODEL_NAME")
    or "llama-3.3-70b-versatile"
)


def get_llm(provider: str = "groq", temperature: float = 0.7):
    """Return a LangChain chat model for the requested provider.

    Supported providers: ``"groq"`` (default) and ``"gemini"``.  Any other
    value falls through to Groq — preserved from the original behavior so
    existing callers are unaffected.  API keys and model names are read from
    environment variables so users can bring their own keys (BYOK).

    Args:
        provider: ``"groq"`` or ``"gemini"``.
        temperature: Sampling temperature forwarded to the chat model.

    Returns:
        A LangChain chat-model instance for the chosen provider.

    Raises:
        ValueError: If the provider's required API-key environment variable
            is not set.
    """

    if provider == "gemini":
        # Imported lazily so Groq-only deployments don't need the
        # google-genai extra installed.
        from langchain_google_genai import ChatGoogleGenerativeAI

        api_key = os.getenv("GEMINI_API_KEY")
        # Accept both GEMINI_MODEL and GEMINI_MODEL_NAME spellings; the env
        # template and this module have used different names, so honor both.
        model_name = (
            os.getenv("GEMINI_MODEL")
            or os.getenv("GEMINI_MODEL_NAME")
            or "gemini-2.5-flash"
        )
        if not api_key:
            raise ValueError(
                "GEMINI_API_KEY environment variable is required for Gemini"
            )
        return ChatGoogleGenerativeAI(
            model=model_name,
            google_api_key=api_key,
            temperature=temperature,
        )

    # Default → Groq (also the fallback for unrecognized provider values).
    from langchain_groq import ChatGroq

    api_key = os.getenv("GROQ_API_KEY")
    # Accept both GROQ_MODEL and GROQ_MODEL_NAME spellings (see above).
    model_name = (
        os.getenv("GROQ_MODEL")
        or os.getenv("GROQ_MODEL_NAME")
        or "llama-3.3-70b-versatile"
    )
    if not api_key:
        raise ValueError("GROQ_API_KEY environment variable is required for Groq")
    return ChatGroq(
        model=model_name,
        api_key=api_key,
        temperature=temperature,
    )
3 changes: 2 additions & 1 deletion backend/app/modules/bias_detection/check_bias.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
from dotenv import load_dotenv
import json
from app.logging.logging_config import setup_logger
from app.llm_config import LLM_MODEL

logger = setup_logger(__name__)

Expand Down Expand Up @@ -61,7 +62,7 @@ def check_bias(text):
"content": (f"Give bias score to the following article \n\n{text}"),
},
],
model="gemma2-9b-it",
model=LLM_MODEL,
temperature=0.3,
max_tokens=512,
)
Expand Down
97 changes: 97 additions & 0 deletions backend/app/modules/chat/chat_graph.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
"""
chat_graph.py
-------------
LangGraph-based conversational agent with persistent memory.
"""

from typing import Annotated
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
from langgraph.checkpoint.memory import MemorySaver
from langchain_core.messages import SystemMessage, HumanMessage
from typing_extensions import TypedDict
from app.llm_config import get_llm
from app.logging.logging_config import setup_logger

logger = setup_logger(__name__)


class ChatState(TypedDict):
    """Graph state: the running conversation for one chat thread.

    The ``add_messages`` annotation tells LangGraph to *append* messages
    returned by nodes rather than replace the list, so the full history
    accumulates under the thread's checkpoint.
    """

    # Full message history; node outputs are merged in by add_messages.
    messages: Annotated[list, add_messages]


# In-process checkpointer: chat threads persist only for the lifetime of
# this server process (no durable storage across restarts).
_memory = MemorySaver()


def _chatbot_node(state: ChatState, config: dict):
    """Single LLM turn: send the accumulated history to the chat model.

    The provider is chosen per-invocation via ``config["configurable"]``
    (defaults to Groq), enabling BYOK provider switching per thread.
    """
    configurable = config.get("configurable", {})
    llm = get_llm(provider=configurable.get("provider", "groq"), temperature=0.7)
    reply = llm.invoke(state["messages"])
    return {"messages": [reply]}


# Build the single-node conversation graph: START -> chatbot -> END.
# Compiling with the MemorySaver checkpointer gives per-thread persistent
# message history, keyed by configurable.thread_id at invoke time.
_graph = StateGraph(ChatState)
_graph.add_node("chatbot", _chatbot_node)
_graph.add_edge(START, "chatbot")
_graph.add_edge("chatbot", END)
chat_app = _graph.compile(checkpointer=_memory)


async def initialize_chat_thread(thread_id: str, analysis_result: dict) -> None:
    """Seed a fresh chat thread with the article-analysis context.

    Builds one SystemMessage summarizing the analysis (summary, sentiment,
    counter-perspective, fact checks, citations) and runs it through the
    graph so the checkpointer stores it under *thread_id* for later turns.
    """
    raw_perspective = analysis_result.get("perspective", {})
    # Normalize into a plain dict whether the perspective arrives as a
    # pydantic v2 model, a pydantic v1 model, a dict, or something else.
    if hasattr(raw_perspective, "model_dump"):
        perspective = raw_perspective.model_dump()
    elif hasattr(raw_perspective, "dict"):
        perspective = raw_perspective.dict()
    elif isinstance(raw_perspective, dict):
        perspective = raw_perspective
    else:
        perspective = {"perspective": str(raw_perspective)}

    fact_lines = [
        f"- {fact.get('claim', 'N/A')}: {fact.get('status', '?')} -- {fact.get('reason', '')}"
        for fact in analysis_result.get("facts", [])
    ]
    facts_text = "\n".join(fact_lines) if fact_lines else "No facts were verified."

    citation_lines = [
        f"- {cite.get('title', 'Untitled')} ({cite.get('url', '')})"
        for cite in analysis_result.get("web_search_citations", [])
    ]
    citations_text = (
        "\n".join(citation_lines) if citation_lines else "No citations available."
    )

    summary = analysis_result.get("article_summary", "No summary available.")
    sentiment = analysis_result.get("sentiment", "unknown")

    system_content = (
        "You are an AI assistant helping the user understand and discuss a "
        "news article that has been analyzed. Here is the full analysis:\n\n"
        f"**Article Summary:**\n{summary}\n\n"
        f"**Detected Sentiment:** {sentiment}\n\n"
        f"**Counter-Perspective:**\n{perspective.get('perspective', '')}\n\n"
        f"**Fact-Check Results:**\n{facts_text}\n\n"
        f"**Web Search Citations:**\n{citations_text}\n\n"
        "Guidelines:\n"
        "- Answer questions using this context.\n"
        "- Be balanced and cite facts when relevant.\n"
        "- Be transparent if something is not covered.\n"
        "- Keep responses concise but thorough."
    )

    # Seeding turn always uses Groq; user turns may override the provider
    # in send_chat_message.
    config = {"configurable": {"thread_id": thread_id, "provider": "groq"}}
    await chat_app.ainvoke(
        {"messages": [SystemMessage(content=system_content)]}, config=config
    )
    logger.info(f"Chat thread {thread_id} initialised with article context.")


async def send_chat_message(
    thread_id: str, message: str, provider: str = "groq"
) -> str:
    """Run one user turn through the chat graph and return the reply text.

    The checkpointer restores the thread's prior messages from *thread_id*,
    so only the new HumanMessage needs to be supplied here.
    """
    run_config = {"configurable": {"thread_id": thread_id, "provider": provider}}
    state = await chat_app.ainvoke(
        {"messages": [HumanMessage(content=message)]}, config=run_config
    )
    reply = state["messages"][-1]
    # AIMessage carries .content; fall back to str() for anything unusual.
    if hasattr(reply, "content"):
        return reply.content
    return str(reply)
2 changes: 1 addition & 1 deletion backend/app/modules/chat/get_rag_data.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@

load_dotenv()

pc = Pinecone(os.getenv("PINECONE_API_KEY"))
pc = Pinecone(api_key=os.getenv("PINECONE_API_KEY"))
index = pc.Index("perspective")


Expand Down
3 changes: 2 additions & 1 deletion backend/app/modules/chat/llm_processing.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
from groq import Groq
from dotenv import load_dotenv
from app.logging.logging_config import setup_logger
from app.llm_config import LLM_MODEL

logger = setup_logger(__name__)

Expand Down Expand Up @@ -55,7 +56,7 @@ def ask_llm(question, docs):
"""

response = client.chat.completions.create(
model="gemma2-9b-it",
model=LLM_MODEL,
messages=[
{"role": "system", "content": "Use only the context to answer."},
{"role": "user", "content": prompt},
Expand Down
Loading