sentry_sdk/integrations/langchain.py (6 additions, 0 deletions)

@@ -374,6 +374,12 @@ def on_llm_start(
         )
         span = watched_span.span

+        span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "generate_text")
+
+        pipeline_name = kwargs.get("name")
+        if pipeline_name:
+            span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, pipeline_name)
+
         if model:
             span.set_data(
                 SPANDATA.GEN_AI_REQUEST_MODEL,
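For context on where the `name` kwarg comes from: LangChain forwards extra keyword arguments from `CallbackManager.on_llm_start` through to handlers, and a `run_name` set in the invocation config is passed along as `name`. A minimal sketch of that flow, assuming `FakeListLLM` is available from `langchain_core.language_models`; the handler below is a toy stand-in for the Sentry callback, not the integration itself:

```python
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.language_models import FakeListLLM


class PipelineNameHandler(BaseCallbackHandler):
    """Toy handler mirroring the kwargs.get("name") lookup in the diff above."""

    def on_llm_start(self, serialized, prompts, **kwargs):
        # If the caller sets run_name in the config, it should arrive here as
        # the "name" kwarg, which the integration records as the pipeline name.
        pipeline_name = kwargs.get("name")
        if pipeline_name:
            print(f"gen_ai.pipeline.name = {pipeline_name}")


llm = FakeListLLM(responses=["ok"])
llm.invoke(
    "hello",
    config={"run_name": "my_pipeline", "callbacks": [PipelineNameHandler()]},
)
# Expected output, if run_name propagates as described:
# gen_ai.pipeline.name = my_pipeline
```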
tests/integrations/langchain/test_langchain.py (5 additions, 1 deletion)
@@ -1040,6 +1040,7 @@ def test_langchain_message_truncation(sentry_init, capture_events):
         serialized=serialized,
         prompts=prompts,
         run_id=run_id,
+        name="my_pipeline",
         invocation_params={
             "temperature": 0.7,
             "max_tokens": 100,

Review comment from a Contributor on the `name="my_pipeline",` line:

> Following the test changes, can you pass `config={"run_name": "my_pipeline"}` to `llm.invoke()` in `test_langchain_text_completion()`? That way we ensure that we pick up the run_name as the pipeline name, so the contract with the user is tested (whereas by calling the hook manually we only guarantee that whatever ends up in `name` is set as the pipeline name, which is unclear 😅).
@@ -1069,8 +1070,10 @@ def test_langchain_message_truncation(sentry_init, capture_events):
     assert len(llm_spans) > 0

     llm_span = llm_spans[0]
-    assert SPANDATA.GEN_AI_REQUEST_MESSAGES in llm_span["data"]
+    assert llm_span["data"]["gen_ai.operation.name"] == "generate_text"
+    assert llm_span["data"][SPANDATA.GEN_AI_PIPELINE_NAME] == "my_pipeline"
+
     assert SPANDATA.GEN_AI_REQUEST_MESSAGES in llm_span["data"]
     messages_data = llm_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]
     assert isinstance(messages_data, str)
@@ -1783,6 +1786,7 @@ def test_langchain_response_model_extraction(
     assert len(llm_spans) > 0

     llm_span = llm_spans[0]
+    assert llm_span["data"]["gen_ai.operation.name"] == "generate_text"

     if expected_model is not None:
         assert SPANDATA.GEN_AI_RESPONSE_MODEL in llm_span["data"]