From 840eced41cfb5af4ecada300e20f453aa3567107 Mon Sep 17 00:00:00 2001 From: Erica Pisani Date: Tue, 24 Mar 2026 13:21:47 +0100 Subject: [PATCH 1/2] test(langchain): Add pipeline name attribute assertions to on_llm_start tests Add name kwarg to on_llm_start call in test_langchain_message_truncation and assert that gen_ai.pipeline.name is set correctly when a pipeline name is provided. Also add gen_ai.operation.name assertions to relevant tests. Co-Authored-By: Claude Sonnet 4.6 --- sentry_sdk/integrations/langchain.py | 6 ++++++ tests/integrations/langchain/test_langchain.py | 6 +++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/sentry_sdk/integrations/langchain.py b/sentry_sdk/integrations/langchain.py index f4ec75310d..1d77001684 100644 --- a/sentry_sdk/integrations/langchain.py +++ b/sentry_sdk/integrations/langchain.py @@ -374,6 +374,12 @@ def on_llm_start( ) span = watched_span.span + span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "generate_text") + + pipeline_name = kwargs.get("name") + if pipeline_name: + span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, pipeline_name) + if model: span.set_data( SPANDATA.GEN_AI_REQUEST_MODEL, diff --git a/tests/integrations/langchain/test_langchain.py b/tests/integrations/langchain/test_langchain.py index de787fd564..c7ef453402 100644 --- a/tests/integrations/langchain/test_langchain.py +++ b/tests/integrations/langchain/test_langchain.py @@ -1040,6 +1040,7 @@ def test_langchain_message_truncation(sentry_init, capture_events): serialized=serialized, prompts=prompts, run_id=run_id, + name="my_pipeline", invocation_params={ "temperature": 0.7, "max_tokens": 100, @@ -1069,8 +1070,10 @@ def test_langchain_message_truncation(sentry_init, capture_events): assert len(llm_spans) > 0 llm_span = llm_spans[0] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in llm_span["data"] + assert llm_span["data"]["gen_ai.operation.name"] == "generate_text" + assert llm_span["data"][SPANDATA.GEN_AI_PIPELINE_NAME] == "my_pipeline" + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in llm_span["data"] messages_data = llm_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] assert isinstance(messages_data, str) @@ -1783,6 +1786,7 @@ def test_langchain_response_model_extraction( assert len(llm_spans) > 0 llm_span = llm_spans[0] + assert llm_span["data"]["gen_ai.operation.name"] == "generate_text" if expected_model is not None: assert SPANDATA.GEN_AI_RESPONSE_MODEL in llm_span["data"] From 940a4552ebd2c7fd938bce823e4f194b257bfbfc Mon Sep 17 00:00:00 2001 From: Erica Pisani Date: Wed, 25 Mar 2026 10:17:07 +0100 Subject: [PATCH 2/2] Address CR comment --- tests/integrations/langchain/test_langchain.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/integrations/langchain/test_langchain.py b/tests/integrations/langchain/test_langchain.py index f0adab7802..af5a1ce79d 100644 --- a/tests/integrations/langchain/test_langchain.py +++ b/tests/integrations/langchain/test_langchain.py @@ -147,7 +147,7 @@ def test_langchain_text_completion( ) as _: with start_transaction(): input_text = "What is the capital of France?" - model.invoke(input_text) + model.invoke(input_text, config={"run_name": "my-snazzy-pipeline"}) tx = events[0] assert tx["type"] == "transaction" @@ -159,6 +159,8 @@ def test_langchain_text_completion( llm_span = llm_spans[0] assert llm_span["description"] == "generate_text gpt-3.5-turbo" + assert llm_span["data"]["gen_ai.system"] == "openai" + assert llm_span["data"]["gen_ai.pipeline.name"] == "my-snazzy-pipeline" + assert llm_span["data"]["gen_ai.request.model"] == "gpt-3.5-turbo" assert llm_span["data"]["gen_ai.response.text"] == "The capital of France is Paris." assert llm_span["data"]["gen_ai.usage.total_tokens"] == 25