Skip to content

Commit d8697a0

Browse files
sararob authored and copybara-github committed
chore: remove duplicate types and use types from GenAI SDK
PiperOrigin-RevId: 885534243
1 parent 394253a commit d8697a0

File tree

10 files changed

+182
-1324
lines changed

10 files changed

+182
-1324
lines changed

tests/unit/vertexai/genai/replays/test_batch_evaluate.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,11 +18,12 @@
1818

1919
from tests.unit.vertexai.genai.replays import pytest_helper
2020
from vertexai._genai import types
21+
from google.genai import types as genai_types
2122

2223

2324
def test_batch_eval(client):
2425
eval_dataset = types.EvaluationDataset(
25-
gcs_source=types.GcsSource(
26+
gcs_source=genai_types.GcsSource(
2627
uris=["gs://genai-eval-sdk-replay-test/test_data/inference_results.jsonl"]
2728
)
2829
)
@@ -52,7 +53,7 @@ def test_batch_eval(client):
5253
@pytest.mark.asyncio
5354
async def test_batch_eval_async(client):
5455
eval_dataset = types.EvaluationDataset(
55-
gcs_source=types.GcsSource(
56+
gcs_source=genai_types.GcsSource(
5657
uris=["gs://genai-eval-sdk-replay-test/test_data/inference_results.jsonl"]
5758
)
5859
)

tests/unit/vertexai/genai/replays/test_create_agent_engine_a2a_task.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@
1616

1717
from tests.unit.vertexai.genai.replays import pytest_helper
1818
from vertexai._genai import types
19+
from google.genai import types as genai_types
1920
import pytest
2021

2122

@@ -44,7 +45,7 @@ def test_create_simple_a2a_task(client):
4445
role="user",
4546
message_id="message123",
4647
parts=[
47-
types.Part(
48+
genai_types.Part(
4849
text="hello123",
4950
)
5051
],
@@ -60,7 +61,7 @@ def test_create_simple_a2a_task(client):
6061
display_name="display_name123",
6162
description="description123",
6263
parts=[
63-
types.Part(
64+
genai_types.Part(
6465
text="hello456",
6566
)
6667
],
@@ -125,7 +126,7 @@ async def test_create_simple_a2a_task_async(client):
125126
role="user",
126127
message_id="message123",
127128
parts=[
128-
types.Part(
129+
genai_types.Part(
129130
text="hello123",
130131
)
131132
],
@@ -141,7 +142,7 @@ async def test_create_simple_a2a_task_async(client):
141142
display_name="display_name123",
142143
description="description123",
143144
parts=[
144-
types.Part(
145+
genai_types.Part(
145146
text="hello456",
146147
)
147148
],

tests/unit/vertexai/genai/replays/test_create_evaluation_run.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -24,23 +24,23 @@
2424
GENERAL_QUALITY_METRIC = types.EvaluationRunMetric(
2525
metric="general_quality_v1",
2626
metric_config=types.UnifiedMetric(
27-
predefined_metric_spec=types.PredefinedMetricSpec(
27+
predefined_metric_spec=genai_types.PredefinedMetricSpec(
2828
metric_spec_name="general_quality_v1",
2929
)
3030
),
3131
)
3232
FINAL_RESPONSE_QUALITY_METRIC = types.EvaluationRunMetric(
3333
metric="final_response_quality_v1",
3434
metric_config=types.UnifiedMetric(
35-
predefined_metric_spec=types.PredefinedMetricSpec(
35+
predefined_metric_spec=genai_types.PredefinedMetricSpec(
3636
metric_spec_name="final_response_quality_v1",
3737
)
3838
),
3939
)
4040
LLM_METRIC = types.EvaluationRunMetric(
4141
metric="llm_metric",
4242
metric_config=types.UnifiedMetric(
43-
llm_based_metric_spec=types.LLMBasedMetricSpec(
43+
llm_based_metric_spec=genai_types.LLMBasedMetricSpec(
4444
metric_prompt_template=(
4545
"\nEvaluate the fluency of the response. Provide a score from 1-5."
4646
)
@@ -50,15 +50,15 @@
5050
EXACT_MATCH_COMPUTATION_BASED_METRIC = types.EvaluationRunMetric(
5151
metric="exact_match",
5252
metric_config=types.UnifiedMetric(
53-
computation_based_metric_spec=types.ComputationBasedMetricSpec(
54-
type=types.ComputationBasedMetricType.EXACT_MATCH,
53+
computation_based_metric_spec=genai_types.ComputationBasedMetricSpec(
54+
type=genai_types.ComputationBasedMetricType.EXACT_MATCH,
5555
)
5656
),
5757
)
5858
BLEU_COMPUTATION_BASED_METRIC = types.EvaluationRunMetric(
5959
metric="exact_match_2",
6060
metric_config=types.UnifiedMetric(
61-
computation_based_metric_spec=types.ComputationBasedMetricSpec(
61+
computation_based_metric_spec=genai_types.ComputationBasedMetricSpec(
6262
type=types.ComputationBasedMetricType.BLEU,
6363
parameters={"use_effective_order": True},
6464
)

tests/unit/vertexai/genai/replays/test_custom_code_execution_metric.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@
1616

1717
from tests.unit.vertexai.genai.replays import pytest_helper
1818
from vertexai._genai import types
19+
from google.genai import types as genai_types
1920
import pandas as pd
2021

2122

@@ -84,7 +85,7 @@ def evaluate(instance):
8485
)
8586

8687
eval_dataset = types.EvaluationDataset(
87-
gcs_source=types.GcsSource(
88+
gcs_source=genai_types.GcsSource(
8889
uris=["gs://genai-eval-sdk-replay-test/test_data/inference_results.jsonl"]
8990
),
9091
)

tests/unit/vertexai/genai/replays/test_internal_generate_rubrics.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,6 @@
1616

1717

1818
from tests.unit.vertexai.genai.replays import pytest_helper
19-
from vertexai._genai import types
2019
from google.genai import types as genai_types
2120

2221
_TEST_RUBRIC_GENERATION_PROMPT = """SPECIAL INSTRUCTION: think silently. Silent thinking token budget: 16384.
@@ -157,7 +156,7 @@ def test_internal_method_generate_rubrics(client):
157156
]
158157
response = client.evals._generate_rubrics(
159158
contents=test_contents,
160-
rubric_generation_spec=types.RubricGenerationSpec(
159+
rubric_generation_spec=genai_types.RubricGenerationSpec(
161160
prompt_template=_TEST_RUBRIC_GENERATION_PROMPT,
162161
),
163162
)

tests/unit/vertexai/genai/test_evals.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -678,7 +678,7 @@ def test_inference_with_gcs_destination(
678678
inference_result.eval_dataset_df, expected_df_to_save
679679
)
680680
assert inference_result.candidate_name == "gemini-pro"
681-
assert inference_result.gcs_source == vertexai_genai_types.GcsSource(
681+
assert inference_result.gcs_source == genai_types.GcsSource(
682682
uris=[expected_gcs_path]
683683
)
684684

@@ -5227,7 +5227,7 @@ def test_execute_evaluation_with_agent_info(
52275227
input_dataset = vertexai_genai_types.EvaluationDataset(
52285228
eval_dataset_df=dataset_df
52295229
)
5230-
predefined_metric = vertexai_genai_types.PredefinedMetricSpec(
5230+
predefined_metric = genai_types.PredefinedMetricSpec(
52315231
metric_spec_name="tool_search_validity"
52325232
)
52335233
tool = {
@@ -5825,7 +5825,7 @@ def test_execute_evaluation_deduplicates_candidate_names(
58255825
vertexai_genai_types.EvaluateInstancesResponse(
58265826
exact_match_results=vertexai_genai_types.ExactMatchResults(
58275827
exact_match_metric_values=[
5828-
vertexai_genai_types.ExactMatchMetricValue(score=1.0)
5828+
genai_types.ExactMatchMetricValue(score=1.0)
58295829
]
58305830
)
58315831
)

vertexai/_genai/_evals_common.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1349,8 +1349,8 @@ def _resolve_evaluation_run_metrics(
13491349
resolved_metrics_list.append(
13501350
types.EvaluationRunMetric(
13511351
metric=resolved_metric.name,
1352-
metric_config=types.UnifiedMetric(
1353-
predefined_metric_spec=types.PredefinedMetricSpec(
1352+
metric_config=genai_types.UnifiedMetric(
1353+
predefined_metric_spec=genai_types.PredefinedMetricSpec(
13541354
metric_spec_name=resolved_metric.name,
13551355
)
13561356
),
@@ -1390,8 +1390,8 @@ def _resolve_evaluation_run_metrics(
13901390
resolved_metrics_list.append(
13911391
types.EvaluationRunMetric(
13921392
metric=resolved_metric.name,
1393-
metric_config=types.UnifiedMetric(
1394-
predefined_metric_spec=types.PredefinedMetricSpec(
1393+
metric_config=genai_types.UnifiedMetric(
1394+
predefined_metric_spec=genai_types.PredefinedMetricSpec(
13951395
metric_spec_name=resolved_metric.name,
13961396
)
13971397
),

vertexai/_genai/evals.py

Lines changed: 8 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -575,9 +575,7 @@ def _GenerateInstanceRubricsRequest_to_vertex(
575575
setv(
576576
to_object,
577577
["rubricGenerationSpec"],
578-
_RubricGenerationSpec_to_vertex(
579-
getv(from_object, ["rubric_generation_spec"]), to_object
580-
),
578+
getv(from_object, ["rubric_generation_spec"]),
581579
)
582580

583581
if getv(from_object, ["config"]) is not None:
@@ -768,37 +766,9 @@ def _RubricBasedMetricSpec_to_vertex(
768766
setv(
769767
to_object,
770768
["rubricGenerationSpec"],
771-
_RubricGenerationSpec_to_vertex(
772-
getv(from_object, ["rubric_generation_spec"]), to_object
773-
),
774-
)
775-
776-
return to_object
777-
778-
779-
def _RubricGenerationSpec_to_vertex(
780-
from_object: Union[dict[str, Any], object],
781-
parent_object: Optional[dict[str, Any]] = None,
782-
) -> dict[str, Any]:
783-
to_object: dict[str, Any] = {}
784-
if getv(from_object, ["prompt_template"]) is not None:
785-
setv(to_object, ["promptTemplate"], getv(from_object, ["prompt_template"]))
786-
787-
if getv(from_object, ["rubric_content_type"]) is not None:
788-
setv(
789-
to_object, ["rubricContentType"], getv(from_object, ["rubric_content_type"])
769+
getv(from_object, ["rubric_generation_spec"]),
790770
)
791771

792-
if getv(from_object, ["rubric_type_ontology"]) is not None:
793-
setv(
794-
to_object,
795-
["rubricTypeOntology"],
796-
getv(from_object, ["rubric_type_ontology"]),
797-
)
798-
799-
if getv(from_object, ["generator_model_config"]) is not None:
800-
setv(to_object, ["model_config"], getv(from_object, ["generator_model_config"]))
801-
802772
return to_object
803773

804774

@@ -1236,9 +1206,9 @@ def _generate_rubrics(
12361206
*,
12371207
contents: list[genai_types.ContentOrDict],
12381208
predefined_rubric_generation_spec: Optional[
1239-
types.PredefinedMetricSpecOrDict
1209+
genai_types.PredefinedMetricSpecOrDict
12401210
] = None,
1241-
rubric_generation_spec: Optional[types.RubricGenerationSpecOrDict] = None,
1211+
rubric_generation_spec: Optional[genai_types.RubricGenerationSpecOrDict] = None,
12421212
config: Optional[types.RubricGenerationConfigOrDict] = None,
12431213
metric_resource_name: Optional[str] = None,
12441214
) -> types.GenerateInstanceRubricsResponse:
@@ -2009,7 +1979,7 @@ def generate_rubrics(
20091979
"Could not determine metric_spec_name from predefined_spec_name"
20101980
)
20111981

2012-
predefined_spec = types.PredefinedMetricSpec(
1982+
predefined_spec = genai_types.PredefinedMetricSpec(
20131983
metric_spec_name=actual_predefined_spec_name,
20141984
metric_spec_parameters=metric_spec_parameters,
20151985
)
@@ -2025,7 +1995,7 @@ def generate_rubrics(
20251995
"generator_model_config": generator_model_config,
20261996
}
20271997
spec_dict = {k: v for k, v in spec_dict.items() if v is not None}
2028-
rubric_gen_spec = types.RubricGenerationSpec.model_validate(spec_dict)
1998+
rubric_gen_spec = genai_types.RubricGenerationSpec.model_validate(spec_dict)
20291999
else:
20302000
raise ValueError(
20312001
"Either metric, predefined_spec_name or prompt_template must be provided."
@@ -2758,9 +2728,9 @@ async def _generate_rubrics(
27582728
*,
27592729
contents: list[genai_types.ContentOrDict],
27602730
predefined_rubric_generation_spec: Optional[
2761-
types.PredefinedMetricSpecOrDict
2731+
genai_types.PredefinedMetricSpecOrDict
27622732
] = None,
2763-
rubric_generation_spec: Optional[types.RubricGenerationSpecOrDict] = None,
2733+
rubric_generation_spec: Optional[genai_types.RubricGenerationSpecOrDict] = None,
27642734
config: Optional[types.RubricGenerationConfigOrDict] = None,
27652735
metric_resource_name: Optional[str] = None,
27662736
) -> types.GenerateInstanceRubricsResponse:

0 commit comments

Comments (0)