Skip to content

Commit 9df146b

Browse files
jaycee-li and copybara-github
authored and committed
chore: Maintain backward-compatible field ordering in generated data types
PiperOrigin-RevId: 882441109
1 parent 64ef223 commit 9df146b

File tree

3 files changed

+229
-229
lines changed

3 files changed

+229
-229
lines changed

vertexai/_genai/evals.py

Lines changed: 34 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -131,18 +131,18 @@ def _CustomCodeExecutionSpec_from_vertex(
131131
parent_object: Optional[dict[str, Any]] = None,
132132
) -> dict[str, Any]:
133133
to_object: dict[str, Any] = {}
134-
if getv(from_object, ["evaluation_function"]) is not None:
134+
if getv(from_object, ["evaluationFunction"]) is not None:
135135
setv(
136136
to_object,
137-
["remote_custom_function"],
138-
getv(from_object, ["evaluation_function"]),
137+
["evaluation_function"],
138+
getv(from_object, ["evaluationFunction"]),
139139
)
140140

141-
if getv(from_object, ["evaluationFunction"]) is not None:
141+
if getv(from_object, ["evaluation_function"]) is not None:
142142
setv(
143143
to_object,
144-
["evaluation_function"],
145-
getv(from_object, ["evaluationFunction"]),
144+
["remote_custom_function"],
145+
getv(from_object, ["evaluation_function"]),
146146
)
147147

148148
return to_object
@@ -153,18 +153,18 @@ def _CustomCodeExecutionSpec_to_vertex(
153153
parent_object: Optional[dict[str, Any]] = None,
154154
) -> dict[str, Any]:
155155
to_object: dict[str, Any] = {}
156-
if getv(from_object, ["remote_custom_function"]) is not None:
156+
if getv(from_object, ["evaluation_function"]) is not None:
157157
setv(
158158
to_object,
159-
["evaluation_function"],
160-
getv(from_object, ["remote_custom_function"]),
159+
["evaluationFunction"],
160+
getv(from_object, ["evaluation_function"]),
161161
)
162162

163-
if getv(from_object, ["evaluation_function"]) is not None:
163+
if getv(from_object, ["remote_custom_function"]) is not None:
164164
setv(
165165
to_object,
166-
["evaluationFunction"],
167-
getv(from_object, ["evaluation_function"]),
166+
["evaluation_function"],
167+
getv(from_object, ["remote_custom_function"]),
168168
)
169169

170170
return to_object
@@ -392,18 +392,18 @@ def _EvaluationRunMetric_from_vertex(
392392
if getv(from_object, ["metric"]) is not None:
393393
setv(to_object, ["metric"], getv(from_object, ["metric"]))
394394

395-
if getv(from_object, ["metricResourceName"]) is not None:
395+
if getv(from_object, ["metricConfig"]) is not None:
396396
setv(
397397
to_object,
398-
["metric_resource_name"],
399-
getv(from_object, ["metricResourceName"]),
398+
["metric_config"],
399+
_UnifiedMetric_from_vertex(getv(from_object, ["metricConfig"]), to_object),
400400
)
401401

402-
if getv(from_object, ["metricConfig"]) is not None:
402+
if getv(from_object, ["metricResourceName"]) is not None:
403403
setv(
404404
to_object,
405-
["metric_config"],
406-
_UnifiedMetric_from_vertex(getv(from_object, ["metricConfig"]), to_object),
405+
["metric_resource_name"],
406+
getv(from_object, ["metricResourceName"]),
407407
)
408408

409409
return to_object
@@ -417,18 +417,18 @@ def _EvaluationRunMetric_to_vertex(
417417
if getv(from_object, ["metric"]) is not None:
418418
setv(to_object, ["metric"], getv(from_object, ["metric"]))
419419

420-
if getv(from_object, ["metric_resource_name"]) is not None:
420+
if getv(from_object, ["metric_config"]) is not None:
421421
setv(
422422
to_object,
423-
["metricResourceName"],
424-
getv(from_object, ["metric_resource_name"]),
423+
["metricConfig"],
424+
_UnifiedMetric_to_vertex(getv(from_object, ["metric_config"]), to_object),
425425
)
426426

427-
if getv(from_object, ["metric_config"]) is not None:
427+
if getv(from_object, ["metric_resource_name"]) is not None:
428428
setv(
429429
to_object,
430-
["metricConfig"],
431-
_UnifiedMetric_to_vertex(getv(from_object, ["metric_config"]), to_object),
430+
["metricResourceName"],
431+
getv(from_object, ["metric_resource_name"]),
432432
)
433433

434434
return to_object
@@ -526,16 +526,16 @@ def _GenerateInstanceRubricsRequest_to_vertex(
526526
),
527527
)
528528

529+
if getv(from_object, ["config"]) is not None:
530+
setv(to_object, ["config"], getv(from_object, ["config"]))
531+
529532
if getv(from_object, ["metric_resource_name"]) is not None:
530533
setv(
531534
to_object,
532535
["metricResourceName"],
533536
getv(from_object, ["metric_resource_name"]),
534537
)
535538

536-
if getv(from_object, ["config"]) is not None:
537-
setv(to_object, ["config"], getv(from_object, ["config"]))
538-
539539
return to_object
540540

541541

@@ -677,9 +677,6 @@ def _RubricGenerationSpec_to_vertex(
677677
if getv(from_object, ["prompt_template"]) is not None:
678678
setv(to_object, ["promptTemplate"], getv(from_object, ["prompt_template"]))
679679

680-
if getv(from_object, ["generator_model_config"]) is not None:
681-
setv(to_object, ["model_config"], getv(from_object, ["generator_model_config"]))
682-
683680
if getv(from_object, ["rubric_content_type"]) is not None:
684681
setv(
685682
to_object, ["rubricContentType"], getv(from_object, ["rubric_content_type"])
@@ -692,6 +689,9 @@ def _RubricGenerationSpec_to_vertex(
692689
getv(from_object, ["rubric_type_ontology"]),
693690
)
694691

692+
if getv(from_object, ["generator_model_config"]) is not None:
693+
setv(to_object, ["model_config"], getv(from_object, ["generator_model_config"]))
694+
695695
return to_object
696696

697697

@@ -1070,8 +1070,8 @@ def _generate_rubrics(
10701070
types.PredefinedMetricSpecOrDict
10711071
] = None,
10721072
rubric_generation_spec: Optional[types.RubricGenerationSpecOrDict] = None,
1073-
metric_resource_name: Optional[str] = None,
10741073
config: Optional[types.RubricGenerationConfigOrDict] = None,
1074+
metric_resource_name: Optional[str] = None,
10751075
) -> types.GenerateInstanceRubricsResponse:
10761076
"""
10771077
Generates rubrics for a given prompt.
@@ -1081,8 +1081,8 @@ def _generate_rubrics(
10811081
contents=contents,
10821082
predefined_rubric_generation_spec=predefined_rubric_generation_spec,
10831083
rubric_generation_spec=rubric_generation_spec,
1084-
metric_resource_name=metric_resource_name,
10851084
config=config,
1085+
metric_resource_name=metric_resource_name,
10861086
)
10871087

10881088
request_url_dict: Optional[dict[str, str]]
@@ -2360,8 +2360,8 @@ async def _generate_rubrics(
23602360
types.PredefinedMetricSpecOrDict
23612361
] = None,
23622362
rubric_generation_spec: Optional[types.RubricGenerationSpecOrDict] = None,
2363-
metric_resource_name: Optional[str] = None,
23642363
config: Optional[types.RubricGenerationConfigOrDict] = None,
2364+
metric_resource_name: Optional[str] = None,
23652365
) -> types.GenerateInstanceRubricsResponse:
23662366
"""
23672367
Generates rubrics for a given prompt.
@@ -2371,8 +2371,8 @@ async def _generate_rubrics(
23712371
contents=contents,
23722372
predefined_rubric_generation_spec=predefined_rubric_generation_spec,
23732373
rubric_generation_spec=rubric_generation_spec,
2374-
metric_resource_name=metric_resource_name,
23752374
config=config,
2375+
metric_resource_name=metric_resource_name,
23762376
)
23772377

23782378
request_url_dict: Optional[dict[str, str]]

vertexai/_genai/types/__init__.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1193,6 +1193,9 @@
11931193
"GetAgentEngineTaskConfig",
11941194
"GetAgentEngineTaskConfigDict",
11951195
"GetAgentEngineTaskConfigOrDict",
1196+
"PartMediaResolution",
1197+
"PartMediaResolutionDict",
1198+
"PartMediaResolutionOrDict",
11961199
"CodeExecutionResult",
11971200
"CodeExecutionResultDict",
11981201
"CodeExecutionResultOrDict",
@@ -1208,12 +1211,12 @@
12081211
"FunctionCall",
12091212
"FunctionCallDict",
12101213
"FunctionCallOrDict",
1211-
"FunctionResponseFileData",
1212-
"FunctionResponseFileDataDict",
1213-
"FunctionResponseFileDataOrDict",
12141214
"FunctionResponseBlob",
12151215
"FunctionResponseBlobDict",
12161216
"FunctionResponseBlobOrDict",
1217+
"FunctionResponseFileData",
1218+
"FunctionResponseFileDataDict",
1219+
"FunctionResponseFileDataOrDict",
12171220
"FunctionResponsePart",
12181221
"FunctionResponsePartDict",
12191222
"FunctionResponsePartOrDict",
@@ -1223,9 +1226,6 @@
12231226
"Blob",
12241227
"BlobDict",
12251228
"BlobOrDict",
1226-
"PartMediaResolution",
1227-
"PartMediaResolutionDict",
1228-
"PartMediaResolutionOrDict",
12291229
"VideoMetadata",
12301230
"VideoMetadataDict",
12311231
"VideoMetadataOrDict",
@@ -1385,12 +1385,12 @@
13851385
"EvalCase",
13861386
"EvalCaseDict",
13871387
"EvalCaseOrDict",
1388-
"GcsSource",
1389-
"GcsSourceDict",
1390-
"GcsSourceOrDict",
13911388
"BigQuerySource",
13921389
"BigQuerySourceDict",
13931390
"BigQuerySourceOrDict",
1391+
"GcsSource",
1392+
"GcsSourceDict",
1393+
"GcsSourceOrDict",
13941394
"EvaluationDataset",
13951395
"EvaluationDatasetDict",
13961396
"EvaluationDatasetOrDict",
@@ -2214,10 +2214,10 @@
22142214
"OptimizeJobConfigDict",
22152215
"OptimizeJobConfigOrDict",
22162216
"A2aTaskState",
2217+
"MediaResolution",
22172218
"Outcome",
22182219
"Language",
22192220
"FunctionResponseScheduling",
2220-
"MediaResolution",
22212221
"State",
22222222
"ComputationBasedMetricType",
22232223
"PairwiseChoice",

0 commit comments

Comments
 (0)