diff --git a/google/cloud/aiplatform_v1/__init__.py b/google/cloud/aiplatform_v1/__init__.py index 9c54ee4c48..79d0422665 100644 --- a/google/cloud/aiplatform_v1/__init__.py +++ b/google/cloud/aiplatform_v1/__init__.py @@ -563,6 +563,8 @@ from .types.io import SharePointSources from .types.io import SlackSource from .types.io import TFRecordDestination +from .types.io import VertexMultimodalDatasetDestination +from .types.io import VertexMultimodalDatasetSource from .types.job_service import CancelBatchPredictionJobRequest from .types.job_service import CancelCustomJobRequest from .types.job_service import CancelDataLabelingJobRequest @@ -2198,6 +2200,8 @@ def _get_version(dependency_name): "Value", "VertexAISearch", "VertexAiSearchConfig", + "VertexMultimodalDatasetDestination", + "VertexMultimodalDatasetSource", "VertexRagDataServiceClient", "VertexRagServiceClient", "VertexRagStore", diff --git a/google/cloud/aiplatform_v1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py index f321aca6be..4df4fc103f 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py @@ -507,7 +507,6 @@ async def sample_get_dataset(): request (Optional[Union[google.cloud.aiplatform_v1.types.GetDatasetRequest, dict]]): The request object. Request message for [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. - Next ID: 4 name (:class:`str`): Required. The name of the Dataset resource. @@ -1704,7 +1703,6 @@ async def sample_get_dataset_version(): request (Optional[Union[google.cloud.aiplatform_v1.types.GetDatasetVersionRequest, dict]]): The request object. Request message for [DatasetService.GetDatasetVersion][google.cloud.aiplatform.v1.DatasetService.GetDatasetVersion]. - Next ID: 4 name (:class:`str`): Required. The resource name of the Dataset version to delete. Format: @@ -2643,10 +2641,7 @@ async def list_annotations( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListAnnotationsAsyncPager: - r"""Lists Annotations belongs to a dataitem - This RPC is only available in InternalDatasetService. It - is only used for exporting conversation data to CCAI - Insights. + r"""Lists Annotations belongs to a dataitem. .. code-block:: python diff --git a/google/cloud/aiplatform_v1/services/dataset_service/client.py b/google/cloud/aiplatform_v1/services/dataset_service/client.py index 9e0e03ff4f..8af5856750 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/client.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/client.py @@ -1091,7 +1091,6 @@ def sample_get_dataset(): request (Union[google.cloud.aiplatform_v1.types.GetDatasetRequest, dict]): The request object. Request message for [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. - Next ID: 4 name (str): Required. The name of the Dataset resource. @@ -2261,7 +2260,6 @@ def sample_get_dataset_version(): request (Union[google.cloud.aiplatform_v1.types.GetDatasetVersionRequest, dict]): The request object. Request message for [DatasetService.GetDatasetVersion][google.cloud.aiplatform.v1.DatasetService.GetDatasetVersion]. - Next ID: 4 name (str): Required. The resource name of the Dataset version to delete. 
Format: @@ -3177,10 +3175,7 @@ def list_annotations( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListAnnotationsPager: - r"""Lists Annotations belongs to a dataitem - This RPC is only available in InternalDatasetService. It - is only used for exporting conversation data to CCAI - Insights. + r"""Lists Annotations belongs to a dataitem. .. code-block:: python diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py index 8410f7f790..baa6b4d9ba 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py @@ -853,10 +853,7 @@ def list_annotations( ]: r"""Return a callable for the list annotations method over gRPC. - Lists Annotations belongs to a dataitem - This RPC is only available in InternalDatasetService. It - is only used for exporting conversation data to CCAI - Insights. + Lists Annotations belongs to a dataitem. Returns: Callable[[~.ListAnnotationsRequest], diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py index 1cdabd6e2f..4ecd09f9ff 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py @@ -880,10 +880,7 @@ def list_annotations( ]: r"""Return a callable for the list annotations method over gRPC. - Lists Annotations belongs to a dataitem - This RPC is only available in InternalDatasetService. It - is only used for exporting conversation data to CCAI - Insights. + Lists Annotations belongs to a dataitem. Returns: Callable[[~.ListAnnotationsRequest], diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py index e084918200..98fb0171c2 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py @@ -4569,7 +4569,6 @@ def __call__( request (~.dataset_service.GetDatasetRequest): The request object. Request message for [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. - Next ID: 4 retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -4720,7 +4719,6 @@ def __call__( request (~.dataset_service.GetDatasetVersionRequest): The request object. Request message for [DatasetService.GetDatasetVersion][google.cloud.aiplatform.v1.DatasetService.GetDatasetVersion]. - Next ID: 4 retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_asyncio.py index ddc755b15b..378a0a7ef6 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_asyncio.py @@ -2839,7 +2839,6 @@ async def __call__( request (~.dataset_service.GetDatasetRequest): The request object. 
Request message for [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. - Next ID: 4 retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2999,7 +2998,6 @@ async def __call__( request (~.dataset_service.GetDatasetVersionRequest): The request object. Request message for [DatasetService.GetDatasetVersion][google.cloud.aiplatform.v1.DatasetService.GetDatasetVersion]. - Next ID: 4 retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. diff --git a/google/cloud/aiplatform_v1/services/migration_service/client.py b/google/cloud/aiplatform_v1/services/migration_service/client.py index e626e873a0..8755475c2d 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/client.py @@ -271,40 +271,40 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path( project: str, + location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( + return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path) + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod def dataset_path( project: str, - location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( + return "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", - path, - ) + m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path) return m.groupdict() if m else {} @staticmethod diff --git a/google/cloud/aiplatform_v1/types/__init__.py b/google/cloud/aiplatform_v1/types/__init__.py index 0bfadf9248..4f2c8b83bd 100644 --- a/google/cloud/aiplatform_v1/types/__init__.py +++ b/google/cloud/aiplatform_v1/types/__init__.py @@ -566,6 +566,8 @@ SharePointSources, SlackSource, TFRecordDestination, + VertexMultimodalDatasetDestination, + VertexMultimodalDatasetSource, ) from .job_service import ( CancelBatchPredictionJobRequest, @@ -1665,6 +1667,8 @@ "SharePointSources", "SlackSource", "TFRecordDestination", + "VertexMultimodalDatasetDestination", + "VertexMultimodalDatasetSource", "CancelBatchPredictionJobRequest", "CancelCustomJobRequest", "CancelDataLabelingJobRequest", diff --git a/google/cloud/aiplatform_v1/types/batch_prediction_job.py b/google/cloud/aiplatform_v1/types/batch_prediction_job.py index e1c263e295..8a2d6ac15a 100644 --- a/google/cloud/aiplatform_v1/types/batch_prediction_job.py +++ b/google/cloud/aiplatform_v1/types/batch_prediction_job.py @@ -273,6 +273,11 @@ class InputConfig(proto.Message): additional columns that are not described by the schema, and they will be ignored. + This field is a member of `oneof`_ ``source``.
+ vertex_multimodal_dataset_source (google.cloud.aiplatform_v1.types.VertexMultimodalDatasetSource): + A Vertex Managed Dataset. Currently, only + datasets of type Multimodal are supported. + This field is a member of `oneof`_ ``source``. instances_format (str): Required. The format in which instances are given, must be @@ -293,6 +298,14 @@ class InputConfig(proto.Message): oneof="source", message=io.BigQuerySource, ) + vertex_multimodal_dataset_source: io.VertexMultimodalDatasetSource = ( + proto.Field( + proto.MESSAGE, + number=4, + oneof="source", + message=io.VertexMultimodalDatasetSource, + ) + ) instances_format: str = proto.Field( proto.STRING, number=1, @@ -489,6 +502,11 @@ class OutputConfig(proto.Message): [google.rpc.Status][google.rpc.Status] represented as a STRUCT, and containing only ``code`` and ``message``. + This field is a member of `oneof`_ ``destination``. + vertex_multimodal_dataset_destination (google.cloud.aiplatform_v1.types.VertexMultimodalDatasetDestination): + The details for a Vertex Multimodal Dataset + that will be created for the output. + This field is a member of `oneof`_ ``destination``. predictions_format (str): Required. The format in which Vertex AI gives the @@ -509,6 +527,14 @@ class OutputConfig(proto.Message): oneof="destination", message=io.BigQueryDestination, ) + vertex_multimodal_dataset_destination: io.VertexMultimodalDatasetDestination = ( + proto.Field( + proto.MESSAGE, + number=6, + oneof="destination", + message=io.VertexMultimodalDatasetDestination, + ) + ) predictions_format: str = proto.Field( proto.STRING, number=1, @@ -537,6 +563,13 @@ class OutputInfo(proto.Message): ``bq://projectId.bqDatasetId`` format, into which the prediction output is written. + This field is a member of `oneof`_ ``output_location``. + vertex_multimodal_dataset_name (str): + Output only. The resource name of the Vertex Managed Dataset + created, into which the prediction output is written. + Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + This field is a member of `oneof`_ ``output_location``. bigquery_output_table (str): Output only. The name of the BigQuery table created, in @@ -555,6 +588,11 @@ class OutputInfo(proto.Message): number=2, oneof="output_location", ) + vertex_multimodal_dataset_name: str = proto.Field( + proto.STRING, + number=5, + oneof="output_location", + ) bigquery_output_table: str = proto.Field( proto.STRING, number=4, diff --git a/google/cloud/aiplatform_v1/types/dataset.py b/google/cloud/aiplatform_v1/types/dataset.py index c810b9997b..55c0a49670 100644 --- a/google/cloud/aiplatform_v1/types/dataset.py +++ b/google/cloud/aiplatform_v1/types/dataset.py @@ -43,8 +43,9 @@ class Dataset(proto.Message): Attributes: name (str): - Output only. Identifier. The resource name of - the Dataset. + Output only. Identifier. The resource name of the Dataset. + Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` display_name (str): Required. The user-defined name of the Dataset. The name can be up to 128 characters diff --git a/google/cloud/aiplatform_v1/types/dataset_service.py b/google/cloud/aiplatform_v1/types/dataset_service.py index aa594f5304..858d1ec4e6 100644 --- a/google/cloud/aiplatform_v1/types/dataset_service.py +++ b/google/cloud/aiplatform_v1/types/dataset_service.py @@ -112,7 +112,6 @@ class CreateDatasetOperationMetadata(proto.Message): class GetDatasetRequest(proto.Message): r"""Request message for [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. 
- Next ID: 4 Attributes: name (str): @@ -489,7 +488,6 @@ class DeleteDatasetVersionRequest(proto.Message): class GetDatasetVersionRequest(proto.Message): r"""Request message for [DatasetService.GetDatasetVersion][google.cloud.aiplatform.v1.DatasetService.GetDatasetVersion]. - Next ID: 4 Attributes: name (str): diff --git a/google/cloud/aiplatform_v1/types/dataset_version.py b/google/cloud/aiplatform_v1/types/dataset_version.py index 5a03fe5aa8..22affcb419 100644 --- a/google/cloud/aiplatform_v1/types/dataset_version.py +++ b/google/cloud/aiplatform_v1/types/dataset_version.py @@ -36,8 +36,9 @@ class DatasetVersion(proto.Message): Attributes: name (str): - Output only. Identifier. The resource name of - the DatasetVersion. + Output only. Identifier. The resource name of the + DatasetVersion. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}/datasetVersions/{dataset_version}`` create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this DatasetVersion was created. diff --git a/google/cloud/aiplatform_v1/types/io.py b/google/cloud/aiplatform_v1/types/io.py index 07d89f0bea..ab5f96aecd 100644 --- a/google/cloud/aiplatform_v1/types/io.py +++ b/google/cloud/aiplatform_v1/types/io.py @@ -32,6 +32,8 @@ "GcsDestination", "BigQuerySource", "BigQueryDestination", + "VertexMultimodalDatasetSource", + "VertexMultimodalDatasetDestination", "CsvDestination", "TFRecordDestination", "ContainerRegistryDestination", @@ -152,6 +154,46 @@ class BigQueryDestination(proto.Message): ) +class VertexMultimodalDatasetSource(proto.Message): + r"""The Vertex Multimodal Dataset for the input content. + + Attributes: + dataset_name (str): + Required. The resource name of the Vertex Dataset. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + """ + + dataset_name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class VertexMultimodalDatasetDestination(proto.Message): + r"""The details for a Vertex Multimodal Dataset output. + + Attributes: + bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination): + Optional. The destination of the underlying + BigQuery table that will be created for the + output Multimodal Dataset. If not specified, the + BigQuery table will be created in a default + BigQuery dataset. + display_name (str): + Optional. Display name of the output dataset. + """ + + bigquery_destination: "BigQueryDestination" = proto.Field( + proto.MESSAGE, + number=1, + message="BigQueryDestination", + ) + display_name: str = proto.Field( + proto.STRING, + number=2, + ) + + class CsvDestination(proto.Message): r"""The storage details for CSV output content. 
diff --git a/google/cloud/aiplatform_v1beta1/__init__.py b/google/cloud/aiplatform_v1beta1/__init__.py index 759601dc07..20c099df20 100644 --- a/google/cloud/aiplatform_v1beta1/__init__.py +++ b/google/cloud/aiplatform_v1beta1/__init__.py @@ -697,6 +697,8 @@ from .types.io import SharePointSources from .types.io import SlackSource from .types.io import TFRecordDestination +from .types.io import VertexMultimodalDatasetDestination +from .types.io import VertexMultimodalDatasetSource from .types.job_service import CancelBatchPredictionJobRequest from .types.job_service import CancelCustomJobRequest from .types.job_service import CancelDataLabelingJobRequest @@ -2664,6 +2666,8 @@ def _get_version(dependency_name): "VeoTuningSpec", "VertexAISearch", "VertexAiSearchConfig", + "VertexMultimodalDatasetDestination", + "VertexMultimodalDatasetSource", "VertexRagDataServiceClient", "VertexRagServiceClient", "VertexRagStore", diff --git a/google/cloud/aiplatform_v1beta1/types/__init__.py b/google/cloud/aiplatform_v1beta1/types/__init__.py index 299cc957ec..d962b5eabc 100644 --- a/google/cloud/aiplatform_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform_v1beta1/types/__init__.py @@ -704,6 +704,8 @@ SharePointSources, SlackSource, TFRecordDestination, + VertexMultimodalDatasetDestination, + VertexMultimodalDatasetSource, ) from .job_service import ( CancelBatchPredictionJobRequest, @@ -2048,6 +2050,8 @@ "SharePointSources", "SlackSource", "TFRecordDestination", + "VertexMultimodalDatasetDestination", + "VertexMultimodalDatasetSource", "CancelBatchPredictionJobRequest", "CancelCustomJobRequest", "CancelDataLabelingJobRequest", diff --git a/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py b/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py index d7b16541c6..f67df622a3 100644 --- a/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py +++ b/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py @@ -289,6 +289,11 @@ class InputConfig(proto.Message): additional columns that are not described by the schema, and they will be ignored. + This field is a member of `oneof`_ ``source``. + vertex_multimodal_dataset_source (google.cloud.aiplatform_v1beta1.types.VertexMultimodalDatasetSource): + A Vertex Managed Dataset. Currently, only + datasets of type Multimodal are supported. + This field is a member of `oneof`_ ``source``. instances_format (str): Required. The format in which instances are given, must be @@ -309,6 +314,14 @@ class InputConfig(proto.Message): oneof="source", message=io.BigQuerySource, ) + vertex_multimodal_dataset_source: io.VertexMultimodalDatasetSource = ( + proto.Field( + proto.MESSAGE, + number=4, + oneof="source", + message=io.VertexMultimodalDatasetSource, + ) + ) instances_format: str = proto.Field( proto.STRING, number=1, @@ -505,6 +518,11 @@ class OutputConfig(proto.Message): [google.rpc.Status][google.rpc.Status] represented as a STRUCT, and containing only ``code`` and ``message``. + This field is a member of `oneof`_ ``destination``. + vertex_multimodal_dataset_destination (google.cloud.aiplatform_v1beta1.types.VertexMultimodalDatasetDestination): + The details for a Vertex Multimodal Dataset + that will be created for the output. + This field is a member of `oneof`_ ``destination``. predictions_format (str): Required. 
The format in which Vertex AI gives the @@ -525,6 +543,14 @@ class OutputConfig(proto.Message): oneof="destination", message=io.BigQueryDestination, ) + vertex_multimodal_dataset_destination: io.VertexMultimodalDatasetDestination = ( + proto.Field( + proto.MESSAGE, + number=6, + oneof="destination", + message=io.VertexMultimodalDatasetDestination, + ) + ) predictions_format: str = proto.Field( proto.STRING, number=1, @@ -553,6 +579,13 @@ class OutputInfo(proto.Message): ``bq://projectId.bqDatasetId`` format, into which the prediction output is written. + This field is a member of `oneof`_ ``output_location``. + vertex_multimodal_dataset_name (str): + Output only. The resource name of the Vertex Managed Dataset + created, into which the prediction output is written. + Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + This field is a member of `oneof`_ ``output_location``. bigquery_output_table (str): Output only. The name of the BigQuery table created, in @@ -571,6 +604,11 @@ class OutputInfo(proto.Message): number=2, oneof="output_location", ) + vertex_multimodal_dataset_name: str = proto.Field( + proto.STRING, + number=5, + oneof="output_location", + ) bigquery_output_table: str = proto.Field( proto.STRING, number=4, diff --git a/google/cloud/aiplatform_v1beta1/types/dataset.py b/google/cloud/aiplatform_v1beta1/types/dataset.py index 0abfb5459b..b1bf1242b8 100644 --- a/google/cloud/aiplatform_v1beta1/types/dataset.py +++ b/google/cloud/aiplatform_v1beta1/types/dataset.py @@ -42,8 +42,9 @@ class Dataset(proto.Message): Attributes: name (str): - Output only. Identifier. The resource name of - the Dataset. + Output only. Identifier. The resource name of the Dataset. + Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` display_name (str): Required. The user-defined name of the Dataset. The name can be up to 128 characters diff --git a/google/cloud/aiplatform_v1beta1/types/dataset_version.py b/google/cloud/aiplatform_v1beta1/types/dataset_version.py index 4a587ac1c6..88cde27d86 100644 --- a/google/cloud/aiplatform_v1beta1/types/dataset_version.py +++ b/google/cloud/aiplatform_v1beta1/types/dataset_version.py @@ -36,8 +36,9 @@ class DatasetVersion(proto.Message): Attributes: name (str): - Output only. Identifier. The resource name of - the DatasetVersion. + Output only. Identifier. The resource name of the + DatasetVersion. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}/datasetVersions/{dataset_version}`` create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this DatasetVersion was created. diff --git a/google/cloud/aiplatform_v1beta1/types/io.py b/google/cloud/aiplatform_v1beta1/types/io.py index c885e6b202..8cc00c6e62 100644 --- a/google/cloud/aiplatform_v1beta1/types/io.py +++ b/google/cloud/aiplatform_v1beta1/types/io.py @@ -32,6 +32,8 @@ "GcsDestination", "BigQuerySource", "BigQueryDestination", + "VertexMultimodalDatasetSource", + "VertexMultimodalDatasetDestination", "CsvDestination", "TFRecordDestination", "ContainerRegistryDestination", @@ -152,6 +154,46 @@ class BigQueryDestination(proto.Message): ) +class VertexMultimodalDatasetSource(proto.Message): + r"""The Vertex Multimodal Dataset for the input content. + + Attributes: + dataset_name (str): + Required. The resource name of the Vertex Dataset. 
Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + """ + + dataset_name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class VertexMultimodalDatasetDestination(proto.Message): + r"""The details for a Vertex Multimodal Dataset output. + + Attributes: + bigquery_destination (google.cloud.aiplatform_v1beta1.types.BigQueryDestination): + Optional. The destination of the underlying + BigQuery table that will be created for the + output Multimodal Dataset. If not specified, the + BigQuery table will be created in a default + BigQuery dataset. + display_name (str): + Optional. Display name of the output dataset. + """ + + bigquery_destination: "BigQueryDestination" = proto.Field( + proto.MESSAGE, + number=1, + message="BigQueryDestination", + ) + display_name: str = proto.Field( + proto.STRING, + number=2, + ) + + class CsvDestination(proto.Message): r"""The storage details for CSV output content. diff --git a/google/cloud/aiplatform_v1beta1/types/schedule.py b/google/cloud/aiplatform_v1beta1/types/schedule.py index 1f8c2a41cc..1cde82a4de 100644 --- a/google/cloud/aiplatform_v1beta1/types/schedule.py +++ b/google/cloud/aiplatform_v1beta1/types/schedule.py @@ -125,6 +125,14 @@ class Schedule(proto.Message): the limit for starting the scheduled requests and not the execution of the operations/jobs created by the requests (if applicable). + max_concurrent_active_run_count (int): + Optional. Specifies the maximum number of + active runs that can be executed concurrently + for this Schedule. This limits the number of + runs that can be in a non-terminal state at the + same time. + Currently, this field is only supported for + requests of type CreatePipelineJobRequest. allow_queueing (bool): Optional. Whether new scheduled runs can be queued when max_concurrent_runs limit is reached. 
If set to true, new @@ -279,6 +287,10 @@ class RunResponse(proto.Message): proto.INT64, number=11, ) + max_concurrent_active_run_count: int = proto.Field( + proto.INT64, + number=21, + ) allow_queueing: bool = proto.Field( proto.BOOL, number=12, diff --git a/tests/unit/gapic/aiplatform_v1/test_job_service.py b/tests/unit/gapic/aiplatform_v1/test_job_service.py index 25c241ef4d..b9a58d873e 100644 --- a/tests/unit/gapic/aiplatform_v1/test_job_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_job_service.py @@ -28279,6 +28279,7 @@ def test_create_batch_prediction_job_rest_call_success(request_type): "input_config": { "gcs_source": {"uris": ["uris_value1", "uris_value2"]}, "bigquery_source": {"input_uri": "input_uri_value"}, + "vertex_multimodal_dataset_source": {"dataset_name": "dataset_name_value"}, "instances_format": "instances_format_value", }, "instance_config": { @@ -28298,6 +28299,10 @@ def test_create_batch_prediction_job_rest_call_success(request_type): "output_config": { "gcs_destination": {"output_uri_prefix": "output_uri_prefix_value"}, "bigquery_destination": {"output_uri": "output_uri_value"}, + "vertex_multimodal_dataset_destination": { + "bigquery_destination": {}, + "display_name": "display_name_value", + }, "predictions_format": "predictions_format_value", }, "dedicated_resources": { @@ -28357,6 +28362,7 @@ def test_create_batch_prediction_job_rest_call_success(request_type): "output_info": { "gcs_output_directory": "gcs_output_directory_value", "bigquery_output_dataset": "bigquery_output_dataset_value", + "vertex_multimodal_dataset_name": "vertex_multimodal_dataset_name_value", "bigquery_output_table": "bigquery_output_table_value", }, "state": 1, @@ -36125,6 +36131,7 @@ async def test_create_batch_prediction_job_rest_asyncio_call_success(request_typ "input_config": { "gcs_source": {"uris": ["uris_value1", "uris_value2"]}, "bigquery_source": {"input_uri": "input_uri_value"}, + "vertex_multimodal_dataset_source": {"dataset_name": "dataset_name_value"}, "instances_format": "instances_format_value", }, "instance_config": { @@ -36144,6 +36151,10 @@ async def test_create_batch_prediction_job_rest_asyncio_call_success(request_typ "output_config": { "gcs_destination": {"output_uri_prefix": "output_uri_prefix_value"}, "bigquery_destination": {"output_uri": "output_uri_value"}, + "vertex_multimodal_dataset_destination": { + "bigquery_destination": {}, + "display_name": "display_name_value", + }, "predictions_format": "predictions_format_value", }, "dedicated_resources": { @@ -36203,6 +36214,7 @@ async def test_create_batch_prediction_job_rest_asyncio_call_success(request_typ "output_info": { "gcs_output_directory": "gcs_output_directory_value", "bigquery_output_dataset": "bigquery_output_dataset_value", + "vertex_multimodal_dataset_name": "vertex_multimodal_dataset_name_value", "bigquery_output_table": "bigquery_output_table_value", }, "state": 1, diff --git a/tests/unit/gapic/aiplatform_v1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1/test_migration_service.py index aefcb219d7..a783bb76ea 100644 --- a/tests/unit/gapic/aiplatform_v1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_migration_service.py @@ -5596,19 +5596,22 @@ def test_parse_annotated_dataset_path(): def test_dataset_path(): project = "cuttlefish" - dataset = "mussel" - expected = "projects/{project}/datasets/{dataset}".format( + location = "mussel" + dataset = "winkle" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + 
location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "winkle", - "dataset": "nautilus", + "project": "nautilus", + "location": "scallop", + "dataset": "abalone", } path = MigrationServiceClient.dataset_path(**expected) @@ -5618,22 +5621,19 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "scallop" - location = "abalone" - dataset = "squid" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project = "squid" + dataset = "clam" + expected = "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "clam", - "location": "whelk", + "project": "whelk", "dataset": "octopus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py index a8ecbd6e6b..90a1a9d939 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py @@ -28310,6 +28310,7 @@ def test_create_batch_prediction_job_rest_call_success(request_type): "input_config": { "gcs_source": {"uris": ["uris_value1", "uris_value2"]}, "bigquery_source": {"input_uri": "input_uri_value"}, + "vertex_multimodal_dataset_source": {"dataset_name": "dataset_name_value"}, "instances_format": "instances_format_value", }, "instance_config": { @@ -28329,6 +28330,10 @@ def test_create_batch_prediction_job_rest_call_success(request_type): "output_config": { "gcs_destination": {"output_uri_prefix": "output_uri_prefix_value"}, "bigquery_destination": {"output_uri": "output_uri_value"}, + "vertex_multimodal_dataset_destination": { + "bigquery_destination": {}, + "display_name": "display_name_value", + }, "predictions_format": "predictions_format_value", }, "dedicated_resources": { @@ -28393,6 +28398,7 @@ def test_create_batch_prediction_job_rest_call_success(request_type): "output_info": { "gcs_output_directory": "gcs_output_directory_value", "bigquery_output_dataset": "bigquery_output_dataset_value", + "vertex_multimodal_dataset_name": "vertex_multimodal_dataset_name_value", "bigquery_output_table": "bigquery_output_table_value", }, "state": 1, @@ -36251,6 +36257,7 @@ async def test_create_batch_prediction_job_rest_asyncio_call_success(request_typ "input_config": { "gcs_source": {"uris": ["uris_value1", "uris_value2"]}, "bigquery_source": {"input_uri": "input_uri_value"}, + "vertex_multimodal_dataset_source": {"dataset_name": "dataset_name_value"}, "instances_format": "instances_format_value", }, "instance_config": { @@ -36270,6 +36277,10 @@ async def test_create_batch_prediction_job_rest_asyncio_call_success(request_typ "output_config": { "gcs_destination": {"output_uri_prefix": "output_uri_prefix_value"}, "bigquery_destination": {"output_uri": "output_uri_value"}, + "vertex_multimodal_dataset_destination": { + "bigquery_destination": {}, + "display_name": "display_name_value", + }, "predictions_format": "predictions_format_value", }, "dedicated_resources": { @@ -36334,6 +36345,7 @@ async def 
test_create_batch_prediction_job_rest_asyncio_call_success(request_typ "output_info": { "gcs_output_directory": "gcs_output_directory_value", "bigquery_output_dataset": "bigquery_output_dataset_value", + "vertex_multimodal_dataset_name": "vertex_multimodal_dataset_name_value", "bigquery_output_table": "bigquery_output_table_value", }, "state": 1, diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_schedule_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_schedule_service.py index a3307d5186..efbf062810 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_schedule_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_schedule_service.py @@ -1414,6 +1414,7 @@ def test_create_schedule(request_type, transport: str = "grpc"): started_run_count=1843, state=gca_schedule.Schedule.State.ACTIVE, max_concurrent_run_count=2596, + max_concurrent_active_run_count=3327, allow_queueing=True, catch_up=True, cron="cron_value", @@ -1434,6 +1435,7 @@ def test_create_schedule(request_type, transport: str = "grpc"): assert response.started_run_count == 1843 assert response.state == gca_schedule.Schedule.State.ACTIVE assert response.max_concurrent_run_count == 2596 + assert response.max_concurrent_active_run_count == 3327 assert response.allow_queueing is True assert response.catch_up is True @@ -1567,6 +1569,7 @@ async def test_create_schedule_async( started_run_count=1843, state=gca_schedule.Schedule.State.ACTIVE, max_concurrent_run_count=2596, + max_concurrent_active_run_count=3327, allow_queueing=True, catch_up=True, ) @@ -1587,6 +1590,7 @@ async def test_create_schedule_async( assert response.started_run_count == 1843 assert response.state == gca_schedule.Schedule.State.ACTIVE assert response.max_concurrent_run_count == 2596 + assert response.max_concurrent_active_run_count == 3327 assert response.allow_queueing is True assert response.catch_up is True @@ -2103,6 +2107,7 @@ def test_get_schedule(request_type, transport: str = "grpc"): started_run_count=1843, state=schedule.Schedule.State.ACTIVE, max_concurrent_run_count=2596, + max_concurrent_active_run_count=3327, allow_queueing=True, catch_up=True, cron="cron_value", @@ -2123,6 +2128,7 @@ def test_get_schedule(request_type, transport: str = "grpc"): assert response.started_run_count == 1843 assert response.state == schedule.Schedule.State.ACTIVE assert response.max_concurrent_run_count == 2596 + assert response.max_concurrent_active_run_count == 3327 assert response.allow_queueing is True assert response.catch_up is True @@ -2256,6 +2262,7 @@ async def test_get_schedule_async( started_run_count=1843, state=schedule.Schedule.State.ACTIVE, max_concurrent_run_count=2596, + max_concurrent_active_run_count=3327, allow_queueing=True, catch_up=True, ) @@ -2276,6 +2283,7 @@ async def test_get_schedule_async( assert response.started_run_count == 1843 assert response.state == schedule.Schedule.State.ACTIVE assert response.max_concurrent_run_count == 2596 + assert response.max_concurrent_active_run_count == 3327 assert response.allow_queueing is True assert response.catch_up is True @@ -3606,6 +3614,7 @@ def test_update_schedule(request_type, transport: str = "grpc"): started_run_count=1843, state=gca_schedule.Schedule.State.ACTIVE, max_concurrent_run_count=2596, + max_concurrent_active_run_count=3327, allow_queueing=True, catch_up=True, cron="cron_value", @@ -3626,6 +3635,7 @@ def test_update_schedule(request_type, transport: str = "grpc"): assert response.started_run_count == 1843 assert response.state == gca_schedule.Schedule.State.ACTIVE assert 
response.max_concurrent_run_count == 2596 + assert response.max_concurrent_active_run_count == 3327 assert response.allow_queueing is True assert response.catch_up is True @@ -3755,6 +3765,7 @@ async def test_update_schedule_async( started_run_count=1843, state=gca_schedule.Schedule.State.ACTIVE, max_concurrent_run_count=2596, + max_concurrent_active_run_count=3327, allow_queueing=True, catch_up=True, ) @@ -3775,6 +3786,7 @@ async def test_update_schedule_async( assert response.started_run_count == 1843 assert response.state == gca_schedule.Schedule.State.ACTIVE assert response.max_concurrent_run_count == 2596 + assert response.max_concurrent_active_run_count == 3327 assert response.allow_queueing is True assert response.catch_up is True @@ -5574,6 +5586,7 @@ async def test_create_schedule_empty_call_grpc_asyncio(): started_run_count=1843, state=gca_schedule.Schedule.State.ACTIVE, max_concurrent_run_count=2596, + max_concurrent_active_run_count=3327, allow_queueing=True, catch_up=True, ) @@ -5633,6 +5646,7 @@ async def test_get_schedule_empty_call_grpc_asyncio(): started_run_count=1843, state=schedule.Schedule.State.ACTIVE, max_concurrent_run_count=2596, + max_concurrent_active_run_count=3327, allow_queueing=True, catch_up=True, ) @@ -5740,6 +5754,7 @@ async def test_update_schedule_empty_call_grpc_asyncio(): started_run_count=1843, state=gca_schedule.Schedule.State.ACTIVE, max_concurrent_run_count=2596, + max_concurrent_active_run_count=3327, allow_queueing=True, catch_up=True, ) @@ -6162,6 +6177,7 @@ def test_create_schedule_rest_call_success(request_type): "last_pause_time": {}, "last_resume_time": {}, "max_concurrent_run_count": 2596, + "max_concurrent_active_run_count": 3327, "allow_queueing": True, "catch_up": True, "last_scheduled_run_response": { @@ -6248,6 +6264,7 @@ def get_message_fields(field): started_run_count=1843, state=gca_schedule.Schedule.State.ACTIVE, max_concurrent_run_count=2596, + max_concurrent_active_run_count=3327, allow_queueing=True, catch_up=True, cron="cron_value", @@ -6273,6 +6290,7 @@ def get_message_fields(field): assert response.started_run_count == 1843 assert response.state == gca_schedule.Schedule.State.ACTIVE assert response.max_concurrent_run_count == 2596 + assert response.max_concurrent_active_run_count == 3327 assert response.allow_queueing is True assert response.catch_up is True @@ -6513,6 +6531,7 @@ def test_get_schedule_rest_call_success(request_type): started_run_count=1843, state=schedule.Schedule.State.ACTIVE, max_concurrent_run_count=2596, + max_concurrent_active_run_count=3327, allow_queueing=True, catch_up=True, cron="cron_value", @@ -6538,6 +6557,7 @@ def test_get_schedule_rest_call_success(request_type): assert response.started_run_count == 1843 assert response.state == schedule.Schedule.State.ACTIVE assert response.max_concurrent_run_count == 2596 + assert response.max_concurrent_active_run_count == 3327 assert response.allow_queueing is True assert response.catch_up is True @@ -7359,6 +7379,7 @@ def test_update_schedule_rest_call_success(request_type): "last_pause_time": {}, "last_resume_time": {}, "max_concurrent_run_count": 2596, + "max_concurrent_active_run_count": 3327, "allow_queueing": True, "catch_up": True, "last_scheduled_run_response": { @@ -7445,6 +7466,7 @@ def get_message_fields(field): started_run_count=1843, state=gca_schedule.Schedule.State.ACTIVE, max_concurrent_run_count=2596, + max_concurrent_active_run_count=3327, allow_queueing=True, catch_up=True, cron="cron_value", @@ -7470,6 +7492,7 @@ def 
get_message_fields(field): assert response.started_run_count == 1843 assert response.state == gca_schedule.Schedule.State.ACTIVE assert response.max_concurrent_run_count == 2596 + assert response.max_concurrent_active_run_count == 3327 assert response.allow_queueing is True assert response.catch_up is True @@ -8746,6 +8769,7 @@ async def test_create_schedule_rest_asyncio_call_success(request_type): "last_pause_time": {}, "last_resume_time": {}, "max_concurrent_run_count": 2596, + "max_concurrent_active_run_count": 3327, "allow_queueing": True, "catch_up": True, "last_scheduled_run_response": { @@ -8832,6 +8856,7 @@ def get_message_fields(field): started_run_count=1843, state=gca_schedule.Schedule.State.ACTIVE, max_concurrent_run_count=2596, + max_concurrent_active_run_count=3327, allow_queueing=True, catch_up=True, cron="cron_value", @@ -8859,6 +8884,7 @@ def get_message_fields(field): assert response.started_run_count == 1843 assert response.state == gca_schedule.Schedule.State.ACTIVE assert response.max_concurrent_run_count == 2596 + assert response.max_concurrent_active_run_count == 3327 assert response.allow_queueing is True assert response.catch_up is True @@ -9135,6 +9161,7 @@ async def test_get_schedule_rest_asyncio_call_success(request_type): started_run_count=1843, state=schedule.Schedule.State.ACTIVE, max_concurrent_run_count=2596, + max_concurrent_active_run_count=3327, allow_queueing=True, catch_up=True, cron="cron_value", @@ -9162,6 +9189,7 @@ async def test_get_schedule_rest_asyncio_call_success(request_type): assert response.started_run_count == 1843 assert response.state == schedule.Schedule.State.ACTIVE assert response.max_concurrent_run_count == 2596 + assert response.max_concurrent_active_run_count == 3327 assert response.allow_queueing is True assert response.catch_up is True @@ -10055,6 +10083,7 @@ async def test_update_schedule_rest_asyncio_call_success(request_type): "last_pause_time": {}, "last_resume_time": {}, "max_concurrent_run_count": 2596, + "max_concurrent_active_run_count": 3327, "allow_queueing": True, "catch_up": True, "last_scheduled_run_response": { @@ -10141,6 +10170,7 @@ def get_message_fields(field): started_run_count=1843, state=gca_schedule.Schedule.State.ACTIVE, max_concurrent_run_count=2596, + max_concurrent_active_run_count=3327, allow_queueing=True, catch_up=True, cron="cron_value", @@ -10168,6 +10198,7 @@ def get_message_fields(field): assert response.started_run_count == 1843 assert response.state == gca_schedule.Schedule.State.ACTIVE assert response.max_concurrent_run_count == 2596 + assert response.max_concurrent_active_run_count == 3327 assert response.allow_queueing is True assert response.catch_up is True
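Note (not part of the generated patch): a minimal sketch of how the new surface added above might be exercised. Only the message and field names come from this diff; the project, location, dataset ID, model resource name, and format values are illustrative placeholders, and `gemini-2.0-flash-001` is assumed purely for the example.

```python
from google.cloud import aiplatform_v1


def run_multimodal_batch_prediction(project: str, location: str, dataset_id: str) -> None:
    """Sketch: batch prediction that reads from and writes to a Vertex
    Multimodal Dataset, using the new oneof members introduced in this change."""
    parent = f"projects/{project}/locations/{location}"
    client = aiplatform_v1.JobServiceClient()

    job = aiplatform_v1.BatchPredictionJob(
        display_name="multimodal-batch-prediction",
        # Hypothetical model resource name; substitute any model in your project
        # that supports batch prediction.
        model=f"{parent}/publishers/google/models/gemini-2.0-flash-001",
        input_config=aiplatform_v1.BatchPredictionJob.InputConfig(
            instances_format="jsonl",  # placeholder; use the format your model expects
            vertex_multimodal_dataset_source=aiplatform_v1.VertexMultimodalDatasetSource(
                dataset_name=f"{parent}/datasets/{dataset_id}",
            ),
        ),
        output_config=aiplatform_v1.BatchPredictionJob.OutputConfig(
            predictions_format="jsonl",  # placeholder
            vertex_multimodal_dataset_destination=aiplatform_v1.VertexMultimodalDatasetDestination(
                display_name="batch-prediction-output",
                # bigquery_destination is optional; leaving it unset lets the service
                # create the backing table in a default BigQuery dataset.
            ),
        ),
    )

    response = client.create_batch_prediction_job(
        parent=parent,
        batch_prediction_job=job,
    )
    # Once the job writes output, OutputInfo.vertex_multimodal_dataset_name
    # (the new oneof member) holds the resource name of the created dataset.
    print(response.name)
```

The new `max_concurrent_active_run_count` field on `aiplatform_v1beta1.Schedule` is set the same way as the existing `max_concurrent_run_count` integer field; per its docstring it currently applies only to schedules whose request payload is a `CreatePipelineJobRequest`.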