From 7841b8314187879bd8cb72f2225768967361cd38 Mon Sep 17 00:00:00 2001 From: Mikhail Shilkov Date: Mon, 1 Apr 2024 19:17:31 +0200 Subject: [PATCH] Add resources that have Upload method as Create --- .../metadata.json | 884 ++++++++- .../pulumi-resource-google-native/schema.json | 1705 +++++++++++++++++ provider/pkg/gen/discovery.go | 2 +- sdk/dotnet/Aiplatform/V1/GetModel.cs | 274 +++ sdk/dotnet/Aiplatform/V1/Model.cs | 379 ++++ sdk/dotnet/Aiplatform/V1Beta1/GetModel.cs | 267 +++ sdk/dotnet/Aiplatform/V1Beta1/Model.cs | 367 ++++ sdk/dotnet/CloudSearch/V1/GetItem.cs | 147 ++ .../CloudSearch/V1/Inputs/DebugOptionsArgs.cs | 29 + sdk/dotnet/CloudSearch/V1/Item.cs | 158 ++ .../V1/Outputs/ContextAttributeResponse.cs | 38 + .../V1/Outputs/DateValuesResponse.cs | 27 + .../V1/Outputs/DoubleValuesResponse.cs | 27 + .../V1/Outputs/EnumValuesResponse.cs | 30 + .../V1/Outputs/FieldViolationResponse.cs | 35 + .../V1/Outputs/HtmlValuesResponse.cs | 30 + .../V1/Outputs/IntegerValuesResponse.cs | 27 + .../V1/Outputs/InteractionResponse.cs | 42 + .../CloudSearch/V1/Outputs/ItemAclResponse.cs | 59 + .../V1/Outputs/ItemContentResponse.cs | 49 + .../V1/Outputs/ItemMetadataResponse.cs | 115 ++ .../V1/Outputs/ItemStatusResponse.cs | 45 + .../V1/Outputs/ItemStructuredDataResponse.cs | 38 + .../V1/Outputs/NamedPropertyResponse.cs | 67 + .../V1/Outputs/ObjectValuesResponse.cs | 27 + .../V1/Outputs/PrincipalResponse.cs | 45 + .../V1/Outputs/ProcessingErrorResponse.cs | 42 + .../V1/Outputs/RepositoryErrorResponse.cs | 45 + .../Outputs/SearchQualityMetadataResponse.cs | 30 + .../Outputs/StructuredDataObjectResponse.cs | 30 + .../V1/Outputs/TextValuesResponse.cs | 30 + .../V1/Outputs/TimestampValuesResponse.cs | 27 + .../V1/Outputs/UploadItemRefResponse.cs | 30 + ...tactcenterinsightsV1RedactionConfigArgs.cs | 35 + ...ContactcenterinsightsV1SpeechConfigArgs.cs | 29 + sdk/go/google/aiplatform/v1/getModel.go | 280 +++ sdk/go/google/aiplatform/v1/init.go | 2 + sdk/go/google/aiplatform/v1/model.go | 406 ++++ sdk/go/google/aiplatform/v1beta1/getModel.go | 283 +++ sdk/go/google/aiplatform/v1beta1/init.go | 2 + sdk/go/google/aiplatform/v1beta1/model.go | 399 ++++ sdk/go/google/cloudsearch/v1/getItem.go | 145 ++ sdk/go/google/cloudsearch/v1/init.go | 2 + sdk/go/google/cloudsearch/v1/item.go | 211 ++ sdk/go/google/cloudsearch/v1/pulumiTypes.go | 1489 ++++++++++++-- sdk/nodejs/aiplatform/v1/getModel.ts | 158 ++ sdk/nodejs/aiplatform/v1/index.ts | 12 + sdk/nodejs/aiplatform/v1/model.ts | 325 ++++ sdk/nodejs/aiplatform/v1beta1/getModel.ts | 154 ++ sdk/nodejs/aiplatform/v1beta1/index.ts | 12 + sdk/nodejs/aiplatform/v1beta1/model.ts | 315 +++ sdk/nodejs/cloudsearch/v1/getItem.ts | 85 + sdk/nodejs/cloudsearch/v1/index.ts | 12 + sdk/nodejs/cloudsearch/v1/item.ts | 150 ++ sdk/nodejs/tsconfig.json | 6 + sdk/nodejs/types/input.ts | 10 + sdk/nodejs/types/output.ts | 340 ++++ sdk/python/pulumi_google_native/__init__.py | 3 + .../aiplatform/v1/__init__.py | 2 + .../aiplatform/v1/get_model.py | 435 +++++ .../aiplatform/v1/model.py | 773 ++++++++ .../aiplatform/v1beta1/__init__.py | 2 + .../aiplatform/v1beta1/get_model.py | 422 ++++ .../aiplatform/v1beta1/model.py | 744 +++++++ .../cloudsearch/v1/__init__.py | 2 + .../cloudsearch/v1/_inputs.py | 25 + .../cloudsearch/v1/get_item.py | 191 ++ .../cloudsearch/v1/item.py | 280 +++ .../cloudsearch/v1/outputs.py | 1397 ++++++++++++-- 69 files changed, 13977 insertions(+), 308 deletions(-) create mode 100644 sdk/dotnet/Aiplatform/V1/GetModel.cs create mode 100644 sdk/dotnet/Aiplatform/V1/Model.cs 
create mode 100644 sdk/dotnet/Aiplatform/V1Beta1/GetModel.cs create mode 100644 sdk/dotnet/Aiplatform/V1Beta1/Model.cs create mode 100644 sdk/dotnet/CloudSearch/V1/GetItem.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Inputs/DebugOptionsArgs.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Item.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Outputs/ContextAttributeResponse.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Outputs/DateValuesResponse.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Outputs/DoubleValuesResponse.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Outputs/EnumValuesResponse.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Outputs/FieldViolationResponse.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Outputs/HtmlValuesResponse.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Outputs/IntegerValuesResponse.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Outputs/InteractionResponse.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Outputs/ItemAclResponse.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Outputs/ItemContentResponse.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Outputs/ItemMetadataResponse.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Outputs/ItemStatusResponse.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Outputs/ItemStructuredDataResponse.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Outputs/NamedPropertyResponse.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Outputs/ObjectValuesResponse.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Outputs/PrincipalResponse.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Outputs/ProcessingErrorResponse.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Outputs/RepositoryErrorResponse.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Outputs/SearchQualityMetadataResponse.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Outputs/StructuredDataObjectResponse.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Outputs/TextValuesResponse.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Outputs/TimestampValuesResponse.cs create mode 100644 sdk/dotnet/CloudSearch/V1/Outputs/UploadItemRefResponse.cs create mode 100644 sdk/dotnet/Contactcenterinsights/V1/Inputs/GoogleCloudContactcenterinsightsV1RedactionConfigArgs.cs create mode 100644 sdk/dotnet/Contactcenterinsights/V1/Inputs/GoogleCloudContactcenterinsightsV1SpeechConfigArgs.cs create mode 100644 sdk/go/google/aiplatform/v1/getModel.go create mode 100644 sdk/go/google/aiplatform/v1/model.go create mode 100644 sdk/go/google/aiplatform/v1beta1/getModel.go create mode 100644 sdk/go/google/aiplatform/v1beta1/model.go create mode 100644 sdk/go/google/cloudsearch/v1/getItem.go create mode 100644 sdk/go/google/cloudsearch/v1/item.go create mode 100644 sdk/nodejs/aiplatform/v1/getModel.ts create mode 100644 sdk/nodejs/aiplatform/v1/model.ts create mode 100644 sdk/nodejs/aiplatform/v1beta1/getModel.ts create mode 100644 sdk/nodejs/aiplatform/v1beta1/model.ts create mode 100644 sdk/nodejs/cloudsearch/v1/getItem.ts create mode 100644 sdk/nodejs/cloudsearch/v1/item.ts create mode 100644 sdk/python/pulumi_google_native/aiplatform/v1/get_model.py create mode 100644 sdk/python/pulumi_google_native/aiplatform/v1/model.py create mode 100644 sdk/python/pulumi_google_native/aiplatform/v1beta1/get_model.py create mode 100644 sdk/python/pulumi_google_native/aiplatform/v1beta1/model.py create mode 100644 sdk/python/pulumi_google_native/cloudsearch/v1/get_item.py create mode 100644 sdk/python/pulumi_google_native/cloudsearch/v1/item.py diff --git a/provider/cmd/pulumi-resource-google-native/metadata.json 
b/provider/cmd/pulumi-resource-google-native/metadata.json index 52bcf20efc..cb81eef72c 100644 --- a/provider/cmd/pulumi-resource-google-native/metadata.json +++ b/provider/cmd/pulumi-resource-google-native/metadata.json @@ -25226,6 +25226,14 @@ } } }, + "google-native:cloudsearch/v1:ContextAttributeResponse": { + "properties": { + "name": {}, + "values": { + "items": {} + } + } + }, "google-native:cloudsearch/v1:DataSourceRestriction": { "properties": { "filterOptions": { @@ -25276,6 +25284,34 @@ } } }, + "google-native:cloudsearch/v1:DateValuesResponse": { + "properties": { + "values": { + "items": { + "$ref": "#/types/google-native:cloudsearch/v1:DateResponse" + } + } + } + }, + "google-native:cloudsearch/v1:DebugOptions": { + "properties": { + "enableDebugging": {} + } + }, + "google-native:cloudsearch/v1:DoubleValuesResponse": { + "properties": { + "values": { + "items": {} + } + } + }, + "google-native:cloudsearch/v1:EnumValuesResponse": { + "properties": { + "values": { + "items": {} + } + } + }, "google-native:cloudsearch/v1:FacetOptions": { "properties": { "integerFacetingOptions": { @@ -25302,6 +25338,12 @@ "sourceName": {} } }, + "google-native:cloudsearch/v1:FieldViolationResponse": { + "properties": { + "description": {}, + "field": {} + } + }, "google-native:cloudsearch/v1:Filter": { "properties": { "compositeFilter": { @@ -25352,6 +25394,13 @@ "gsuiteUserEmail": {} } }, + "google-native:cloudsearch/v1:HtmlValuesResponse": { + "properties": { + "values": { + "items": {} + } + } + }, "google-native:cloudsearch/v1:IntegerFacetingOptions": { "properties": { "integerBuckets": { @@ -25366,6 +25415,172 @@ } } }, + "google-native:cloudsearch/v1:IntegerValuesResponse": { + "properties": { + "values": { + "items": {} + } + } + }, + "google-native:cloudsearch/v1:InteractionResponse": { + "properties": { + "interactionTime": { + "format": "google-datetime" + }, + "principal": { + "$ref": "#/types/google-native:cloudsearch/v1:PrincipalResponse" + }, + "type": {} + } + }, + "google-native:cloudsearch/v1:ItemAclResponse": { + "properties": { + "aclInheritanceType": {}, + "deniedReaders": { + "items": { + "$ref": "#/types/google-native:cloudsearch/v1:PrincipalResponse" + } + }, + "inheritAclFrom": {}, + "owners": { + "items": { + "$ref": "#/types/google-native:cloudsearch/v1:PrincipalResponse" + } + }, + "readers": { + "items": { + "$ref": "#/types/google-native:cloudsearch/v1:PrincipalResponse" + } + } + } + }, + "google-native:cloudsearch/v1:ItemContentResponse": { + "properties": { + "contentDataRef": { + "$ref": "#/types/google-native:cloudsearch/v1:UploadItemRefResponse" + }, + "contentFormat": {}, + "hash": {}, + "inlineContent": { + "format": "byte" + } + } + }, + "google-native:cloudsearch/v1:ItemMetadataResponse": { + "properties": { + "containerName": {}, + "contentLanguage": {}, + "contextAttributes": { + "items": { + "$ref": "#/types/google-native:cloudsearch/v1:ContextAttributeResponse" + } + }, + "createTime": { + "format": "google-datetime" + }, + "hash": {}, + "interactions": { + "items": { + "$ref": "#/types/google-native:cloudsearch/v1:InteractionResponse" + } + }, + "keywords": { + "items": {} + }, + "mimeType": {}, + "objectType": {}, + "searchQualityMetadata": { + "$ref": "#/types/google-native:cloudsearch/v1:SearchQualityMetadataResponse" + }, + "sourceRepositoryUrl": {}, + "title": {}, + "updateTime": { + "format": "google-datetime" + } + } + }, + "google-native:cloudsearch/v1:ItemStatusResponse": { + "properties": { + "code": {}, + "processingErrors": { + "items": { 
+ "$ref": "#/types/google-native:cloudsearch/v1:ProcessingErrorResponse" + } + }, + "repositoryErrors": { + "items": { + "$ref": "#/types/google-native:cloudsearch/v1:RepositoryErrorResponse" + } + } + } + }, + "google-native:cloudsearch/v1:ItemStructuredDataResponse": { + "properties": { + "hash": {}, + "object": { + "$ref": "#/types/google-native:cloudsearch/v1:StructuredDataObjectResponse" + } + } + }, + "google-native:cloudsearch/v1:NamedPropertyResponse": { + "properties": { + "booleanValue": {}, + "dateValues": { + "$ref": "#/types/google-native:cloudsearch/v1:DateValuesResponse" + }, + "doubleValues": { + "$ref": "#/types/google-native:cloudsearch/v1:DoubleValuesResponse" + }, + "enumValues": { + "$ref": "#/types/google-native:cloudsearch/v1:EnumValuesResponse" + }, + "htmlValues": { + "$ref": "#/types/google-native:cloudsearch/v1:HtmlValuesResponse" + }, + "integerValues": { + "$ref": "#/types/google-native:cloudsearch/v1:IntegerValuesResponse" + }, + "name": {}, + "objectValues": { + "$ref": "#/types/google-native:cloudsearch/v1:ObjectValuesResponse" + }, + "textValues": { + "$ref": "#/types/google-native:cloudsearch/v1:TextValuesResponse" + }, + "timestampValues": { + "$ref": "#/types/google-native:cloudsearch/v1:TimestampValuesResponse" + } + } + }, + "google-native:cloudsearch/v1:ObjectValuesResponse": { + "properties": { + "values": { + "items": { + "$ref": "#/types/google-native:cloudsearch/v1:StructuredDataObjectResponse" + } + } + } + }, + "google-native:cloudsearch/v1:PrincipalResponse": { + "properties": { + "groupResourceName": {}, + "gsuitePrincipal": { + "$ref": "#/types/google-native:cloudsearch/v1:GSuitePrincipalResponse" + }, + "userResourceName": {} + } + }, + "google-native:cloudsearch/v1:ProcessingErrorResponse": { + "properties": { + "code": {}, + "errorMessage": {}, + "fieldViolations": { + "items": { + "$ref": "#/types/google-native:cloudsearch/v1:FieldViolationResponse" + } + } + } + }, "google-native:cloudsearch/v1:QueryInterpretationConfig": { "properties": { "forceDisableSupplementalResults": {}, @@ -25378,6 +25593,15 @@ "forceVerbatimMode": {} } }, + "google-native:cloudsearch/v1:RepositoryErrorResponse": { + "properties": { + "errorMessage": {}, + "httpStatusCode": { + "format": "int32" + }, + "type": {} + } + }, "google-native:cloudsearch/v1:ScoringConfig": { "properties": { "disableFreshness": {}, @@ -25390,6 +25614,13 @@ "disablePersonalization": {} } }, + "google-native:cloudsearch/v1:SearchQualityMetadataResponse": { + "properties": { + "quality": { + "format": "double" + } + } + }, "google-native:cloudsearch/v1:SortOptions": { "properties": { "operatorName": {}, @@ -25476,6 +25707,34 @@ "sourceImportance": {} } }, + "google-native:cloudsearch/v1:StructuredDataObjectResponse": { + "properties": { + "properties": { + "items": { + "$ref": "#/types/google-native:cloudsearch/v1:NamedPropertyResponse" + } + } + } + }, + "google-native:cloudsearch/v1:TextValuesResponse": { + "properties": { + "values": { + "items": {} + } + } + }, + "google-native:cloudsearch/v1:TimestampValuesResponse": { + "properties": { + "values": { + "items": {} + } + } + }, + "google-native:cloudsearch/v1:UploadItemRefResponse": { + "properties": { + "name": {} + } + }, "google-native:cloudsearch/v1:Value": { "properties": { "booleanValue": {}, @@ -144477,6 +144736,220 @@ "projectsId": "project" } }, + "google-native:aiplatform/v1:Model": { + "create": { + "endpoint": { + "template": "https://aiplatform.googleapis.com/v1/projects/{projectsId}/locations/{locationsId}/models:upload", 
+ "values": [ + { + "name": "projectsId", + "sdkName": "project", + "kind": "path" + }, + { + "name": "locationsId", + "sdkName": "location", + "kind": "path" + } + ] + }, + "sdkProperties": { + "artifactUri": { + "container": "model" + }, + "containerSpec": { + "container": "model" + }, + "description": { + "container": "model" + }, + "displayName": { + "container": "model" + }, + "encryptionSpec": { + "container": "model" + }, + "etag": { + "container": "model" + }, + "explanationSpec": { + "container": "model" + }, + "labels": { + "container": "model" + }, + "metadata": { + "container": "model" + }, + "metadataSchemaUri": { + "container": "model" + }, + "modelId": {}, + "name": { + "container": "model" + }, + "parentModel": {}, + "pipelineJob": { + "container": "model" + }, + "predictSchemata": { + "container": "model" + }, + "serviceAccount": {}, + "versionAliases": { + "container": "model" + }, + "versionDescription": { + "container": "model" + } + }, + "verb": "POST", + "operations": { + "template": "https://aiplatform.googleapis.com/v1/{+name}", + "values": [ + { + "name": "name", + "sdkName": "name", + "kind": "path" + } + ] + }, + "autoname": { + "fieldName": "{name}" + } + }, + "read": { + "endpoint": { + "template": "https://aiplatform.googleapis.com/v1/projects/{projectsId}/locations/{locationsId}/models/{modelsId}", + "values": [ + { + "name": "locationsId", + "sdkName": "location", + "kind": "path" + }, + { + "name": "modelsId", + "sdkName": "modelId", + "kind": "path" + }, + { + "name": "projectsId", + "sdkName": "project", + "kind": "path" + } + ] + }, + "verb": "GET" + }, + "update": { + "endpoint": { + "template": "https://aiplatform.googleapis.com/v1/projects/{projectsId}/locations/{locationsId}/models/{modelsId}?updateMask={updateMask}", + "values": [ + { + "name": "locationsId", + "sdkName": "location", + "kind": "path" + }, + { + "name": "modelsId", + "sdkName": "modelId", + "kind": "path" + }, + { + "name": "projectsId", + "sdkName": "project", + "kind": "path" + }, + { + "name": "updateMask", + "sdkName": "updateMask", + "kind": "query" + } + ] + }, + "sdkProperties": { + "artifactUri": { + "forceNew": true + }, + "containerSpec": { + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1ModelContainerSpec" + }, + "description": {}, + "displayName": { + "required": true + }, + "encryptionSpec": { + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1EncryptionSpec" + }, + "etag": {}, + "explanationSpec": { + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1ExplanationSpec" + }, + "labels": {}, + "metadata": { + "$ref": "pulumi.json#/Any", + "forceNew": true + }, + "metadataSchemaUri": { + "forceNew": true + }, + "name": {}, + "pipelineJob": {}, + "predictSchemata": { + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1PredictSchemata" + }, + "versionAliases": { + "items": {} + }, + "versionDescription": {} + }, + "verb": "PATCH", + "updateMask": { + "queryParamName": "updateMask" + } + }, + "delete": { + "endpoint": { + "template": "https://aiplatform.googleapis.com/v1/projects/{projectsId}/locations/{locationsId}/models/{modelsId}", + "values": [ + { + "name": "locationsId", + "sdkName": "location", + "kind": "path" + }, + { + "name": "modelsId", + "sdkName": "modelId", + "kind": "path" + }, + { + "name": "projectsId", + "sdkName": "project", + "kind": "path" + } + ] + }, + "verb": "DELETE", + "operations": { + "template": "https://aiplatform.googleapis.com/v1/{+name}", + "values": [ + { + "name": 
"name", + "sdkName": "name", + "kind": "path" + } + ] + } + }, + "rootUrl": "https://aiplatform.googleapis.com/", + "formDataUpload": {}, + "idPath": "v1/projects/{projectsId}/locations/{locationsId}/models/{modelsId}", + "idParams": { + "locationsId": "location", + "modelsId": "modelId", + "projectsId": "project" + } + }, "google-native:aiplatform/v1:ModelDeploymentMonitoringJob": { "create": { "endpoint": { @@ -151127,27 +151600,132 @@ "updateMask": {} }, "delete": { - "endpoint": {} + "endpoint": {} + }, + "rootUrl": "https://aiplatform.googleapis.com/", + "formDataUpload": {}, + "idPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/metadataStores/{metadataStoresId}/metadataSchemas/{metadataSchemasId}", + "idParams": { + "locationsId": "location", + "metadataSchemasId": "metadataSchemaId", + "metadataStoresId": "metadataStoreId", + "projectsId": "project" + } + }, + "google-native:aiplatform/v1beta1:MetadataStore": { + "create": { + "endpoint": { + "template": "https://aiplatform.googleapis.com/v1beta1/projects/{projectsId}/locations/{locationsId}/metadataStores", + "values": [ + { + "name": "metadataStoreId", + "kind": "query" + }, + { + "name": "projectsId", + "sdkName": "project", + "kind": "path" + }, + { + "name": "locationsId", + "sdkName": "location", + "kind": "path" + } + ] + }, + "sdkProperties": { + "description": {}, + "encryptionSpec": { + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1EncryptionSpec" + } + }, + "verb": "POST", + "operations": { + "template": "https://aiplatform.googleapis.com/v1beta1/{+name}", + "values": [ + { + "name": "name", + "sdkName": "name", + "kind": "path" + } + ] + }, + "autoname": {} + }, + "read": { + "endpoint": { + "template": "https://aiplatform.googleapis.com/v1beta1/projects/{projectsId}/locations/{locationsId}/metadataStores/{metadataStoresId}", + "values": [ + { + "name": "locationsId", + "sdkName": "location", + "kind": "path" + }, + { + "name": "metadataStoresId", + "sdkName": "metadataStoreId", + "kind": "path" + }, + { + "name": "projectsId", + "sdkName": "project", + "kind": "path" + } + ] + }, + "verb": "GET" + }, + "update": { + "endpoint": {}, + "updateMask": {} + }, + "delete": { + "endpoint": { + "template": "https://aiplatform.googleapis.com/v1beta1/projects/{projectsId}/locations/{locationsId}/metadataStores/{metadataStoresId}", + "values": [ + { + "name": "locationsId", + "sdkName": "location", + "kind": "path" + }, + { + "name": "metadataStoresId", + "sdkName": "metadataStoreId", + "kind": "path" + }, + { + "name": "projectsId", + "sdkName": "project", + "kind": "path" + } + ] + }, + "verb": "DELETE", + "operations": { + "template": "https://aiplatform.googleapis.com/v1beta1/{+name}", + "values": [ + { + "name": "name", + "sdkName": "name", + "kind": "path" + } + ] + } }, "rootUrl": "https://aiplatform.googleapis.com/", "formDataUpload": {}, - "idPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/metadataStores/{metadataStoresId}/metadataSchemas/{metadataSchemasId}", + "idPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/metadataStores/{metadataStoresId}", "idParams": { "locationsId": "location", - "metadataSchemasId": "metadataSchemaId", "metadataStoresId": "metadataStoreId", "projectsId": "project" } }, - "google-native:aiplatform/v1beta1:MetadataStore": { + "google-native:aiplatform/v1beta1:Model": { "create": { "endpoint": { - "template": "https://aiplatform.googleapis.com/v1beta1/projects/{projectsId}/locations/{locationsId}/metadataStores", + 
"template": "https://aiplatform.googleapis.com/v1beta1/projects/{projectsId}/locations/{locationsId}/models:upload", "values": [ - { - "name": "metadataStoreId", - "kind": "query" - }, { "name": "projectsId", "sdkName": "project", @@ -151161,9 +151739,50 @@ ] }, "sdkProperties": { - "description": {}, + "artifactUri": { + "container": "model" + }, + "containerSpec": { + "container": "model" + }, + "description": { + "container": "model" + }, + "displayName": { + "container": "model" + }, "encryptionSpec": { - "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1EncryptionSpec" + "container": "model" + }, + "etag": { + "container": "model" + }, + "explanationSpec": { + "container": "model" + }, + "labels": { + "container": "model" + }, + "metadata": { + "container": "model" + }, + "metadataSchemaUri": { + "container": "model" + }, + "modelId": {}, + "name": { + "container": "model" + }, + "parentModel": {}, + "predictSchemata": { + "container": "model" + }, + "serviceAccount": {}, + "versionAliases": { + "container": "model" + }, + "versionDescription": { + "container": "model" } }, "verb": "POST", @@ -151177,11 +151796,13 @@ } ] }, - "autoname": {} + "autoname": { + "fieldName": "{name}" + } }, "read": { "endpoint": { - "template": "https://aiplatform.googleapis.com/v1beta1/projects/{projectsId}/locations/{locationsId}/metadataStores/{metadataStoresId}", + "template": "https://aiplatform.googleapis.com/v1beta1/projects/{projectsId}/locations/{locationsId}/models/{modelsId}", "values": [ { "name": "locationsId", @@ -151189,8 +151810,8 @@ "kind": "path" }, { - "name": "metadataStoresId", - "sdkName": "metadataStoreId", + "name": "modelsId", + "sdkName": "modelId", "kind": "path" }, { @@ -151203,12 +151824,74 @@ "verb": "GET" }, "update": { - "endpoint": {}, - "updateMask": {} + "endpoint": { + "template": "https://aiplatform.googleapis.com/v1beta1/projects/{projectsId}/locations/{locationsId}/models/{modelsId}?updateMask={updateMask}", + "values": [ + { + "name": "locationsId", + "sdkName": "location", + "kind": "path" + }, + { + "name": "modelsId", + "sdkName": "modelId", + "kind": "path" + }, + { + "name": "projectsId", + "sdkName": "project", + "kind": "path" + }, + { + "name": "updateMask", + "sdkName": "updateMask", + "kind": "query" + } + ] + }, + "sdkProperties": { + "artifactUri": { + "forceNew": true + }, + "containerSpec": { + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1ModelContainerSpec" + }, + "description": {}, + "displayName": { + "required": true + }, + "encryptionSpec": { + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1EncryptionSpec" + }, + "etag": {}, + "explanationSpec": { + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1ExplanationSpec" + }, + "labels": {}, + "metadata": { + "$ref": "pulumi.json#/Any", + "forceNew": true + }, + "metadataSchemaUri": { + "forceNew": true + }, + "name": {}, + "predictSchemata": { + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1PredictSchemata" + }, + "versionAliases": { + "items": {} + }, + "versionDescription": {} + }, + "verb": "PATCH", + "updateMask": { + "queryParamName": "updateMask" + } }, "delete": { "endpoint": { - "template": "https://aiplatform.googleapis.com/v1beta1/projects/{projectsId}/locations/{locationsId}/metadataStores/{metadataStoresId}", + "template": "https://aiplatform.googleapis.com/v1beta1/projects/{projectsId}/locations/{locationsId}/models/{modelsId}", "values": [ 
{ "name": "locationsId", @@ -151216,8 +151899,8 @@ "kind": "path" }, { - "name": "metadataStoresId", - "sdkName": "metadataStoreId", + "name": "modelsId", + "sdkName": "modelId", "kind": "path" }, { @@ -151241,10 +151924,10 @@ }, "rootUrl": "https://aiplatform.googleapis.com/", "formDataUpload": {}, - "idPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/metadataStores/{metadataStoresId}", + "idPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/models/{modelsId}", "idParams": { "locationsId": "location", - "metadataStoresId": "metadataStoreId", + "modelsId": "modelId", "projectsId": "project" } }, @@ -214651,6 +215334,90 @@ "datasourcesId": "name" } }, + "google-native:cloudsearch/v1:Item": { + "create": { + "endpoint": { + "template": "https://cloudsearch.googleapis.com/v1/indexing/datasources/{datasourcesId}/items/{itemsId}:upload", + "values": [ + { + "name": "datasourcesId", + "sdkName": "datasourceId", + "kind": "path" + }, + { + "name": "itemsId", + "sdkName": "itemId", + "kind": "path" + } + ] + }, + "sdkProperties": { + "connectorName": {}, + "debugOptions": { + "$ref": "#/types/google-native:cloudsearch/v1:DebugOptions" + } + }, + "verb": "POST", + "autoname": {} + }, + "read": { + "endpoint": { + "template": "https://cloudsearch.googleapis.com/v1/indexing/datasources/{datasourcesId}/items/{itemsId}", + "values": [ + { + "name": "datasourcesId", + "sdkName": "datasourceId", + "kind": "path" + }, + { + "name": "itemsId", + "sdkName": "itemId", + "kind": "path" + } + ] + }, + "verb": "GET" + }, + "update": { + "endpoint": {}, + "updateMask": {} + }, + "delete": { + "endpoint": { + "template": "https://cloudsearch.googleapis.com/v1/indexing/datasources/{datasourcesId}/items/{itemsId}?mode={mode}\u0026version={version}", + "values": [ + { + "name": "datasourcesId", + "sdkName": "datasourceId", + "kind": "path" + }, + { + "name": "itemsId", + "sdkName": "itemId", + "kind": "path" + } + ] + }, + "verb": "DELETE", + "operations": { + "template": "https://cloudsearch.googleapis.com/v1/{+name}", + "values": [ + { + "name": "name", + "sdkName": "name", + "kind": "path" + } + ] + } + }, + "rootUrl": "https://cloudsearch.googleapis.com/", + "formDataUpload": {}, + "idPath": "v1/indexing/datasources/{datasourcesId}/items/{itemsId}", + "idParams": { + "datasourcesId": "datasourceId", + "itemsId": "itemId" + } + }, "google-native:cloudsearch/v1:SearchApplication": { "create": { "endpoint": { @@ -453334,6 +454101,29 @@ }, "verb": "GET" }, + "google-native:aiplatform/v1:getModel": { + "url": { + "template": "https://aiplatform.googleapis.com/v1/projects/{projectsId}/locations/{locationsId}/models/{modelsId}", + "values": [ + { + "name": "projectsId", + "sdkName": "project", + "kind": "path" + }, + { + "name": "locationsId", + "sdkName": "location", + "kind": "path" + }, + { + "name": "modelsId", + "sdkName": "modelId", + "kind": "path" + } + ] + }, + "verb": "GET" + }, "google-native:aiplatform/v1:getModelDeploymentMonitoringJob": { "url": { "template": "https://aiplatform.googleapis.com/v1/projects/{projectsId}/locations/{locationsId}/modelDeploymentMonitoringJobs/{modelDeploymentMonitoringJobsId}", @@ -454343,6 +455133,29 @@ }, "verb": "GET" }, + "google-native:aiplatform/v1beta1:getModel": { + "url": { + "template": "https://aiplatform.googleapis.com/v1beta1/projects/{projectsId}/locations/{locationsId}/models/{modelsId}", + "values": [ + { + "name": "projectsId", + "sdkName": "project", + "kind": "path" + }, + { + "name": "locationsId", + "sdkName": "location", + "kind": 
"path" + }, + { + "name": "modelsId", + "sdkName": "modelId", + "kind": "path" + } + ] + }, + "verb": "GET" + }, "google-native:aiplatform/v1beta1:getModelDeploymentMonitoringJob": { "url": { "template": "https://aiplatform.googleapis.com/v1beta1/projects/{projectsId}/locations/{locationsId}/modelDeploymentMonitoringJobs/{modelDeploymentMonitoringJobsId}", @@ -461400,6 +462213,33 @@ }, "verb": "GET" }, + "google-native:cloudsearch/v1:getItem": { + "url": { + "template": "https://cloudsearch.googleapis.com/v1/indexing/datasources/{datasourcesId}/items/{itemsId}", + "values": [ + { + "name": "connectorName", + "kind": "query" + }, + { + "name": "debugOptions.enableDebugging", + "sdkName": "debugOptionsEnableDebugging", + "kind": "query" + }, + { + "name": "datasourcesId", + "sdkName": "datasourceId", + "kind": "path" + }, + { + "name": "itemsId", + "sdkName": "itemId", + "kind": "path" + } + ] + }, + "verb": "GET" + }, "google-native:cloudsearch/v1:getSearchApplication": { "url": { "template": "https://cloudsearch.googleapis.com/v1/settings/searchapplications/{searchapplicationsId}", diff --git a/provider/cmd/pulumi-resource-google-native/schema.json b/provider/cmd/pulumi-resource-google-native/schema.json index fff278841f..f7391c8e2b 100644 --- a/provider/cmd/pulumi-resource-google-native/schema.json +++ b/provider/cmd/pulumi-resource-google-native/schema.json @@ -64449,6 +64449,27 @@ "subFilters" ] }, + "google-native:cloudsearch/v1:ContextAttributeResponse": { + "description": "A named attribute associated with an item which can be used for influencing the ranking of the item based on the context in the request.", + "properties": { + "name": { + "type": "string", + "description": "The name of the attribute. It should not be empty. The maximum length is 32 characters. The name must start with a letter and can only contain letters (A-Z, a-z) or numbers (0-9). The name will be normalized (lower-cased) before being matched." + }, + "values": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Text values of the attribute. The maximum number of elements is 10. The maximum length of an element in the array is 32 characters. The value will be normalized (lower-cased) before being matched." + } + }, + "type": "object", + "required": [ + "name", + "values" + ] + }, "google-native:cloudsearch/v1:DataSourceRestriction": { "description": "Restriction on Datasource.", "properties": { @@ -64532,6 +64553,63 @@ "year" ] }, + "google-native:cloudsearch/v1:DateValuesResponse": { + "description": "List of date values.", + "properties": { + "values": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:DateResponse" + } + } + }, + "type": "object", + "required": [ + "values" + ] + }, + "google-native:cloudsearch/v1:DebugOptions": { + "description": "Shared request debug options for all cloudsearch RPC methods.", + "properties": { + "enableDebugging": { + "type": "boolean", + "description": "If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field." 
+ } + }, + "type": "object" + }, + "google-native:cloudsearch/v1:DoubleValuesResponse": { + "description": "List of double values.", + "properties": { + "values": { + "type": "array", + "items": { + "type": "number" + } + } + }, + "type": "object", + "required": [ + "values" + ] + }, + "google-native:cloudsearch/v1:EnumValuesResponse": { + "description": "List of enum values.", + "properties": { + "values": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The maximum allowable length for string values is 32 characters." + } + }, + "type": "object", + "required": [ + "values" + ] + }, "google-native:cloudsearch/v1:FacetOptions": { "description": "Specifies operators to return facet results for. There will be one FacetResult for every source_name/object_type/operator_name combination.", "properties": { @@ -64593,6 +64671,23 @@ "sourceName" ] }, + "google-native:cloudsearch/v1:FieldViolationResponse": { + "properties": { + "description": { + "type": "string", + "description": "The description of the error." + }, + "field": { + "type": "string", + "description": "Path of field with violation." + } + }, + "type": "object", + "required": [ + "description", + "field" + ] + }, "google-native:cloudsearch/v1:Filter": { "description": "A generic way of expressing filters in a query, which supports two approaches: **1. Setting a ValueFilter.** The name must match an operator_name defined in the schema for your data source. **2. Setting a CompositeFilter.** The filters are evaluated using the logical operator. The top-level operators can only be either an AND or a NOT. AND can appear only at the top-most level. OR can appear only under a top-level AND.", "properties": { @@ -64698,6 +64793,22 @@ "gsuiteUserEmail" ] }, + "google-native:cloudsearch/v1:HtmlValuesResponse": { + "description": "List of html values.", + "properties": { + "values": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The maximum allowable length for html values is 2048 characters." + } + }, + "type": "object", + "required": [ + "values" + ] + }, "google-native:cloudsearch/v1:IntegerFacetingOptions": { "description": "Used to specify integer faceting options.", "properties": { @@ -64727,6 +64838,375 @@ "integerBuckets" ] }, + "google-native:cloudsearch/v1:IntegerValuesResponse": { + "description": "List of integer values.", + "properties": { + "values": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "type": "object", + "required": [ + "values" + ] + }, + "google-native:cloudsearch/v1:InteractionResponse": { + "description": "Represents an interaction between a user and an item.", + "properties": { + "interactionTime": { + "type": "string", + "description": "The time when the user acted on the item. If multiple actions of the same type exist for a single user, only the most recent action is recorded." + }, + "principal": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:PrincipalResponse", + "description": "The user that acted on the item." + }, + "type": { + "type": "string" + } + }, + "type": "object", + "required": [ + "interactionTime", + "principal", + "type" + ] + }, + "google-native:cloudsearch/v1:ItemAclResponse": { + "description": "Access control list information for the item. 
For more information see [Map ACLs](https://developers.google.com/cloud-search/docs/guides/acls).", + "properties": { + "aclInheritanceType": { + "type": "string", + "description": "Sets the type of access rules to apply when an item inherits its ACL from a parent. This should always be set in tandem with the inheritAclFrom field. Also, when the inheritAclFrom field is set, this field should be set to a valid AclInheritanceType." + }, + "deniedReaders": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:PrincipalResponse" + }, + "description": "List of principals who are explicitly denied access to the item in search results. While principals are denied access by default, use denied readers to handle exceptions and override the list allowed readers. The maximum number of elements is 100." + }, + "inheritAclFrom": { + "type": "string", + "description": "The name of the item to inherit the Access Permission List (ACL) from. Note: ACL inheritance *only* provides access permissions to child items and does not define structural relationships, nor does it provide convenient ways to delete large groups of items. Deleting an ACL parent from the index only alters the access permissions of child items that reference the parent in the inheritAclFrom field. The item is still in the index, but may not visible in search results. By contrast, deletion of a container item also deletes all items that reference the container via the containerName field. The maximum length for this field is 1536 characters." + }, + "owners": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:PrincipalResponse" + }, + "description": "Optional. List of owners for the item. This field has no bearing on document access permissions. It does, however, offer a slight ranking boosts items where the querying user is an owner. The maximum number of elements is 5." + }, + "readers": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:PrincipalResponse" + }, + "description": "List of principals who are allowed to see the item in search results. Optional if inheriting permissions from another item or if the item is not intended to be visible, such as virtual containers. The maximum number of elements is 1000." + } + }, + "type": "object", + "required": [ + "aclInheritanceType", + "deniedReaders", + "inheritAclFrom", + "owners", + "readers" + ] + }, + "google-native:cloudsearch/v1:ItemContentResponse": { + "description": "Content of an item to be indexed and surfaced by Cloud Search. Only UTF-8 encoded strings are allowed as inlineContent. If the content is uploaded and not binary, it must be UTF-8 encoded.", + "properties": { + "contentDataRef": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:UploadItemRefResponse", + "description": "Upload reference ID of a previously uploaded content via write method." + }, + "contentFormat": { + "type": "string" + }, + "hash": { + "type": "string", + "description": "Hashing info calculated and provided by the API client for content. Can be used with the items.push method to calculate modified state. The maximum length is 2048 characters." + }, + "inlineContent": { + "type": "string", + "description": "Content that is supplied inlined within the update method. The maximum length is 102400 bytes (100 KiB)." 
+ } + }, + "type": "object", + "required": [ + "contentDataRef", + "contentFormat", + "hash", + "inlineContent" + ] + }, + "google-native:cloudsearch/v1:ItemMetadataResponse": { + "description": "Available metadata fields for the item.", + "properties": { + "containerName": { + "type": "string", + "description": "The name of the container for this item. Deletion of the container item leads to automatic deletion of this item. Note: ACLs are not inherited from a container item. To provide ACL inheritance for an item, use the inheritAclFrom field. The maximum length is 1536 characters." + }, + "contentLanguage": { + "type": "string", + "description": "The BCP-47 language code for the item, such as \"en-US\" or \"sr-Latn\". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. The maximum length is 32 characters." + }, + "contextAttributes": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:ContextAttributeResponse" + }, + "description": "A set of named attributes associated with the item. This can be used for influencing the ranking of the item based on the context in the request. The maximum number of elements is 10." + }, + "createTime": { + "type": "string", + "description": "The time when the item was created in the source repository." + }, + "hash": { + "type": "string", + "description": "Hashing value provided by the API caller. This can be used with the items.push method to calculate modified state. The maximum length is 2048 characters." + }, + "interactions": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:InteractionResponse" + }, + "description": "A list of interactions for the item. Interactions are used to improve Search quality, but are not exposed to end users. The maximum number of elements is 1000." + }, + "keywords": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Additional keywords or phrases that should match the item. Used internally for user generated content. The maximum number of elements is 100. The maximum length is 8192 characters." + }, + "mimeType": { + "type": "string", + "description": "The original mime-type of ItemContent.content in the source repository. The maximum length is 256 characters." + }, + "objectType": { + "type": "string", + "description": "The type of the item. This should correspond to the name of an object definition in the schema registered for the data source. For example, if the schema for the data source contains an object definition with name 'document', then item indexing requests for objects of that type should set objectType to 'document'. The maximum length is 256 characters." + }, + "searchQualityMetadata": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:SearchQualityMetadataResponse", + "description": "Additional search quality metadata of the item" + }, + "sourceRepositoryUrl": { + "type": "string", + "description": "Link to the source repository serving the data. Seach results apply this link to the title. Whitespace or special characters may cause Cloud Seach result links to trigger a redirect notice; to avoid this, encode the URL. The maximum length is 2048 characters." + }, + "title": { + "type": "string", + "description": "The title of the item. If given, this will be the displayed title of the Search result. The maximum length is 2048 characters." 
+ }, + "updateTime": { + "type": "string", + "description": "The time when the item was last modified in the source repository." + } + }, + "type": "object", + "required": [ + "containerName", + "contentLanguage", + "contextAttributes", + "createTime", + "hash", + "interactions", + "keywords", + "mimeType", + "objectType", + "searchQualityMetadata", + "sourceRepositoryUrl", + "title", + "updateTime" + ] + }, + "google-native:cloudsearch/v1:ItemStatusResponse": { + "description": "This contains item's status and any errors.", + "properties": { + "code": { + "type": "string", + "description": "Status code." + }, + "processingErrors": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:ProcessingErrorResponse" + }, + "description": "Error details in case the item is in ERROR state." + }, + "repositoryErrors": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:RepositoryErrorResponse" + }, + "description": "Repository error reported by connector." + } + }, + "type": "object", + "required": [ + "code", + "processingErrors", + "repositoryErrors" + ] + }, + "google-native:cloudsearch/v1:ItemStructuredDataResponse": { + "description": "Available structured data fields for the item.", + "properties": { + "hash": { + "type": "string", + "description": "Hashing value provided by the API caller. This can be used with the items.push method to calculate modified state. The maximum length is 2048 characters." + }, + "object": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:StructuredDataObjectResponse", + "description": "The structured data object that should conform to a registered object definition in the schema for the data source." + } + }, + "type": "object", + "required": [ + "hash", + "object" + ] + }, + "google-native:cloudsearch/v1:NamedPropertyResponse": { + "description": "A typed name-value pair for structured data. The type of the value should be the same as the registered type for the `name` property in the object definition of `objectType`.", + "properties": { + "booleanValue": { + "type": "boolean" + }, + "dateValues": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:DateValuesResponse" + }, + "doubleValues": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:DoubleValuesResponse" + }, + "enumValues": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:EnumValuesResponse" + }, + "htmlValues": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:HtmlValuesResponse" + }, + "integerValues": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:IntegerValuesResponse" + }, + "name": { + "type": "string", + "description": "The name of the property. This name should correspond to the name of the property that was registered for object definition in the schema. The maximum allowable length for this property is 256 characters." 
+ }, + "objectValues": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:ObjectValuesResponse" + }, + "textValues": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:TextValuesResponse" + }, + "timestampValues": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:TimestampValuesResponse" + } + }, + "type": "object", + "required": [ + "booleanValue", + "dateValues", + "doubleValues", + "enumValues", + "htmlValues", + "integerValues", + "name", + "objectValues", + "textValues", + "timestampValues" + ] + }, + "google-native:cloudsearch/v1:ObjectValuesResponse": { + "description": "List of object values.", + "properties": { + "values": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:StructuredDataObjectResponse" + } + } + }, + "type": "object", + "required": [ + "values" + ] + }, + "google-native:cloudsearch/v1:PrincipalResponse": { + "description": "Reference to a user, group, or domain.", + "properties": { + "groupResourceName": { + "type": "string", + "description": "This principal is a group identified using an external identity. The name field must specify the group resource name with this format: identitysources/{source_id}/groups/{ID}" + }, + "gsuitePrincipal": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:GSuitePrincipalResponse", + "description": "This principal is a Google Workspace user, group or domain." + }, + "userResourceName": { + "type": "string", + "description": "This principal is a user identified using an external identity. The name field must specify the user resource name with this format: identitysources/{source_id}/users/{ID}" + } + }, + "type": "object", + "required": [ + "groupResourceName", + "gsuitePrincipal", + "userResourceName" + ] + }, + "google-native:cloudsearch/v1:ProcessingErrorResponse": { + "properties": { + "code": { + "type": "string", + "description": "Error code indicating the nature of the error." + }, + "errorMessage": { + "type": "string", + "description": "The description of the error." + }, + "fieldViolations": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:FieldViolationResponse" + }, + "description": "In case the item fields are invalid, this field contains the details about the validation errors." + } + }, + "type": "object", + "required": [ + "code", + "errorMessage", + "fieldViolations" + ] + }, "google-native:cloudsearch/v1:QueryInterpretationConfig": { "description": "Default options to interpret user query.", "properties": { @@ -64759,6 +65239,29 @@ "forceVerbatimMode" ] }, + "google-native:cloudsearch/v1:RepositoryErrorResponse": { + "description": "Errors when the connector is communicating to the source repository.", + "properties": { + "errorMessage": { + "type": "string", + "description": "Message that describes the error. The maximum allowable length of the message is 8192 characters." + }, + "httpStatusCode": { + "type": "integer", + "description": "Error codes. Matches the definition of HTTP status codes." + }, + "type": { + "type": "string", + "description": "The type of error." 
+ } + }, + "type": "object", + "required": [ + "errorMessage", + "httpStatusCode", + "type" + ] + }, "google-native:cloudsearch/v1:ScoringConfig": { "description": "Scoring configurations for a source while processing a Search or Suggest request.", "properties": { @@ -64791,6 +65294,19 @@ "disablePersonalization" ] }, + "google-native:cloudsearch/v1:SearchQualityMetadataResponse": { + "description": "Additional search quality metadata of the item.", + "properties": { + "quality": { + "type": "number", + "description": "An indication of the quality of the item, used to influence search quality. Value should be between 0.0 (lowest quality) and 1.0 (highest quality). The default value is 0.0." + } + }, + "type": "object", + "required": [ + "quality" + ] + }, "google-native:cloudsearch/v1:SortOptions": { "properties": { "operatorName": { @@ -65031,6 +65547,67 @@ } ] }, + "google-native:cloudsearch/v1:StructuredDataObjectResponse": { + "description": "A structured data object consisting of named properties.", + "properties": { + "properties": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:NamedPropertyResponse" + }, + "description": "The properties for the object. The maximum number of elements is 1000." + } + }, + "type": "object", + "required": [ + "properties" + ] + }, + "google-native:cloudsearch/v1:TextValuesResponse": { + "description": "List of text values.", + "properties": { + "values": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The maximum allowable length for text values is 2048 characters." + } + }, + "type": "object", + "required": [ + "values" + ] + }, + "google-native:cloudsearch/v1:TimestampValuesResponse": { + "description": "List of timestamp values.", + "properties": { + "values": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "type": "object", + "required": [ + "values" + ] + }, + "google-native:cloudsearch/v1:UploadItemRefResponse": { + "description": "Represents an upload session reference. This reference is created via upload method. This reference is valid for 30 days after its creation. Updating of item content may refer to this uploaded content via contentDataRef.", + "properties": { + "name": { + "type": "string", + "description": "The name of the content reference. The maximum length is 2048 characters." + } + }, + "type": "object", + "required": [ + "name" + ] + }, "google-native:cloudsearch/v1:Value": { "description": "Definition of a single value with generic type.", "properties": { @@ -365243,6 +365820,287 @@ } } }, + "google-native:aiplatform/v1:Model": { + "description": "Uploads a Model artifact into Vertex AI.", + "properties": { + "artifactUri": { + "type": "string", + "description": "Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models." + }, + "containerSpec": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1ModelContainerSpecResponse", + "description": "Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models." + }, + "createTime": { + "type": "string", + "description": "Timestamp when this Model was uploaded into Vertex AI." 
+ }, + "deployedModels": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1DeployedModelRefResponse" + }, + "description": "The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations." + }, + "description": { + "type": "string", + "description": "The description of the Model." + }, + "displayName": { + "type": "string", + "description": "The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters." + }, + "encryptionSpec": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1EncryptionSpecResponse", + "description": "Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key." + }, + "etag": { + "type": "string", + "description": "Used to perform consistent read-modify-write updates. If not set, a blind \"overwrite\" update happens." + }, + "explanationSpec": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1ExplanationSpecResponse", + "description": "The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob." + }, + "labels": { + "type": "object", + "description": "The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels." + }, + "location": { + "type": "string", + "replaceOnChanges": true + }, + "metadata": { + "$ref": "pulumi.json#/Any", + "description": "Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information." + }, + "metadataArtifact": { + "type": "string", + "description": "The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`." + }, + "metadataSchemaUri": { + "type": "string", + "description": "Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. 
The output URI will point to a location where the user only has a read access." + }, + "modelSourceInfo": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1ModelSourceInfoResponse", + "description": "Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model." + }, + "name": { + "type": "string", + "description": "The resource name of the Model." + }, + "originalModelInfo": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1ModelOriginalModelInfoResponse", + "description": "If this Model is a copy of another Model, this contains info about the original." + }, + "pipelineJob": { + "type": "string", + "description": "Optional. This field is populated if the model is produced by a pipeline job." + }, + "predictSchemata": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1PredictSchemataResponse", + "description": "The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain." + }, + "project": { + "type": "string", + "replaceOnChanges": true + }, + "supportedDeploymentResourcesTypes": { + "type": "array", + "items": { + "type": "string" + }, + "description": "When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats." + }, + "supportedExportFormats": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1ModelExportFormatResponse" + }, + "description": "The formats in which this Model may be exported. If empty, this Model is not available for export." + }, + "supportedInputStorageFormats": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain." 
+ }, + "supportedOutputStorageFormats": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain." + }, + "trainingPipeline": { + "type": "string", + "description": "The resource name of the TrainingPipeline that uploaded this Model, if any." + }, + "updateTime": { + "type": "string", + "description": "Timestamp when this Model was most recently updated." + }, + "versionAliases": { + "type": "array", + "items": { + "type": "string" + }, + "description": "User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model." + }, + "versionCreateTime": { + "type": "string", + "description": "Timestamp when this version was created." + }, + "versionDescription": { + "type": "string", + "description": "The description of this version." + }, + "versionId": { + "type": "string", + "description": "Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation." + }, + "versionUpdateTime": { + "type": "string", + "description": "Timestamp when this version was most recently updated." + } + }, + "type": "object", + "required": [ + "artifactUri", + "containerSpec", + "createTime", + "deployedModels", + "description", + "displayName", + "encryptionSpec", + "etag", + "explanationSpec", + "labels", + "location", + "metadata", + "metadataArtifact", + "metadataSchemaUri", + "modelSourceInfo", + "name", + "originalModelInfo", + "pipelineJob", + "predictSchemata", + "project", + "supportedDeploymentResourcesTypes", + "supportedExportFormats", + "supportedInputStorageFormats", + "supportedOutputStorageFormats", + "trainingPipeline", + "updateTime", + "versionAliases", + "versionCreateTime", + "versionDescription", + "versionId", + "versionUpdateTime" + ], + "inputProperties": { + "artifactUri": { + "type": "string", + "description": "Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models." 
+ }, + "containerSpec": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1ModelContainerSpec", + "description": "Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models." + }, + "description": { + "type": "string", + "description": "The description of the Model." + }, + "displayName": { + "type": "string", + "description": "The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters." + }, + "encryptionSpec": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1EncryptionSpec", + "description": "Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key." + }, + "etag": { + "type": "string", + "description": "Used to perform consistent read-modify-write updates. If not set, a blind \"overwrite\" update happens." + }, + "explanationSpec": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1ExplanationSpec", + "description": "The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob." + }, + "labels": { + "type": "object", + "description": "The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels." + }, + "location": { + "type": "string", + "replaceOnChanges": true + }, + "metadata": { + "$ref": "pulumi.json#/Any", + "description": "Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information." + }, + "metadataSchemaUri": { + "type": "string", + "description": "Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access." + }, + "modelId": { + "type": "string", + "description": "Optional. 
The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen." + }, + "name": { + "type": "string", + "description": "The resource name of the Model." + }, + "parentModel": { + "type": "string", + "description": "Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version." + }, + "pipelineJob": { + "type": "string", + "description": "Optional. This field is populated if the model is produced by a pipeline job." + }, + "predictSchemata": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1PredictSchemata", + "description": "The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain." + }, + "project": { + "type": "string", + "replaceOnChanges": true + }, + "serviceAccount": { + "type": "string", + "description": "Optional. The user-provided custom service account to use to do the model upload. If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.)." + }, + "versionAliases": { + "type": "array", + "items": { + "type": "string" + }, + "description": "User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model." + }, + "versionDescription": { + "type": "string", + "description": "The description of this version." + } + }, + "requiredInputs": [ + "displayName" + ] + }, "google-native:aiplatform/v1:ModelDeploymentMonitoringJob": { "description": "Creates a ModelDeploymentMonitoringJob. It will run periodically on a configured interval.\nAuto-naming is currently not supported for this resource.", "properties": { @@ -370779,6 +371637,278 @@ } } }, + "google-native:aiplatform/v1beta1:Model": { + "description": "Uploads a Model artifact into Vertex AI.", + "properties": { + "artifactUri": { + "type": "string", + "description": "Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models." + }, + "containerSpec": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1ModelContainerSpecResponse", + "description": "Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models." + }, + "createTime": { + "type": "string", + "description": "Timestamp when this Model was uploaded into Vertex AI." 
+ }, + "deployedModels": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1DeployedModelRefResponse" + }, + "description": "The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations." + }, + "description": { + "type": "string", + "description": "The description of the Model." + }, + "displayName": { + "type": "string", + "description": "The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters." + }, + "encryptionSpec": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1EncryptionSpecResponse", + "description": "Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key." + }, + "etag": { + "type": "string", + "description": "Used to perform consistent read-modify-write updates. If not set, a blind \"overwrite\" update happens." + }, + "explanationSpec": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1ExplanationSpecResponse", + "description": "The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob." + }, + "labels": { + "type": "object", + "description": "The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels." + }, + "location": { + "type": "string", + "replaceOnChanges": true + }, + "metadata": { + "$ref": "pulumi.json#/Any", + "description": "Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information." + }, + "metadataArtifact": { + "type": "string", + "description": "The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`." + }, + "metadataSchemaUri": { + "type": "string", + "description": "Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. 
The output URI will point to a location where the user only has a read access." + }, + "modelSourceInfo": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1ModelSourceInfoResponse", + "description": "Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model." + }, + "name": { + "type": "string", + "description": "The resource name of the Model." + }, + "originalModelInfo": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1ModelOriginalModelInfoResponse", + "description": "If this Model is a copy of another Model, this contains info about the original." + }, + "predictSchemata": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1PredictSchemataResponse", + "description": "The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain." + }, + "project": { + "type": "string", + "replaceOnChanges": true + }, + "supportedDeploymentResourcesTypes": { + "type": "array", + "items": { + "type": "string" + }, + "description": "When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats." + }, + "supportedExportFormats": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1ModelExportFormatResponse" + }, + "description": "The formats in which this Model may be exported. If empty, this Model is not available for export." + }, + "supportedInputStorageFormats": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain." 
+ }, + "supportedOutputStorageFormats": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain." + }, + "trainingPipeline": { + "type": "string", + "description": "The resource name of the TrainingPipeline that uploaded this Model, if any." + }, + "updateTime": { + "type": "string", + "description": "Timestamp when this Model was most recently updated." + }, + "versionAliases": { + "type": "array", + "items": { + "type": "string" + }, + "description": "User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model." + }, + "versionCreateTime": { + "type": "string", + "description": "Timestamp when this version was created." + }, + "versionDescription": { + "type": "string", + "description": "The description of this version." + }, + "versionId": { + "type": "string", + "description": "Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation." + }, + "versionUpdateTime": { + "type": "string", + "description": "Timestamp when this version was most recently updated." + } + }, + "type": "object", + "required": [ + "artifactUri", + "containerSpec", + "createTime", + "deployedModels", + "description", + "displayName", + "encryptionSpec", + "etag", + "explanationSpec", + "labels", + "location", + "metadata", + "metadataArtifact", + "metadataSchemaUri", + "modelSourceInfo", + "name", + "originalModelInfo", + "predictSchemata", + "project", + "supportedDeploymentResourcesTypes", + "supportedExportFormats", + "supportedInputStorageFormats", + "supportedOutputStorageFormats", + "trainingPipeline", + "updateTime", + "versionAliases", + "versionCreateTime", + "versionDescription", + "versionId", + "versionUpdateTime" + ], + "inputProperties": { + "artifactUri": { + "type": "string", + "description": "Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models." 
+ }, + "containerSpec": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1ModelContainerSpec", + "description": "Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models." + }, + "description": { + "type": "string", + "description": "The description of the Model." + }, + "displayName": { + "type": "string", + "description": "The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters." + }, + "encryptionSpec": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1EncryptionSpec", + "description": "Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key." + }, + "etag": { + "type": "string", + "description": "Used to perform consistent read-modify-write updates. If not set, a blind \"overwrite\" update happens." + }, + "explanationSpec": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1ExplanationSpec", + "description": "The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob." + }, + "labels": { + "type": "object", + "description": "The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels." + }, + "location": { + "type": "string", + "replaceOnChanges": true + }, + "metadata": { + "$ref": "pulumi.json#/Any", + "description": "Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information." + }, + "metadataSchemaUri": { + "type": "string", + "description": "Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access." + }, + "modelId": { + "type": "string", + "description": "Optional. 
The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen." + }, + "name": { + "type": "string", + "description": "The resource name of the Model." + }, + "parentModel": { + "type": "string", + "description": "Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version." + }, + "predictSchemata": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1PredictSchemata", + "description": "The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain." + }, + "project": { + "type": "string", + "replaceOnChanges": true + }, + "serviceAccount": { + "type": "string", + "description": "Optional. The user-provided custom service account to use to do the model upload. If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.)." + }, + "versionAliases": { + "type": "array", + "items": { + "type": "string" + }, + "description": "User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model." + }, + "versionDescription": { + "type": "string", + "description": "The description of this version." + } + }, + "requiredInputs": [ + "displayName" + ] + }, "google-native:aiplatform/v1beta1:ModelDeploymentMonitoringJob": { "description": "Creates a ModelDeploymentMonitoringJob. It will run periodically on a configured interval.\nAuto-naming is currently not supported for this resource.", "properties": { @@ -415711,6 +416841,102 @@ "displayName" ] }, + "google-native:cloudsearch/v1:Item": { + "description": "Creates an upload session for uploading item content. For items smaller than 100 KB, it's easier to embed the content inline within an index request. This API requires an admin or service account to execute. The service account used is the one whitelisted in the corresponding data source.\nAuto-naming is currently not supported for this resource.", + "properties": { + "acl": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:ItemAclResponse", + "description": "Access control list for this item." + }, + "content": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:ItemContentResponse", + "description": "Item content to be indexed and made text searchable." + }, + "datasourceId": { + "type": "string", + "replaceOnChanges": true + }, + "itemId": { + "type": "string", + "replaceOnChanges": true + }, + "itemType": { + "type": "string", + "description": "The type for this item." 
+ }, + "metadata": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:ItemMetadataResponse", + "description": "The metadata information." + }, + "name": { + "type": "string", + "description": "The name of the Item. Format: datasources/{source_id}/items/{item_id} This is a required field. The maximum length is 1536 characters." + }, + "payload": { + "type": "string", + "description": "Additional state connector can store for this item. The maximum length is 10000 bytes." + }, + "queue": { + "type": "string", + "description": "Queue this item belongs to. The maximum length is 100 characters." + }, + "status": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:ItemStatusResponse", + "description": "Status of the item. Output only field." + }, + "structuredData": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:ItemStructuredDataResponse", + "description": "The structured data for the item that should conform to a registered object definition in the schema for the data source." + }, + "version": { + "type": "string", + "description": "The indexing system stores the version from the datasource as a byte string and compares the Item version in the index to the version of the queued Item using lexical ordering. Cloud Search Indexing won't index or delete any queued item with a version value that is less than or equal to the version of the currently indexed item. The maximum length for this field is 1024 bytes. For information on how item version affects the deletion process, refer to [Handle revisions after manual deletes](https://developers.google.com/cloud-search/docs/guides/operations)." + } + }, + "type": "object", + "required": [ + "acl", + "content", + "datasourceId", + "itemId", + "itemType", + "metadata", + "name", + "payload", + "queue", + "status", + "structuredData", + "version" + ], + "inputProperties": { + "connectorName": { + "type": "string", + "description": "The name of connector making this call. Format: datasources/{source_id}/connectors/{ID}" + }, + "datasourceId": { + "type": "string", + "replaceOnChanges": true + }, + "debugOptions": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:DebugOptions", + "description": "Common debug options." + }, + "itemId": { + "type": "string", + "replaceOnChanges": true + } + }, + "requiredInputs": [ + "datasourceId", + "itemId" + ] + }, "google-native:cloudsearch/v1:SearchApplication": { "description": "Creates a search application. **Note:** This API requires an admin account to execute.", "properties": { @@ -608753,6 +609979,205 @@ ] } }, + "google-native:aiplatform/v1:getModel": { + "description": "Gets a Model.", + "inputs": { + "properties": { + "location": { + "type": "string" + }, + "modelId": { + "type": "string" + }, + "project": { + "type": "string" + } + }, + "type": "object", + "required": [ + "location", + "modelId" + ] + }, + "outputs": { + "properties": { + "artifactUri": { + "type": "string", + "description": "Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models." + }, + "containerSpec": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1ModelContainerSpecResponse", + "description": "Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. 
Not present for AutoML Models or Large Models." + }, + "createTime": { + "type": "string", + "description": "Timestamp when this Model was uploaded into Vertex AI." + }, + "deployedModels": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1DeployedModelRefResponse" + }, + "description": "The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations." + }, + "description": { + "type": "string", + "description": "The description of the Model." + }, + "displayName": { + "type": "string", + "description": "The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters." + }, + "encryptionSpec": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1EncryptionSpecResponse", + "description": "Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key." + }, + "etag": { + "type": "string", + "description": "Used to perform consistent read-modify-write updates. If not set, a blind \"overwrite\" update happens." + }, + "explanationSpec": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1ExplanationSpecResponse", + "description": "The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob." + }, + "labels": { + "type": "object", + "description": "The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels." + }, + "metadata": { + "$ref": "pulumi.json#/Any", + "description": "Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information." + }, + "metadataArtifact": { + "type": "string", + "description": "The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`." + }, + "metadataSchemaUri": { + "type": "string", + "description": "Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. 
Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access." + }, + "modelSourceInfo": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1ModelSourceInfoResponse", + "description": "Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model." + }, + "name": { + "type": "string", + "description": "The resource name of the Model." + }, + "originalModelInfo": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1ModelOriginalModelInfoResponse", + "description": "If this Model is a copy of another Model, this contains info about the original." + }, + "pipelineJob": { + "type": "string", + "description": "Optional. This field is populated if the model is produced by a pipeline job." + }, + "predictSchemata": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1PredictSchemataResponse", + "description": "The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain." + }, + "supportedDeploymentResourcesTypes": { + "type": "array", + "items": { + "type": "string" + }, + "description": "When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats." + }, + "supportedExportFormats": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1:GoogleCloudAiplatformV1ModelExportFormatResponse" + }, + "description": "The formats in which this Model may be exported. If empty, this Model is not available for export." + }, + "supportedInputStorageFormats": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. 
However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain." + }, + "supportedOutputStorageFormats": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain." + }, + "trainingPipeline": { + "type": "string", + "description": "The resource name of the TrainingPipeline that uploaded this Model, if any." + }, + "updateTime": { + "type": "string", + "description": "Timestamp when this Model was most recently updated." + }, + "versionAliases": { + "type": "array", + "items": { + "type": "string" + }, + "description": "User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model." + }, + "versionCreateTime": { + "type": "string", + "description": "Timestamp when this version was created." + }, + "versionDescription": { + "type": "string", + "description": "The description of this version." + }, + "versionId": { + "type": "string", + "description": "Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation." + }, + "versionUpdateTime": { + "type": "string", + "description": "Timestamp when this version was most recently updated." 
+ } + }, + "type": "object", + "required": [ + "artifactUri", + "containerSpec", + "createTime", + "deployedModels", + "description", + "displayName", + "encryptionSpec", + "etag", + "explanationSpec", + "labels", + "metadata", + "metadataArtifact", + "metadataSchemaUri", + "modelSourceInfo", + "name", + "originalModelInfo", + "pipelineJob", + "predictSchemata", + "supportedDeploymentResourcesTypes", + "supportedExportFormats", + "supportedInputStorageFormats", + "supportedOutputStorageFormats", + "trainingPipeline", + "updateTime", + "versionAliases", + "versionCreateTime", + "versionDescription", + "versionId", + "versionUpdateTime" + ] + } + }, "google-native:aiplatform/v1:getModelDeploymentMonitoringJob": { "description": "Gets a ModelDeploymentMonitoringJob.", "inputs": { @@ -612248,6 +613673,200 @@ ] } }, + "google-native:aiplatform/v1beta1:getModel": { + "description": "Gets a Model.", + "inputs": { + "properties": { + "location": { + "type": "string" + }, + "modelId": { + "type": "string" + }, + "project": { + "type": "string" + } + }, + "type": "object", + "required": [ + "location", + "modelId" + ] + }, + "outputs": { + "properties": { + "artifactUri": { + "type": "string", + "description": "Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models." + }, + "containerSpec": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1ModelContainerSpecResponse", + "description": "Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models." + }, + "createTime": { + "type": "string", + "description": "Timestamp when this Model was uploaded into Vertex AI." + }, + "deployedModels": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1DeployedModelRefResponse" + }, + "description": "The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations." + }, + "description": { + "type": "string", + "description": "The description of the Model." + }, + "displayName": { + "type": "string", + "description": "The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters." + }, + "encryptionSpec": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1EncryptionSpecResponse", + "description": "Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key." + }, + "etag": { + "type": "string", + "description": "Used to perform consistent read-modify-write updates. If not set, a blind \"overwrite\" update happens." + }, + "explanationSpec": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1ExplanationSpecResponse", + "description": "The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. 
If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob." + }, + "labels": { + "type": "object", + "description": "The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels." + }, + "metadata": { + "$ref": "pulumi.json#/Any", + "description": "Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information." + }, + "metadataArtifact": { + "type": "string", + "description": "The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`." + }, + "metadataSchemaUri": { + "type": "string", + "description": "Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access." + }, + "modelSourceInfo": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1ModelSourceInfoResponse", + "description": "Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model." + }, + "name": { + "type": "string", + "description": "The resource name of the Model." + }, + "originalModelInfo": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1ModelOriginalModelInfoResponse", + "description": "If this Model is a copy of another Model, this contains info about the original." + }, + "predictSchemata": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1PredictSchemataResponse", + "description": "The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain." + }, + "supportedDeploymentResourcesTypes": { + "type": "array", + "items": { + "type": "string" + }, + "description": "When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). 
Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats." + }, + "supportedExportFormats": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/types/google-native:aiplatform/v1beta1:GoogleCloudAiplatformV1beta1ModelExportFormatResponse" + }, + "description": "The formats in which this Model may be exported. If empty, this Model is not available for export." + }, + "supportedInputStorageFormats": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain." + }, + "supportedOutputStorageFormats": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain." + }, + "trainingPipeline": { + "type": "string", + "description": "The resource name of the TrainingPipeline that uploaded this Model, if any." + }, + "updateTime": { + "type": "string", + "description": "Timestamp when this Model was most recently updated." + }, + "versionAliases": { + "type": "array", + "items": { + "type": "string" + }, + "description": "User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. 
The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model." + }, + "versionCreateTime": { + "type": "string", + "description": "Timestamp when this version was created." + }, + "versionDescription": { + "type": "string", + "description": "The description of this version." + }, + "versionId": { + "type": "string", + "description": "Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation." + }, + "versionUpdateTime": { + "type": "string", + "description": "Timestamp when this version was most recently updated." + } + }, + "type": "object", + "required": [ + "artifactUri", + "containerSpec", + "createTime", + "deployedModels", + "description", + "displayName", + "encryptionSpec", + "etag", + "explanationSpec", + "labels", + "metadata", + "metadataArtifact", + "metadataSchemaUri", + "modelSourceInfo", + "name", + "originalModelInfo", + "predictSchemata", + "supportedDeploymentResourcesTypes", + "supportedExportFormats", + "supportedInputStorageFormats", + "supportedOutputStorageFormats", + "trainingPipeline", + "updateTime", + "versionAliases", + "versionCreateTime", + "versionDescription", + "versionId", + "versionUpdateTime" + ] + } + }, "google-native:aiplatform/v1beta1:getModelDeploymentMonitoringJob": { "description": "Gets a ModelDeploymentMonitoringJob.", "inputs": { @@ -635556,6 +637175,92 @@ ] } }, + "google-native:cloudsearch/v1:getItem": { + "description": "Gets Item resource by item name. This API requires an admin or service account to execute. The service account used is the one whitelisted in the corresponding data source.", + "inputs": { + "properties": { + "connectorName": { + "type": "string" + }, + "datasourceId": { + "type": "string" + }, + "debugOptionsEnableDebugging": { + "type": "boolean" + }, + "itemId": { + "type": "string" + } + }, + "type": "object", + "required": [ + "datasourceId", + "itemId" + ] + }, + "outputs": { + "properties": { + "acl": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:ItemAclResponse", + "description": "Access control list for this item." + }, + "content": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:ItemContentResponse", + "description": "Item content to be indexed and made text searchable." + }, + "itemType": { + "type": "string", + "description": "The type for this item." + }, + "metadata": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:ItemMetadataResponse", + "description": "The metadata information." + }, + "name": { + "type": "string", + "description": "The name of the Item. Format: datasources/{source_id}/items/{item_id} This is a required field. The maximum length is 1536 characters." + }, + "payload": { + "type": "string", + "description": "Additional state connector can store for this item. The maximum length is 10000 bytes." + }, + "queue": { + "type": "string", + "description": "Queue this item belongs to. The maximum length is 100 characters." + }, + "status": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:ItemStatusResponse", + "description": "Status of the item. Output only field." 
+ }, + "structuredData": { + "type": "object", + "$ref": "#/types/google-native:cloudsearch/v1:ItemStructuredDataResponse", + "description": "The structured data for the item that should conform to a registered object definition in the schema for the data source." + }, + "version": { + "type": "string", + "description": "The indexing system stores the version from the datasource as a byte string and compares the Item version in the index to the version of the queued Item using lexical ordering. Cloud Search Indexing won't index or delete any queued item with a version value that is less than or equal to the version of the currently indexed item. The maximum length for this field is 1024 bytes. For information on how item version affects the deletion process, refer to [Handle revisions after manual deletes](https://developers.google.com/cloud-search/docs/guides/operations)." + } + }, + "type": "object", + "required": [ + "acl", + "content", + "itemType", + "metadata", + "name", + "payload", + "queue", + "status", + "structuredData", + "version" + ] + } + }, "google-native:cloudsearch/v1:getSearchApplication": { "description": "Gets the specified search application. **Note:** This API requires an admin account to execute.", "inputs": { diff --git a/provider/pkg/gen/discovery.go b/provider/pkg/gen/discovery.go index 7825553392..e65832f6ec 100644 --- a/provider/pkg/gen/discovery.go +++ b/provider/pkg/gen/discovery.go @@ -113,7 +113,7 @@ func findResourcesImpl(docName, parentName string, rest map[string]discovery.Res switch methodName { case "create", "insert": createMethod = &restMethod - case "submit", "register": + case "submit", "register", "upload": if createMethod == nil { createMethod = &restMethod } diff --git a/sdk/dotnet/Aiplatform/V1/GetModel.cs b/sdk/dotnet/Aiplatform/V1/GetModel.cs new file mode 100644 index 0000000000..6b4bad7293 --- /dev/null +++ b/sdk/dotnet/Aiplatform/V1/GetModel.cs @@ -0,0 +1,274 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.Aiplatform.V1 +{ + public static class GetModel + { + /// <summary> + /// Gets a Model. + /// </summary> + public static Task<GetModelResult> InvokeAsync(GetModelArgs args, InvokeOptions? options = null) + => global::Pulumi.Deployment.Instance.InvokeAsync<GetModelResult>("google-native:aiplatform/v1:getModel", args ?? new GetModelArgs(), options.WithDefaults()); + + /// <summary> + /// Gets a Model. + /// </summary> + public static Output<GetModelResult> Invoke(GetModelInvokeArgs args, InvokeOptions? options = null) + => global::Pulumi.Deployment.Instance.Invoke<GetModelResult>("google-native:aiplatform/v1:getModel", args ?? new GetModelInvokeArgs(), options.WithDefaults()); + } + + + public sealed class GetModelArgs : global::Pulumi.InvokeArgs + { + [Input("location", required: true)] + public string Location { get; set; } = null!; + + [Input("modelId", required: true)] + public string ModelId { get; set; } = null!; + + [Input("project")] + public string?
Project { get; set; } + + public GetModelArgs() + { + } + public static new GetModelArgs Empty => new GetModelArgs(); + } + + public sealed class GetModelInvokeArgs : global::Pulumi.InvokeArgs + { + [Input("location", required: true)] + public Input Location { get; set; } = null!; + + [Input("modelId", required: true)] + public Input ModelId { get; set; } = null!; + + [Input("project")] + public Input? Project { get; set; } + + public GetModelInvokeArgs() + { + } + public static new GetModelInvokeArgs Empty => new GetModelInvokeArgs(); + } + + + [OutputType] + public sealed class GetModelResult + { + /// + /// Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + /// + public readonly string ArtifactUri; + /// + /// Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + /// + public readonly Outputs.GoogleCloudAiplatformV1ModelContainerSpecResponse ContainerSpec; + /// + /// Timestamp when this Model was uploaded into Vertex AI. + /// + public readonly string CreateTime; + /// + /// The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. + /// + public readonly ImmutableArray DeployedModels; + /// + /// The description of the Model. + /// + public readonly string Description; + /// + /// The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + /// + public readonly string DisplayName; + /// + /// Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + /// + public readonly Outputs.GoogleCloudAiplatformV1EncryptionSpecResponse EncryptionSpec; + /// + /// Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + /// + public readonly string Etag; + /// + /// The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + /// + public readonly Outputs.GoogleCloudAiplatformV1ExplanationSpecResponse ExplanationSpec; + /// + /// The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + /// + public readonly ImmutableDictionary Labels; + /// + /// Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. 
+ /// + public readonly object Metadata; + /// + /// The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. + /// + public readonly string MetadataArtifact; + /// + /// Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + /// + public readonly string MetadataSchemaUri; + /// + /// Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model. + /// + public readonly Outputs.GoogleCloudAiplatformV1ModelSourceInfoResponse ModelSourceInfo; + /// + /// The resource name of the Model. + /// + public readonly string Name; + /// + /// If this Model is a copy of another Model, this contains info about the original. + /// + public readonly Outputs.GoogleCloudAiplatformV1ModelOriginalModelInfoResponse OriginalModelInfo; + /// + /// Optional. This field is populated if the model is produced by a pipeline job. + /// + public readonly string PipelineJob; + /// + /// The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + /// + public readonly Outputs.GoogleCloudAiplatformV1PredictSchemataResponse PredictSchemata; + /// + /// When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. + /// + public readonly ImmutableArray SupportedDeploymentResourcesTypes; + /// + /// The formats in which this Model may be exported. If empty, this Model is not available for export. + /// + public readonly ImmutableArray SupportedExportFormats; + /// + /// The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. 
* `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + /// + public readonly ImmutableArray SupportedInputStorageFormats; + /// + /// The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + /// + public readonly ImmutableArray SupportedOutputStorageFormats; + /// + /// The resource name of the TrainingPipeline that uploaded this Model, if any. + /// + public readonly string TrainingPipeline; + /// + /// Timestamp when this Model was most recently updated. + /// + public readonly string UpdateTime; + /// + /// User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + /// + public readonly ImmutableArray VersionAliases; + /// + /// Timestamp when this version was created. + /// + public readonly string VersionCreateTime; + /// + /// The description of this version. + /// + public readonly string VersionDescription; + /// + /// Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation. + /// + public readonly string VersionId; + /// + /// Timestamp when this version was most recently updated. 
+ /// + public readonly string VersionUpdateTime; + + [OutputConstructor] + private GetModelResult( + string artifactUri, + + Outputs.GoogleCloudAiplatformV1ModelContainerSpecResponse containerSpec, + + string createTime, + + ImmutableArray deployedModels, + + string description, + + string displayName, + + Outputs.GoogleCloudAiplatformV1EncryptionSpecResponse encryptionSpec, + + string etag, + + Outputs.GoogleCloudAiplatformV1ExplanationSpecResponse explanationSpec, + + ImmutableDictionary labels, + + object metadata, + + string metadataArtifact, + + string metadataSchemaUri, + + Outputs.GoogleCloudAiplatformV1ModelSourceInfoResponse modelSourceInfo, + + string name, + + Outputs.GoogleCloudAiplatformV1ModelOriginalModelInfoResponse originalModelInfo, + + string pipelineJob, + + Outputs.GoogleCloudAiplatformV1PredictSchemataResponse predictSchemata, + + ImmutableArray supportedDeploymentResourcesTypes, + + ImmutableArray supportedExportFormats, + + ImmutableArray supportedInputStorageFormats, + + ImmutableArray supportedOutputStorageFormats, + + string trainingPipeline, + + string updateTime, + + ImmutableArray versionAliases, + + string versionCreateTime, + + string versionDescription, + + string versionId, + + string versionUpdateTime) + { + ArtifactUri = artifactUri; + ContainerSpec = containerSpec; + CreateTime = createTime; + DeployedModels = deployedModels; + Description = description; + DisplayName = displayName; + EncryptionSpec = encryptionSpec; + Etag = etag; + ExplanationSpec = explanationSpec; + Labels = labels; + Metadata = metadata; + MetadataArtifact = metadataArtifact; + MetadataSchemaUri = metadataSchemaUri; + ModelSourceInfo = modelSourceInfo; + Name = name; + OriginalModelInfo = originalModelInfo; + PipelineJob = pipelineJob; + PredictSchemata = predictSchemata; + SupportedDeploymentResourcesTypes = supportedDeploymentResourcesTypes; + SupportedExportFormats = supportedExportFormats; + SupportedInputStorageFormats = supportedInputStorageFormats; + SupportedOutputStorageFormats = supportedOutputStorageFormats; + TrainingPipeline = trainingPipeline; + UpdateTime = updateTime; + VersionAliases = versionAliases; + VersionCreateTime = versionCreateTime; + VersionDescription = versionDescription; + VersionId = versionId; + VersionUpdateTime = versionUpdateTime; + } + } +} diff --git a/sdk/dotnet/Aiplatform/V1/Model.cs b/sdk/dotnet/Aiplatform/V1/Model.cs new file mode 100644 index 0000000000..0f8c82db99 --- /dev/null +++ b/sdk/dotnet/Aiplatform/V1/Model.cs @@ -0,0 +1,379 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.Aiplatform.V1 +{ + /// + /// Uploads a Model artifact into Vertex AI. + /// + [GoogleNativeResourceType("google-native:aiplatform/v1:Model")] + public partial class Model : global::Pulumi.CustomResource + { + /// + /// Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + /// + [Output("artifactUri")] + public Output ArtifactUri { get; private set; } = null!; + + /// + /// Input only. The specification of the container that is to be used when deploying this Model. 
The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + /// + [Output("containerSpec")] + public Output ContainerSpec { get; private set; } = null!; + + /// + /// Timestamp when this Model was uploaded into Vertex AI. + /// + [Output("createTime")] + public Output CreateTime { get; private set; } = null!; + + /// + /// The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. + /// + [Output("deployedModels")] + public Output> DeployedModels { get; private set; } = null!; + + /// + /// The description of the Model. + /// + [Output("description")] + public Output Description { get; private set; } = null!; + + /// + /// The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + /// + [Output("displayName")] + public Output DisplayName { get; private set; } = null!; + + /// + /// Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + /// + [Output("encryptionSpec")] + public Output EncryptionSpec { get; private set; } = null!; + + /// + /// Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + /// + [Output("etag")] + public Output Etag { get; private set; } = null!; + + /// + /// The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + /// + [Output("explanationSpec")] + public Output ExplanationSpec { get; private set; } = null!; + + /// + /// The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + /// + [Output("labels")] + public Output> Labels { get; private set; } = null!; + + [Output("location")] + public Output Location { get; private set; } = null!; + + /// + /// Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + /// + [Output("metadata")] + public Output Metadata { get; private set; } = null!; + + /// + /// The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. + /// + [Output("metadataArtifact")] + public Output MetadataArtifact { get; private set; } = null!; + + /// + /// Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. 
The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + /// + [Output("metadataSchemaUri")] + public Output MetadataSchemaUri { get; private set; } = null!; + + /// + /// Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model. + /// + [Output("modelSourceInfo")] + public Output ModelSourceInfo { get; private set; } = null!; + + /// + /// The resource name of the Model. + /// + [Output("name")] + public Output Name { get; private set; } = null!; + + /// + /// If this Model is a copy of another Model, this contains info about the original. + /// + [Output("originalModelInfo")] + public Output OriginalModelInfo { get; private set; } = null!; + + /// + /// Optional. This field is populated if the model is produced by a pipeline job. + /// + [Output("pipelineJob")] + public Output PipelineJob { get; private set; } = null!; + + /// + /// The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + /// + [Output("predictSchemata")] + public Output PredictSchemata { get; private set; } = null!; + + [Output("project")] + public Output Project { get; private set; } = null!; + + /// + /// When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. + /// + [Output("supportedDeploymentResourcesTypes")] + public Output> SupportedDeploymentResourcesTypes { get; private set; } = null!; + + /// + /// The formats in which this Model may be exported. If empty, this Model is not available for export. + /// + [Output("supportedExportFormats")] + public Output> SupportedExportFormats { get; private set; } = null!; + + /// + /// The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. 
* `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + /// + [Output("supportedInputStorageFormats")] + public Output> SupportedInputStorageFormats { get; private set; } = null!; + + /// + /// The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + /// + [Output("supportedOutputStorageFormats")] + public Output> SupportedOutputStorageFormats { get; private set; } = null!; + + /// + /// The resource name of the TrainingPipeline that uploaded this Model, if any. + /// + [Output("trainingPipeline")] + public Output TrainingPipeline { get; private set; } = null!; + + /// + /// Timestamp when this Model was most recently updated. + /// + [Output("updateTime")] + public Output UpdateTime { get; private set; } = null!; + + /// + /// User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + /// + [Output("versionAliases")] + public Output> VersionAliases { get; private set; } = null!; + + /// + /// Timestamp when this version was created. + /// + [Output("versionCreateTime")] + public Output VersionCreateTime { get; private set; } = null!; + + /// + /// The description of this version. + /// + [Output("versionDescription")] + public Output VersionDescription { get; private set; } = null!; + + /// + /// Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation. + /// + [Output("versionId")] + public Output VersionId { get; private set; } = null!; + + /// + /// Timestamp when this version was most recently updated. + /// + [Output("versionUpdateTime")] + public Output VersionUpdateTime { get; private set; } = null!; + + + /// + /// Create a Model resource with the given unique name, arguments, and options. 
+ /// + /// + /// The unique name of the resource + /// The arguments used to populate this resource's properties + /// A bag of options that control this resource's behavior + public Model(string name, ModelArgs args, CustomResourceOptions? options = null) + : base("google-native:aiplatform/v1:Model", name, args ?? new ModelArgs(), MakeResourceOptions(options, "")) + { + } + + private Model(string name, Input id, CustomResourceOptions? options = null) + : base("google-native:aiplatform/v1:Model", name, null, MakeResourceOptions(options, id)) + { + } + + private static CustomResourceOptions MakeResourceOptions(CustomResourceOptions? options, Input? id) + { + var defaultOptions = new CustomResourceOptions + { + Version = Utilities.Version, + ReplaceOnChanges = + { + "location", + "project", + }, + }; + var merged = CustomResourceOptions.Merge(defaultOptions, options); + // Override the ID if one was specified for consistency with other language SDKs. + merged.Id = id ?? merged.Id; + return merged; + } + /// + /// Get an existing Model resource's state with the given name, ID, and optional extra + /// properties used to qualify the lookup. + /// + /// + /// The unique name of the resulting resource. + /// The unique provider ID of the resource to lookup. + /// A bag of options that control this resource's behavior + public static Model Get(string name, Input id, CustomResourceOptions? options = null) + { + return new Model(name, id, options); + } + } + + public sealed class ModelArgs : global::Pulumi.ResourceArgs + { + /// + /// Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + /// + [Input("artifactUri")] + public Input? ArtifactUri { get; set; } + + /// + /// Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + /// + [Input("containerSpec")] + public Input? ContainerSpec { get; set; } + + /// + /// The description of the Model. + /// + [Input("description")] + public Input? Description { get; set; } + + /// + /// The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + /// + [Input("displayName", required: true)] + public Input DisplayName { get; set; } = null!; + + /// + /// Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + /// + [Input("encryptionSpec")] + public Input? EncryptionSpec { get; set; } + + /// + /// Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + /// + [Input("etag")] + public Input? Etag { get; set; } + + /// + /// The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. 
+ /// + [Input("explanationSpec")] + public Input? ExplanationSpec { get; set; } + + [Input("labels")] + private InputMap? _labels; + + /// + /// The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + /// + public InputMap Labels + { + get => _labels ?? (_labels = new InputMap()); + set => _labels = value; + } + + [Input("location")] + public Input? Location { get; set; } + + /// + /// Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + /// + [Input("metadata")] + public Input? Metadata { get; set; } + + /// + /// Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + /// + [Input("metadataSchemaUri")] + public Input? MetadataSchemaUri { get; set; } + + /// + /// Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. + /// + [Input("modelId")] + public Input? ModelId { get; set; } + + /// + /// The resource name of the Model. + /// + [Input("name")] + public Input? Name { get; set; } + + /// + /// Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version. + /// + [Input("parentModel")] + public Input? ParentModel { get; set; } + + /// + /// Optional. This field is populated if the model is produced by a pipeline job. + /// + [Input("pipelineJob")] + public Input? PipelineJob { get; set; } + + /// + /// The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + /// + [Input("predictSchemata")] + public Input? PredictSchemata { get; set; } + + [Input("project")] + public Input? Project { get; set; } + + /// + /// Optional. The user-provided custom service account to use to do the model upload. If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.). + /// + [Input("serviceAccount")] + public Input? ServiceAccount { get; set; } + + [Input("versionAliases")] + private InputList? 
_versionAliases; + + /// + /// User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + /// + public InputList VersionAliases + { + get => _versionAliases ?? (_versionAliases = new InputList()); + set => _versionAliases = value; + } + + /// + /// The description of this version. + /// + [Input("versionDescription")] + public Input? VersionDescription { get; set; } + + public ModelArgs() + { + } + public static new ModelArgs Empty => new ModelArgs(); + } +} diff --git a/sdk/dotnet/Aiplatform/V1Beta1/GetModel.cs b/sdk/dotnet/Aiplatform/V1Beta1/GetModel.cs new file mode 100644 index 0000000000..2f555eb1ed --- /dev/null +++ b/sdk/dotnet/Aiplatform/V1Beta1/GetModel.cs @@ -0,0 +1,267 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.Aiplatform.V1Beta1 +{ + public static class GetModel + { + /// + /// Gets a Model. + /// + public static Task InvokeAsync(GetModelArgs args, InvokeOptions? options = null) + => global::Pulumi.Deployment.Instance.InvokeAsync("google-native:aiplatform/v1beta1:getModel", args ?? new GetModelArgs(), options.WithDefaults()); + + /// + /// Gets a Model. + /// + public static Output Invoke(GetModelInvokeArgs args, InvokeOptions? options = null) + => global::Pulumi.Deployment.Instance.Invoke("google-native:aiplatform/v1beta1:getModel", args ?? new GetModelInvokeArgs(), options.WithDefaults()); + } + + + public sealed class GetModelArgs : global::Pulumi.InvokeArgs + { + [Input("location", required: true)] + public string Location { get; set; } = null!; + + [Input("modelId", required: true)] + public string ModelId { get; set; } = null!; + + [Input("project")] + public string? Project { get; set; } + + public GetModelArgs() + { + } + public static new GetModelArgs Empty => new GetModelArgs(); + } + + public sealed class GetModelInvokeArgs : global::Pulumi.InvokeArgs + { + [Input("location", required: true)] + public Input Location { get; set; } = null!; + + [Input("modelId", required: true)] + public Input ModelId { get; set; } = null!; + + [Input("project")] + public Input? Project { get; set; } + + public GetModelInvokeArgs() + { + } + public static new GetModelInvokeArgs Empty => new GetModelInvokeArgs(); + } + + + [OutputType] + public sealed class GetModelResult + { + /// + /// Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + /// + public readonly string ArtifactUri; + /// + /// Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. 
+ /// + public readonly Outputs.GoogleCloudAiplatformV1beta1ModelContainerSpecResponse ContainerSpec; + /// + /// Timestamp when this Model was uploaded into Vertex AI. + /// + public readonly string CreateTime; + /// + /// The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. + /// + public readonly ImmutableArray DeployedModels; + /// + /// The description of the Model. + /// + public readonly string Description; + /// + /// The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + /// + public readonly string DisplayName; + /// + /// Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + /// + public readonly Outputs.GoogleCloudAiplatformV1beta1EncryptionSpecResponse EncryptionSpec; + /// + /// Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + /// + public readonly string Etag; + /// + /// The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + /// + public readonly Outputs.GoogleCloudAiplatformV1beta1ExplanationSpecResponse ExplanationSpec; + /// + /// The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + /// + public readonly ImmutableDictionary Labels; + /// + /// Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + /// + public readonly object Metadata; + /// + /// The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. + /// + public readonly string MetadataArtifact; + /// + /// Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + /// + public readonly string MetadataSchemaUri; + /// + /// Source of a model. 
It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model. + /// + public readonly Outputs.GoogleCloudAiplatformV1beta1ModelSourceInfoResponse ModelSourceInfo; + /// + /// The resource name of the Model. + /// + public readonly string Name; + /// + /// If this Model is a copy of another Model, this contains info about the original. + /// + public readonly Outputs.GoogleCloudAiplatformV1beta1ModelOriginalModelInfoResponse OriginalModelInfo; + /// + /// The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + /// + public readonly Outputs.GoogleCloudAiplatformV1beta1PredictSchemataResponse PredictSchemata; + /// + /// When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. + /// + public readonly ImmutableArray SupportedDeploymentResourcesTypes; + /// + /// The formats in which this Model may be exported. If empty, this Model is not available for export. + /// + public readonly ImmutableArray SupportedExportFormats; + /// + /// The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + /// + public readonly ImmutableArray SupportedInputStorageFormats; + /// + /// The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. 
* `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + /// + public readonly ImmutableArray SupportedOutputStorageFormats; + /// + /// The resource name of the TrainingPipeline that uploaded this Model, if any. + /// + public readonly string TrainingPipeline; + /// + /// Timestamp when this Model was most recently updated. + /// + public readonly string UpdateTime; + /// + /// User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + /// + public readonly ImmutableArray VersionAliases; + /// + /// Timestamp when this version was created. + /// + public readonly string VersionCreateTime; + /// + /// The description of this version. + /// + public readonly string VersionDescription; + /// + /// Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation. + /// + public readonly string VersionId; + /// + /// Timestamp when this version was most recently updated. + /// + public readonly string VersionUpdateTime; + + [OutputConstructor] + private GetModelResult( + string artifactUri, + + Outputs.GoogleCloudAiplatformV1beta1ModelContainerSpecResponse containerSpec, + + string createTime, + + ImmutableArray deployedModels, + + string description, + + string displayName, + + Outputs.GoogleCloudAiplatformV1beta1EncryptionSpecResponse encryptionSpec, + + string etag, + + Outputs.GoogleCloudAiplatformV1beta1ExplanationSpecResponse explanationSpec, + + ImmutableDictionary labels, + + object metadata, + + string metadataArtifact, + + string metadataSchemaUri, + + Outputs.GoogleCloudAiplatformV1beta1ModelSourceInfoResponse modelSourceInfo, + + string name, + + Outputs.GoogleCloudAiplatformV1beta1ModelOriginalModelInfoResponse originalModelInfo, + + Outputs.GoogleCloudAiplatformV1beta1PredictSchemataResponse predictSchemata, + + ImmutableArray supportedDeploymentResourcesTypes, + + ImmutableArray supportedExportFormats, + + ImmutableArray supportedInputStorageFormats, + + ImmutableArray supportedOutputStorageFormats, + + string trainingPipeline, + + string updateTime, + + ImmutableArray versionAliases, + + string versionCreateTime, + + string versionDescription, + + string versionId, + + string versionUpdateTime) + { + ArtifactUri = artifactUri; + ContainerSpec = containerSpec; + CreateTime = createTime; + DeployedModels = deployedModels; + Description = description; + DisplayName = displayName; + EncryptionSpec = encryptionSpec; + Etag = etag; + ExplanationSpec = explanationSpec; + Labels = labels; + Metadata = metadata; + MetadataArtifact = metadataArtifact; + MetadataSchemaUri = metadataSchemaUri; + ModelSourceInfo = modelSourceInfo; + Name = name; + OriginalModelInfo = originalModelInfo; + PredictSchemata = predictSchemata; + 
SupportedDeploymentResourcesTypes = supportedDeploymentResourcesTypes; + SupportedExportFormats = supportedExportFormats; + SupportedInputStorageFormats = supportedInputStorageFormats; + SupportedOutputStorageFormats = supportedOutputStorageFormats; + TrainingPipeline = trainingPipeline; + UpdateTime = updateTime; + VersionAliases = versionAliases; + VersionCreateTime = versionCreateTime; + VersionDescription = versionDescription; + VersionId = versionId; + VersionUpdateTime = versionUpdateTime; + } + } +} diff --git a/sdk/dotnet/Aiplatform/V1Beta1/Model.cs b/sdk/dotnet/Aiplatform/V1Beta1/Model.cs new file mode 100644 index 0000000000..500ccb1ba3 --- /dev/null +++ b/sdk/dotnet/Aiplatform/V1Beta1/Model.cs @@ -0,0 +1,367 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.Aiplatform.V1Beta1 +{ + /// + /// Uploads a Model artifact into Vertex AI. + /// + [GoogleNativeResourceType("google-native:aiplatform/v1beta1:Model")] + public partial class Model : global::Pulumi.CustomResource + { + /// + /// Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + /// + [Output("artifactUri")] + public Output ArtifactUri { get; private set; } = null!; + + /// + /// Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + /// + [Output("containerSpec")] + public Output ContainerSpec { get; private set; } = null!; + + /// + /// Timestamp when this Model was uploaded into Vertex AI. + /// + [Output("createTime")] + public Output CreateTime { get; private set; } = null!; + + /// + /// The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. + /// + [Output("deployedModels")] + public Output> DeployedModels { get; private set; } = null!; + + /// + /// The description of the Model. + /// + [Output("description")] + public Output Description { get; private set; } = null!; + + /// + /// The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + /// + [Output("displayName")] + public Output DisplayName { get; private set; } = null!; + + /// + /// Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + /// + [Output("encryptionSpec")] + public Output EncryptionSpec { get; private set; } = null!; + + /// + /// Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + /// + [Output("etag")] + public Output Etag { get; private set; } = null!; + + /// + /// The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. 
If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + /// + [Output("explanationSpec")] + public Output ExplanationSpec { get; private set; } = null!; + + /// + /// The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + /// + [Output("labels")] + public Output> Labels { get; private set; } = null!; + + [Output("location")] + public Output Location { get; private set; } = null!; + + /// + /// Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + /// + [Output("metadata")] + public Output Metadata { get; private set; } = null!; + + /// + /// The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. + /// + [Output("metadataArtifact")] + public Output MetadataArtifact { get; private set; } = null!; + + /// + /// Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + /// + [Output("metadataSchemaUri")] + public Output MetadataSchemaUri { get; private set; } = null!; + + /// + /// Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model. + /// + [Output("modelSourceInfo")] + public Output ModelSourceInfo { get; private set; } = null!; + + /// + /// The resource name of the Model. + /// + [Output("name")] + public Output Name { get; private set; } = null!; + + /// + /// If this Model is a copy of another Model, this contains info about the original. + /// + [Output("originalModelInfo")] + public Output OriginalModelInfo { get; private set; } = null!; + + /// + /// The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + /// + [Output("predictSchemata")] + public Output PredictSchemata { get; private set; } = null!; + + [Output("project")] + public Output Project { get; private set; } = null!; + + /// + /// When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. 
If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. + /// + [Output("supportedDeploymentResourcesTypes")] + public Output> SupportedDeploymentResourcesTypes { get; private set; } = null!; + + /// + /// The formats in which this Model may be exported. If empty, this Model is not available for export. + /// + [Output("supportedExportFormats")] + public Output> SupportedExportFormats { get; private set; } = null!; + + /// + /// The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + /// + [Output("supportedInputStorageFormats")] + public Output> SupportedInputStorageFormats { get; private set; } = null!; + + /// + /// The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + /// + [Output("supportedOutputStorageFormats")] + public Output> SupportedOutputStorageFormats { get; private set; } = null!; + + /// + /// The resource name of the TrainingPipeline that uploaded this Model, if any. + /// + [Output("trainingPipeline")] + public Output TrainingPipeline { get; private set; } = null!; + + /// + /// Timestamp when this Model was most recently updated. 
+ /// + [Output("updateTime")] + public Output UpdateTime { get; private set; } = null!; + + /// + /// User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + /// + [Output("versionAliases")] + public Output> VersionAliases { get; private set; } = null!; + + /// + /// Timestamp when this version was created. + /// + [Output("versionCreateTime")] + public Output VersionCreateTime { get; private set; } = null!; + + /// + /// The description of this version. + /// + [Output("versionDescription")] + public Output VersionDescription { get; private set; } = null!; + + /// + /// Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation. + /// + [Output("versionId")] + public Output VersionId { get; private set; } = null!; + + /// + /// Timestamp when this version was most recently updated. + /// + [Output("versionUpdateTime")] + public Output VersionUpdateTime { get; private set; } = null!; + + + /// + /// Create a Model resource with the given unique name, arguments, and options. + /// + /// + /// The unique name of the resource + /// The arguments used to populate this resource's properties + /// A bag of options that control this resource's behavior + public Model(string name, ModelArgs args, CustomResourceOptions? options = null) + : base("google-native:aiplatform/v1beta1:Model", name, args ?? new ModelArgs(), MakeResourceOptions(options, "")) + { + } + + private Model(string name, Input id, CustomResourceOptions? options = null) + : base("google-native:aiplatform/v1beta1:Model", name, null, MakeResourceOptions(options, id)) + { + } + + private static CustomResourceOptions MakeResourceOptions(CustomResourceOptions? options, Input? id) + { + var defaultOptions = new CustomResourceOptions + { + Version = Utilities.Version, + ReplaceOnChanges = + { + "location", + "project", + }, + }; + var merged = CustomResourceOptions.Merge(defaultOptions, options); + // Override the ID if one was specified for consistency with other language SDKs. + merged.Id = id ?? merged.Id; + return merged; + } + /// + /// Get an existing Model resource's state with the given name, ID, and optional extra + /// properties used to qualify the lookup. + /// + /// + /// The unique name of the resulting resource. + /// The unique provider ID of the resource to lookup. + /// A bag of options that control this resource's behavior + public static Model Get(string name, Input id, CustomResourceOptions? options = null) + { + return new Model(name, id, options); + } + } + + public sealed class ModelArgs : global::Pulumi.ResourceArgs + { + /// + /// Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + /// + [Input("artifactUri")] + public Input? ArtifactUri { get; set; } + + /// + /// Input only. The specification of the container that is to be used when deploying this Model. 
The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + /// + [Input("containerSpec")] + public Input? ContainerSpec { get; set; } + + /// + /// The description of the Model. + /// + [Input("description")] + public Input? Description { get; set; } + + /// + /// The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + /// + [Input("displayName", required: true)] + public Input DisplayName { get; set; } = null!; + + /// + /// Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + /// + [Input("encryptionSpec")] + public Input? EncryptionSpec { get; set; } + + /// + /// Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + /// + [Input("etag")] + public Input? Etag { get; set; } + + /// + /// The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + /// + [Input("explanationSpec")] + public Input? ExplanationSpec { get; set; } + + [Input("labels")] + private InputMap? _labels; + + /// + /// The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + /// + public InputMap Labels + { + get => _labels ?? (_labels = new InputMap()); + set => _labels = value; + } + + [Input("location")] + public Input? Location { get; set; } + + /// + /// Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + /// + [Input("metadata")] + public Input? Metadata { get; set; } + + /// + /// Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + /// + [Input("metadataSchemaUri")] + public Input? MetadataSchemaUri { get; set; } + + /// + /// Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. 
The first character cannot be a number or hyphen. + /// + [Input("modelId")] + public Input? ModelId { get; set; } + + /// + /// The resource name of the Model. + /// + [Input("name")] + public Input? Name { get; set; } + + /// + /// Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version. + /// + [Input("parentModel")] + public Input? ParentModel { get; set; } + + /// + /// The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + /// + [Input("predictSchemata")] + public Input? PredictSchemata { get; set; } + + [Input("project")] + public Input? Project { get; set; } + + /// + /// Optional. The user-provided custom service account to use to do the model upload. If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.). + /// + [Input("serviceAccount")] + public Input? ServiceAccount { get; set; } + + [Input("versionAliases")] + private InputList? _versionAliases; + + /// + /// User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + /// + public InputList VersionAliases + { + get => _versionAliases ?? (_versionAliases = new InputList()); + set => _versionAliases = value; + } + + /// + /// The description of this version. + /// + [Input("versionDescription")] + public Input? VersionDescription { get; set; } + + public ModelArgs() + { + } + public static new ModelArgs Empty => new ModelArgs(); + } +} diff --git a/sdk/dotnet/CloudSearch/V1/GetItem.cs b/sdk/dotnet/CloudSearch/V1/GetItem.cs new file mode 100644 index 0000000000..f73d7e3b51 --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/GetItem.cs @@ -0,0 +1,147 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1 +{ + public static class GetItem + { + /// + /// Gets Item resource by item name. This API requires an admin or service account to execute. The service account used is the one whitelisted in the corresponding data source. + /// + public static Task InvokeAsync(GetItemArgs args, InvokeOptions? options = null) + => global::Pulumi.Deployment.Instance.InvokeAsync("google-native:cloudsearch/v1:getItem", args ?? new GetItemArgs(), options.WithDefaults()); + + /// + /// Gets Item resource by item name. This API requires an admin or service account to execute. The service account used is the one whitelisted in the corresponding data source. 
+ /// + public static Output Invoke(GetItemInvokeArgs args, InvokeOptions? options = null) + => global::Pulumi.Deployment.Instance.Invoke("google-native:cloudsearch/v1:getItem", args ?? new GetItemInvokeArgs(), options.WithDefaults()); + } + + + public sealed class GetItemArgs : global::Pulumi.InvokeArgs + { + [Input("connectorName")] + public string? ConnectorName { get; set; } + + [Input("datasourceId", required: true)] + public string DatasourceId { get; set; } = null!; + + [Input("debugOptionsEnableDebugging")] + public bool? DebugOptionsEnableDebugging { get; set; } + + [Input("itemId", required: true)] + public string ItemId { get; set; } = null!; + + public GetItemArgs() + { + } + public static new GetItemArgs Empty => new GetItemArgs(); + } + + public sealed class GetItemInvokeArgs : global::Pulumi.InvokeArgs + { + [Input("connectorName")] + public Input? ConnectorName { get; set; } + + [Input("datasourceId", required: true)] + public Input DatasourceId { get; set; } = null!; + + [Input("debugOptionsEnableDebugging")] + public Input? DebugOptionsEnableDebugging { get; set; } + + [Input("itemId", required: true)] + public Input ItemId { get; set; } = null!; + + public GetItemInvokeArgs() + { + } + public static new GetItemInvokeArgs Empty => new GetItemInvokeArgs(); + } + + + [OutputType] + public sealed class GetItemResult + { + /// + /// Access control list for this item. + /// + public readonly Outputs.ItemAclResponse Acl; + /// + /// Item content to be indexed and made text searchable. + /// + public readonly Outputs.ItemContentResponse Content; + /// + /// The type for this item. + /// + public readonly string ItemType; + /// + /// The metadata information. + /// + public readonly Outputs.ItemMetadataResponse Metadata; + /// + /// The name of the Item. Format: datasources/{source_id}/items/{item_id} This is a required field. The maximum length is 1536 characters. + /// + public readonly string Name; + /// + /// Additional state connector can store for this item. The maximum length is 10000 bytes. + /// + public readonly string Payload; + /// + /// Queue this item belongs to. The maximum length is 100 characters. + /// + public readonly string Queue; + /// + /// Status of the item. Output only field. + /// + public readonly Outputs.ItemStatusResponse Status; + /// + /// The structured data for the item that should conform to a registered object definition in the schema for the data source. + /// + public readonly Outputs.ItemStructuredDataResponse StructuredData; + /// + /// The indexing system stores the version from the datasource as a byte string and compares the Item version in the index to the version of the queued Item using lexical ordering. Cloud Search Indexing won't index or delete any queued item with a version value that is less than or equal to the version of the currently indexed item. The maximum length for this field is 1024 bytes. For information on how item version affects the deletion process, refer to [Handle revisions after manual deletes](https://developers.google.com/cloud-search/docs/guides/operations). 
+ /// + public readonly string Version; + + [OutputConstructor] + private GetItemResult( + Outputs.ItemAclResponse acl, + + Outputs.ItemContentResponse content, + + string itemType, + + Outputs.ItemMetadataResponse metadata, + + string name, + + string payload, + + string queue, + + Outputs.ItemStatusResponse status, + + Outputs.ItemStructuredDataResponse structuredData, + + string version) + { + Acl = acl; + Content = content; + ItemType = itemType; + Metadata = metadata; + Name = name; + Payload = payload; + Queue = queue; + Status = status; + StructuredData = structuredData; + Version = version; + } + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Inputs/DebugOptionsArgs.cs b/sdk/dotnet/CloudSearch/V1/Inputs/DebugOptionsArgs.cs new file mode 100644 index 0000000000..d1056609b2 --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Inputs/DebugOptionsArgs.cs @@ -0,0 +1,29 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Inputs +{ + + /// + /// Shared request debug options for all cloudsearch RPC methods. + /// + public sealed class DebugOptionsArgs : global::Pulumi.ResourceArgs + { + /// + /// If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field. + /// + [Input("enableDebugging")] + public Input? EnableDebugging { get; set; } + + public DebugOptionsArgs() + { + } + public static new DebugOptionsArgs Empty => new DebugOptionsArgs(); + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Item.cs b/sdk/dotnet/CloudSearch/V1/Item.cs new file mode 100644 index 0000000000..21f8ee2c5a --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Item.cs @@ -0,0 +1,158 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1 +{ + /// + /// Creates an upload session for uploading item content. For items smaller than 100 KB, it's easier to embed the content inline within an index request. This API requires an admin or service account to execute. The service account used is the one whitelisted in the corresponding data source. + /// Auto-naming is currently not supported for this resource. + /// + [GoogleNativeResourceType("google-native:cloudsearch/v1:Item")] + public partial class Item : global::Pulumi.CustomResource + { + /// + /// Access control list for this item. + /// + [Output("acl")] + public Output Acl { get; private set; } = null!; + + /// + /// Item content to be indexed and made text searchable. + /// + [Output("content")] + public Output Content { get; private set; } = null!; + + [Output("datasourceId")] + public Output DatasourceId { get; private set; } = null!; + + [Output("itemId")] + public Output ItemId { get; private set; } = null!; + + /// + /// The type for this item. + /// + [Output("itemType")] + public Output ItemType { get; private set; } = null!; + + /// + /// The metadata information. + /// + [Output("metadata")] + public Output Metadata { get; private set; } = null!; + + /// + /// The name of the Item. 
Format: datasources/{source_id}/items/{item_id} This is a required field. The maximum length is 1536 characters. + /// + [Output("name")] + public Output Name { get; private set; } = null!; + + /// + /// Additional state connector can store for this item. The maximum length is 10000 bytes. + /// + [Output("payload")] + public Output Payload { get; private set; } = null!; + + /// + /// Queue this item belongs to. The maximum length is 100 characters. + /// + [Output("queue")] + public Output Queue { get; private set; } = null!; + + /// + /// Status of the item. Output only field. + /// + [Output("status")] + public Output Status { get; private set; } = null!; + + /// + /// The structured data for the item that should conform to a registered object definition in the schema for the data source. + /// + [Output("structuredData")] + public Output StructuredData { get; private set; } = null!; + + /// + /// The indexing system stores the version from the datasource as a byte string and compares the Item version in the index to the version of the queued Item using lexical ordering. Cloud Search Indexing won't index or delete any queued item with a version value that is less than or equal to the version of the currently indexed item. The maximum length for this field is 1024 bytes. For information on how item version affects the deletion process, refer to [Handle revisions after manual deletes](https://developers.google.com/cloud-search/docs/guides/operations). + /// + [Output("version")] + public Output Version { get; private set; } = null!; + + + /// + /// Create a Item resource with the given unique name, arguments, and options. + /// + /// + /// The unique name of the resource + /// The arguments used to populate this resource's properties + /// A bag of options that control this resource's behavior + public Item(string name, ItemArgs args, CustomResourceOptions? options = null) + : base("google-native:cloudsearch/v1:Item", name, args ?? new ItemArgs(), MakeResourceOptions(options, "")) + { + } + + private Item(string name, Input id, CustomResourceOptions? options = null) + : base("google-native:cloudsearch/v1:Item", name, null, MakeResourceOptions(options, id)) + { + } + + private static CustomResourceOptions MakeResourceOptions(CustomResourceOptions? options, Input? id) + { + var defaultOptions = new CustomResourceOptions + { + Version = Utilities.Version, + ReplaceOnChanges = + { + "datasourceId", + "itemId", + }, + }; + var merged = CustomResourceOptions.Merge(defaultOptions, options); + // Override the ID if one was specified for consistency with other language SDKs. + merged.Id = id ?? merged.Id; + return merged; + } + /// + /// Get an existing Item resource's state with the given name, ID, and optional extra + /// properties used to qualify the lookup. + /// + /// + /// The unique name of the resulting resource. + /// The unique provider ID of the resource to lookup. + /// A bag of options that control this resource's behavior + public static Item Get(string name, Input id, CustomResourceOptions? options = null) + { + return new Item(name, id, options); + } + } + + public sealed class ItemArgs : global::Pulumi.ResourceArgs + { + /// + /// The name of connector making this call. Format: datasources/{source_id}/connectors/{ID} + /// + [Input("connectorName")] + public Input? ConnectorName { get; set; } + + [Input("datasourceId", required: true)] + public Input DatasourceId { get; set; } = null!; + + /// + /// Common debug options. + /// + [Input("debugOptions")] + public Input? 
DebugOptions { get; set; } + + [Input("itemId", required: true)] + public Input ItemId { get; set; } = null!; + + public ItemArgs() + { + } + public static new ItemArgs Empty => new ItemArgs(); + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Outputs/ContextAttributeResponse.cs b/sdk/dotnet/CloudSearch/V1/Outputs/ContextAttributeResponse.cs new file mode 100644 index 0000000000..23723ca9c6 --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Outputs/ContextAttributeResponse.cs @@ -0,0 +1,38 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Outputs +{ + + /// + /// A named attribute associated with an item which can be used for influencing the ranking of the item based on the context in the request. + /// + [OutputType] + public sealed class ContextAttributeResponse + { + /// + /// The name of the attribute. It should not be empty. The maximum length is 32 characters. The name must start with a letter and can only contain letters (A-Z, a-z) or numbers (0-9). The name will be normalized (lower-cased) before being matched. + /// + public readonly string Name; + /// + /// Text values of the attribute. The maximum number of elements is 10. The maximum length of an element in the array is 32 characters. The value will be normalized (lower-cased) before being matched. + /// + public readonly ImmutableArray Values; + + [OutputConstructor] + private ContextAttributeResponse( + string name, + + ImmutableArray values) + { + Name = name; + Values = values; + } + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Outputs/DateValuesResponse.cs b/sdk/dotnet/CloudSearch/V1/Outputs/DateValuesResponse.cs new file mode 100644 index 0000000000..67d2a730b8 --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Outputs/DateValuesResponse.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Outputs +{ + + /// + /// List of date values. + /// + [OutputType] + public sealed class DateValuesResponse + { + public readonly ImmutableArray Values; + + [OutputConstructor] + private DateValuesResponse(ImmutableArray values) + { + Values = values; + } + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Outputs/DoubleValuesResponse.cs b/sdk/dotnet/CloudSearch/V1/Outputs/DoubleValuesResponse.cs new file mode 100644 index 0000000000..8ff8744d18 --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Outputs/DoubleValuesResponse.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Outputs +{ + + /// + /// List of double values. 
+ /// + [OutputType] + public sealed class DoubleValuesResponse + { + public readonly ImmutableArray Values; + + [OutputConstructor] + private DoubleValuesResponse(ImmutableArray values) + { + Values = values; + } + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Outputs/EnumValuesResponse.cs b/sdk/dotnet/CloudSearch/V1/Outputs/EnumValuesResponse.cs new file mode 100644 index 0000000000..457916172d --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Outputs/EnumValuesResponse.cs @@ -0,0 +1,30 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Outputs +{ + + /// + /// List of enum values. + /// + [OutputType] + public sealed class EnumValuesResponse + { + /// + /// The maximum allowable length for string values is 32 characters. + /// + public readonly ImmutableArray Values; + + [OutputConstructor] + private EnumValuesResponse(ImmutableArray values) + { + Values = values; + } + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Outputs/FieldViolationResponse.cs b/sdk/dotnet/CloudSearch/V1/Outputs/FieldViolationResponse.cs new file mode 100644 index 0000000000..f67a46348d --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Outputs/FieldViolationResponse.cs @@ -0,0 +1,35 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Outputs +{ + + [OutputType] + public sealed class FieldViolationResponse + { + /// + /// The description of the error. + /// + public readonly string Description; + /// + /// Path of field with violation. + /// + public readonly string Field; + + [OutputConstructor] + private FieldViolationResponse( + string description, + + string field) + { + Description = description; + Field = field; + } + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Outputs/HtmlValuesResponse.cs b/sdk/dotnet/CloudSearch/V1/Outputs/HtmlValuesResponse.cs new file mode 100644 index 0000000000..a6cd3c4666 --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Outputs/HtmlValuesResponse.cs @@ -0,0 +1,30 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Outputs +{ + + /// + /// List of html values. + /// + [OutputType] + public sealed class HtmlValuesResponse + { + /// + /// The maximum allowable length for html values is 2048 characters. + /// + public readonly ImmutableArray Values; + + [OutputConstructor] + private HtmlValuesResponse(ImmutableArray values) + { + Values = values; + } + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Outputs/IntegerValuesResponse.cs b/sdk/dotnet/CloudSearch/V1/Outputs/IntegerValuesResponse.cs new file mode 100644 index 0000000000..a0e407eaf1 --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Outputs/IntegerValuesResponse.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. 
*** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Outputs +{ + + /// + /// List of integer values. + /// + [OutputType] + public sealed class IntegerValuesResponse + { + public readonly ImmutableArray Values; + + [OutputConstructor] + private IntegerValuesResponse(ImmutableArray values) + { + Values = values; + } + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Outputs/InteractionResponse.cs b/sdk/dotnet/CloudSearch/V1/Outputs/InteractionResponse.cs new file mode 100644 index 0000000000..59230c14f4 --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Outputs/InteractionResponse.cs @@ -0,0 +1,42 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Outputs +{ + + /// + /// Represents an interaction between a user and an item. + /// + [OutputType] + public sealed class InteractionResponse + { + /// + /// The time when the user acted on the item. If multiple actions of the same type exist for a single user, only the most recent action is recorded. + /// + public readonly string InteractionTime; + /// + /// The user that acted on the item. + /// + public readonly Outputs.PrincipalResponse Principal; + public readonly string Type; + + [OutputConstructor] + private InteractionResponse( + string interactionTime, + + Outputs.PrincipalResponse principal, + + string type) + { + InteractionTime = interactionTime; + Principal = principal; + Type = type; + } + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Outputs/ItemAclResponse.cs b/sdk/dotnet/CloudSearch/V1/Outputs/ItemAclResponse.cs new file mode 100644 index 0000000000..bd1f36e537 --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Outputs/ItemAclResponse.cs @@ -0,0 +1,59 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Outputs +{ + + /// + /// Access control list information for the item. For more information see [Map ACLs](https://developers.google.com/cloud-search/docs/guides/acls). + /// + [OutputType] + public sealed class ItemAclResponse + { + /// + /// Sets the type of access rules to apply when an item inherits its ACL from a parent. This should always be set in tandem with the inheritAclFrom field. Also, when the inheritAclFrom field is set, this field should be set to a valid AclInheritanceType. + /// + public readonly string AclInheritanceType; + /// + /// List of principals who are explicitly denied access to the item in search results. While principals are denied access by default, use denied readers to handle exceptions and override the list allowed readers. The maximum number of elements is 100. + /// + public readonly ImmutableArray DeniedReaders; + /// + /// The name of the item to inherit the Access Permission List (ACL) from. 
Note: ACL inheritance *only* provides access permissions to child items and does not define structural relationships, nor does it provide convenient ways to delete large groups of items. Deleting an ACL parent from the index only alters the access permissions of child items that reference the parent in the inheritAclFrom field. The item is still in the index, but may not be visible in search results. By contrast, deletion of a container item also deletes all items that reference the container via the containerName field. The maximum length for this field is 1536 characters. + /// + public readonly string InheritAclFrom; + /// + /// Optional. List of owners for the item. This field has no bearing on document access permissions. It does, however, offer a slight ranking boost to items where the querying user is an owner. The maximum number of elements is 5. + /// + public readonly ImmutableArray Owners; + /// + /// List of principals who are allowed to see the item in search results. Optional if inheriting permissions from another item or if the item is not intended to be visible, such as virtual containers. The maximum number of elements is 1000. + /// + public readonly ImmutableArray Readers; + + [OutputConstructor] + private ItemAclResponse( + string aclInheritanceType, + + ImmutableArray deniedReaders, + + string inheritAclFrom, + + ImmutableArray owners, + + ImmutableArray readers) + { + AclInheritanceType = aclInheritanceType; + DeniedReaders = deniedReaders; + InheritAclFrom = inheritAclFrom; + Owners = owners; + Readers = readers; + } + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Outputs/ItemContentResponse.cs b/sdk/dotnet/CloudSearch/V1/Outputs/ItemContentResponse.cs new file mode 100644 index 0000000000..0a42ac779c --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Outputs/ItemContentResponse.cs @@ -0,0 +1,49 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Outputs +{ + + /// + /// Content of an item to be indexed and surfaced by Cloud Search. Only UTF-8 encoded strings are allowed as inlineContent. If the content is uploaded and not binary, it must be UTF-8 encoded. + /// + [OutputType] + public sealed class ItemContentResponse + { + /// + /// Upload reference ID of previously uploaded content via the write method. + /// + public readonly Outputs.UploadItemRefResponse ContentDataRef; + public readonly string ContentFormat; + /// + /// Hashing info calculated and provided by the API client for content. Can be used with the items.push method to calculate modified state. The maximum length is 2048 characters. + /// + public readonly string Hash; + /// + /// Content that is supplied inline within the update method. The maximum length is 102400 bytes (100 KiB).
+ /// + public readonly string InlineContent; + + [OutputConstructor] + private ItemContentResponse( + Outputs.UploadItemRefResponse contentDataRef, + + string contentFormat, + + string hash, + + string inlineContent) + { + ContentDataRef = contentDataRef; + ContentFormat = contentFormat; + Hash = hash; + InlineContent = inlineContent; + } + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Outputs/ItemMetadataResponse.cs b/sdk/dotnet/CloudSearch/V1/Outputs/ItemMetadataResponse.cs new file mode 100644 index 0000000000..740587b139 --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Outputs/ItemMetadataResponse.cs @@ -0,0 +1,115 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Outputs +{ + + /// + /// Available metadata fields for the item. + /// + [OutputType] + public sealed class ItemMetadataResponse + { + /// + /// The name of the container for this item. Deletion of the container item leads to automatic deletion of this item. Note: ACLs are not inherited from a container item. To provide ACL inheritance for an item, use the inheritAclFrom field. The maximum length is 1536 characters. + /// + public readonly string ContainerName; + /// + /// The BCP-47 language code for the item, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. The maximum length is 32 characters. + /// + public readonly string ContentLanguage; + /// + /// A set of named attributes associated with the item. This can be used for influencing the ranking of the item based on the context in the request. The maximum number of elements is 10. + /// + public readonly ImmutableArray ContextAttributes; + /// + /// The time when the item was created in the source repository. + /// + public readonly string CreateTime; + /// + /// Hashing value provided by the API caller. This can be used with the items.push method to calculate modified state. The maximum length is 2048 characters. + /// + public readonly string Hash; + /// + /// A list of interactions for the item. Interactions are used to improve Search quality, but are not exposed to end users. The maximum number of elements is 1000. + /// + public readonly ImmutableArray Interactions; + /// + /// Additional keywords or phrases that should match the item. Used internally for user generated content. The maximum number of elements is 100. The maximum length is 8192 characters. + /// + public readonly ImmutableArray Keywords; + /// + /// The original mime-type of ItemContent.content in the source repository. The maximum length is 256 characters. + /// + public readonly string MimeType; + /// + /// The type of the item. This should correspond to the name of an object definition in the schema registered for the data source. For example, if the schema for the data source contains an object definition with name 'document', then item indexing requests for objects of that type should set objectType to 'document'. The maximum length is 256 characters. + /// + public readonly string ObjectType; + /// + /// Additional search quality metadata of the item + /// + public readonly Outputs.SearchQualityMetadataResponse SearchQualityMetadata; + /// + /// Link to the source repository serving the data. 
Search results apply this link to the title. Whitespace or special characters may cause Cloud Search result links to trigger a redirect notice; to avoid this, encode the URL. The maximum length is 2048 characters. + /// + public readonly string SourceRepositoryUrl; + /// + /// The title of the item. If given, this will be the displayed title of the Search result. The maximum length is 2048 characters. + /// + public readonly string Title; + /// + /// The time when the item was last modified in the source repository. + /// + public readonly string UpdateTime; + + [OutputConstructor] + private ItemMetadataResponse( + string containerName, + + string contentLanguage, + + ImmutableArray contextAttributes, + + string createTime, + + string hash, + + ImmutableArray interactions, + + ImmutableArray keywords, + + string mimeType, + + string objectType, + + Outputs.SearchQualityMetadataResponse searchQualityMetadata, + + string sourceRepositoryUrl, + + string title, + + string updateTime) + { + ContainerName = containerName; + ContentLanguage = contentLanguage; + ContextAttributes = contextAttributes; + CreateTime = createTime; + Hash = hash; + Interactions = interactions; + Keywords = keywords; + MimeType = mimeType; + ObjectType = objectType; + SearchQualityMetadata = searchQualityMetadata; + SourceRepositoryUrl = sourceRepositoryUrl; + Title = title; + UpdateTime = updateTime; + } + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Outputs/ItemStatusResponse.cs b/sdk/dotnet/CloudSearch/V1/Outputs/ItemStatusResponse.cs new file mode 100644 index 0000000000..2fb8b3f22d --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Outputs/ItemStatusResponse.cs @@ -0,0 +1,45 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Outputs +{ + + /// + /// This contains the item's status and any errors. + /// + [OutputType] + public sealed class ItemStatusResponse + { + /// + /// Status code. + /// + public readonly string Code; + /// + /// Error details in case the item is in ERROR state. + /// + public readonly ImmutableArray ProcessingErrors; + /// + /// Repository error reported by the connector. + /// + public readonly ImmutableArray RepositoryErrors; + + [OutputConstructor] + private ItemStatusResponse( + string code, + + ImmutableArray processingErrors, + + ImmutableArray repositoryErrors) + { + Code = code; + ProcessingErrors = processingErrors; + RepositoryErrors = repositoryErrors; + } + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Outputs/ItemStructuredDataResponse.cs b/sdk/dotnet/CloudSearch/V1/Outputs/ItemStructuredDataResponse.cs new file mode 100644 index 0000000000..b14e316c46 --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Outputs/ItemStructuredDataResponse.cs @@ -0,0 +1,38 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Outputs +{ + + /// + /// Available structured data fields for the item. + /// + [OutputType] + public sealed class ItemStructuredDataResponse + { + /// + /// Hashing value provided by the API caller.
This can be used with the items.push method to calculate modified state. The maximum length is 2048 characters. + /// + public readonly string Hash; + /// + /// The structured data object that should conform to a registered object definition in the schema for the data source. + /// + public readonly Outputs.StructuredDataObjectResponse Object; + + [OutputConstructor] + private ItemStructuredDataResponse( + string hash, + + Outputs.StructuredDataObjectResponse @object) + { + Hash = hash; + Object = @object; + } + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Outputs/NamedPropertyResponse.cs b/sdk/dotnet/CloudSearch/V1/Outputs/NamedPropertyResponse.cs new file mode 100644 index 0000000000..6132a6e71b --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Outputs/NamedPropertyResponse.cs @@ -0,0 +1,67 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Outputs +{ + + /// + /// A typed name-value pair for structured data. The type of the value should be the same as the registered type for the `name` property in the object definition of `objectType`. + /// + [OutputType] + public sealed class NamedPropertyResponse + { + public readonly bool BooleanValue; + public readonly Outputs.DateValuesResponse DateValues; + public readonly Outputs.DoubleValuesResponse DoubleValues; + public readonly Outputs.EnumValuesResponse EnumValues; + public readonly Outputs.HtmlValuesResponse HtmlValues; + public readonly Outputs.IntegerValuesResponse IntegerValues; + /// + /// The name of the property. This name should correspond to the name of the property that was registered for object definition in the schema. The maximum allowable length for this property is 256 characters. + /// + public readonly string Name; + public readonly Outputs.ObjectValuesResponse ObjectValues; + public readonly Outputs.TextValuesResponse TextValues; + public readonly Outputs.TimestampValuesResponse TimestampValues; + + [OutputConstructor] + private NamedPropertyResponse( + bool booleanValue, + + Outputs.DateValuesResponse dateValues, + + Outputs.DoubleValuesResponse doubleValues, + + Outputs.EnumValuesResponse enumValues, + + Outputs.HtmlValuesResponse htmlValues, + + Outputs.IntegerValuesResponse integerValues, + + string name, + + Outputs.ObjectValuesResponse objectValues, + + Outputs.TextValuesResponse textValues, + + Outputs.TimestampValuesResponse timestampValues) + { + BooleanValue = booleanValue; + DateValues = dateValues; + DoubleValues = doubleValues; + EnumValues = enumValues; + HtmlValues = htmlValues; + IntegerValues = integerValues; + Name = name; + ObjectValues = objectValues; + TextValues = textValues; + TimestampValues = timestampValues; + } + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Outputs/ObjectValuesResponse.cs b/sdk/dotnet/CloudSearch/V1/Outputs/ObjectValuesResponse.cs new file mode 100644 index 0000000000..8c3c1aaac4 --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Outputs/ObjectValuesResponse.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Outputs +{ + + /// + /// List of object values. + /// + [OutputType] + public sealed class ObjectValuesResponse + { + public readonly ImmutableArray Values; + + [OutputConstructor] + private ObjectValuesResponse(ImmutableArray values) + { + Values = values; + } + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Outputs/PrincipalResponse.cs b/sdk/dotnet/CloudSearch/V1/Outputs/PrincipalResponse.cs new file mode 100644 index 0000000000..1ba3157c4a --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Outputs/PrincipalResponse.cs @@ -0,0 +1,45 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Outputs +{ + + /// + /// Reference to a user, group, or domain. + /// + [OutputType] + public sealed class PrincipalResponse + { + /// + /// This principal is a group identified using an external identity. The name field must specify the group resource name with this format: identitysources/{source_id}/groups/{ID} + /// + public readonly string GroupResourceName; + /// + /// This principal is a Google Workspace user, group or domain. + /// + public readonly Outputs.GSuitePrincipalResponse GsuitePrincipal; + /// + /// This principal is a user identified using an external identity. The name field must specify the user resource name with this format: identitysources/{source_id}/users/{ID} + /// + public readonly string UserResourceName; + + [OutputConstructor] + private PrincipalResponse( + string groupResourceName, + + Outputs.GSuitePrincipalResponse gsuitePrincipal, + + string userResourceName) + { + GroupResourceName = groupResourceName; + GsuitePrincipal = gsuitePrincipal; + UserResourceName = userResourceName; + } + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Outputs/ProcessingErrorResponse.cs b/sdk/dotnet/CloudSearch/V1/Outputs/ProcessingErrorResponse.cs new file mode 100644 index 0000000000..f6d1ebe1f6 --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Outputs/ProcessingErrorResponse.cs @@ -0,0 +1,42 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Outputs +{ + + [OutputType] + public sealed class ProcessingErrorResponse + { + /// + /// Error code indicating the nature of the error. + /// + public readonly string Code; + /// + /// The description of the error. + /// + public readonly string ErrorMessage; + /// + /// In case the item fields are invalid, this field contains the details about the validation errors. 
+ /// + public readonly ImmutableArray FieldViolations; + + [OutputConstructor] + private ProcessingErrorResponse( + string code, + + string errorMessage, + + ImmutableArray fieldViolations) + { + Code = code; + ErrorMessage = errorMessage; + FieldViolations = fieldViolations; + } + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Outputs/RepositoryErrorResponse.cs b/sdk/dotnet/CloudSearch/V1/Outputs/RepositoryErrorResponse.cs new file mode 100644 index 0000000000..66b2e94f07 --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Outputs/RepositoryErrorResponse.cs @@ -0,0 +1,45 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Outputs +{ + + /// + /// Errors when the connector is communicating to the source repository. + /// + [OutputType] + public sealed class RepositoryErrorResponse + { + /// + /// Message that describes the error. The maximum allowable length of the message is 8192 characters. + /// + public readonly string ErrorMessage; + /// + /// Error codes. Matches the definition of HTTP status codes. + /// + public readonly int HttpStatusCode; + /// + /// The type of error. + /// + public readonly string Type; + + [OutputConstructor] + private RepositoryErrorResponse( + string errorMessage, + + int httpStatusCode, + + string type) + { + ErrorMessage = errorMessage; + HttpStatusCode = httpStatusCode; + Type = type; + } + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Outputs/SearchQualityMetadataResponse.cs b/sdk/dotnet/CloudSearch/V1/Outputs/SearchQualityMetadataResponse.cs new file mode 100644 index 0000000000..ce1005b5d2 --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Outputs/SearchQualityMetadataResponse.cs @@ -0,0 +1,30 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Outputs +{ + + /// + /// Additional search quality metadata of the item. + /// + [OutputType] + public sealed class SearchQualityMetadataResponse + { + /// + /// An indication of the quality of the item, used to influence search quality. Value should be between 0.0 (lowest quality) and 1.0 (highest quality). The default value is 0.0. + /// + public readonly double Quality; + + [OutputConstructor] + private SearchQualityMetadataResponse(double quality) + { + Quality = quality; + } + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Outputs/StructuredDataObjectResponse.cs b/sdk/dotnet/CloudSearch/V1/Outputs/StructuredDataObjectResponse.cs new file mode 100644 index 0000000000..e98f625f19 --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Outputs/StructuredDataObjectResponse.cs @@ -0,0 +1,30 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Outputs +{ + + /// + /// A structured data object consisting of named properties. 
+ /// + [OutputType] + public sealed class StructuredDataObjectResponse + { + /// + /// The properties for the object. The maximum number of elements is 1000. + /// + public readonly ImmutableArray Properties; + + [OutputConstructor] + private StructuredDataObjectResponse(ImmutableArray properties) + { + Properties = properties; + } + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Outputs/TextValuesResponse.cs b/sdk/dotnet/CloudSearch/V1/Outputs/TextValuesResponse.cs new file mode 100644 index 0000000000..3924cda131 --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Outputs/TextValuesResponse.cs @@ -0,0 +1,30 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Outputs +{ + + /// + /// List of text values. + /// + [OutputType] + public sealed class TextValuesResponse + { + /// + /// The maximum allowable length for text values is 2048 characters. + /// + public readonly ImmutableArray Values; + + [OutputConstructor] + private TextValuesResponse(ImmutableArray values) + { + Values = values; + } + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Outputs/TimestampValuesResponse.cs b/sdk/dotnet/CloudSearch/V1/Outputs/TimestampValuesResponse.cs new file mode 100644 index 0000000000..6b0a8c2a21 --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Outputs/TimestampValuesResponse.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Outputs +{ + + /// + /// List of timestamp values. + /// + [OutputType] + public sealed class TimestampValuesResponse + { + public readonly ImmutableArray Values; + + [OutputConstructor] + private TimestampValuesResponse(ImmutableArray values) + { + Values = values; + } + } +} diff --git a/sdk/dotnet/CloudSearch/V1/Outputs/UploadItemRefResponse.cs b/sdk/dotnet/CloudSearch/V1/Outputs/UploadItemRefResponse.cs new file mode 100644 index 0000000000..b3b7707113 --- /dev/null +++ b/sdk/dotnet/CloudSearch/V1/Outputs/UploadItemRefResponse.cs @@ -0,0 +1,30 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.CloudSearch.V1.Outputs +{ + + /// + /// Represents an upload session reference. This reference is created via upload method. This reference is valid for 30 days after its creation. Updating of item content may refer to this uploaded content via contentDataRef. + /// + [OutputType] + public sealed class UploadItemRefResponse + { + /// + /// The name of the content reference. The maximum length is 2048 characters. 
+ /// + public readonly string Name; + + [OutputConstructor] + private UploadItemRefResponse(string name) + { + Name = name; + } + } +} diff --git a/sdk/dotnet/Contactcenterinsights/V1/Inputs/GoogleCloudContactcenterinsightsV1RedactionConfigArgs.cs b/sdk/dotnet/Contactcenterinsights/V1/Inputs/GoogleCloudContactcenterinsightsV1RedactionConfigArgs.cs new file mode 100644 index 0000000000..1951f8f608 --- /dev/null +++ b/sdk/dotnet/Contactcenterinsights/V1/Inputs/GoogleCloudContactcenterinsightsV1RedactionConfigArgs.cs @@ -0,0 +1,35 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.Contactcenterinsights.V1.Inputs +{ + + /// + /// DLP resources used for redaction while ingesting conversations. + /// + public sealed class GoogleCloudContactcenterinsightsV1RedactionConfigArgs : global::Pulumi.ResourceArgs + { + /// + /// The fully-qualified DLP deidentify template resource name. Format: `projects/{project}/deidentifyTemplates/{template}` + /// + [Input("deidentifyTemplate")] + public Input? DeidentifyTemplate { get; set; } + + /// + /// The fully-qualified DLP inspect template resource name. Format: `projects/{project}/locations/{location}/inspectTemplates/{template}` + /// + [Input("inspectTemplate")] + public Input? InspectTemplate { get; set; } + + public GoogleCloudContactcenterinsightsV1RedactionConfigArgs() + { + } + public static new GoogleCloudContactcenterinsightsV1RedactionConfigArgs Empty => new GoogleCloudContactcenterinsightsV1RedactionConfigArgs(); + } +} diff --git a/sdk/dotnet/Contactcenterinsights/V1/Inputs/GoogleCloudContactcenterinsightsV1SpeechConfigArgs.cs b/sdk/dotnet/Contactcenterinsights/V1/Inputs/GoogleCloudContactcenterinsightsV1SpeechConfigArgs.cs new file mode 100644 index 0000000000..221590cade --- /dev/null +++ b/sdk/dotnet/Contactcenterinsights/V1/Inputs/GoogleCloudContactcenterinsightsV1SpeechConfigArgs.cs @@ -0,0 +1,29 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.GoogleNative.Contactcenterinsights.V1.Inputs +{ + + /// + /// Speech-to-Text configuration. + /// + public sealed class GoogleCloudContactcenterinsightsV1SpeechConfigArgs : global::Pulumi.ResourceArgs + { + /// + /// The fully-qualified Speech Recognizer resource name. Format: `projects/{project_id}/locations/{location}/recognizer/{recognizer}` + /// + [Input("speechRecognizer")] + public Input? SpeechRecognizer { get; set; } + + public GoogleCloudContactcenterinsightsV1SpeechConfigArgs() + { + } + public static new GoogleCloudContactcenterinsightsV1SpeechConfigArgs Empty => new GoogleCloudContactcenterinsightsV1SpeechConfigArgs(); + } +} diff --git a/sdk/go/google/aiplatform/v1/getModel.go b/sdk/go/google/aiplatform/v1/getModel.go new file mode 100644 index 0000000000..2f72050f7d --- /dev/null +++ b/sdk/go/google/aiplatform/v1/getModel.go @@ -0,0 +1,280 @@ +// Code generated by the Pulumi SDK Generator DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! 
*** + +package v1 + +import ( + "context" + "reflect" + + "github.com/pulumi/pulumi-google-native/sdk/go/google/internal" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +// Gets a Model. +func LookupModel(ctx *pulumi.Context, args *LookupModelArgs, opts ...pulumi.InvokeOption) (*LookupModelResult, error) { + opts = internal.PkgInvokeDefaultOpts(opts) + var rv LookupModelResult + err := ctx.Invoke("google-native:aiplatform/v1:getModel", args, &rv, opts...) + if err != nil { + return nil, err + } + return &rv, nil +} + +type LookupModelArgs struct { + Location string `pulumi:"location"` + ModelId string `pulumi:"modelId"` + Project *string `pulumi:"project"` +} + +type LookupModelResult struct { + // Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + ArtifactUri string `pulumi:"artifactUri"` + // Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + ContainerSpec GoogleCloudAiplatformV1ModelContainerSpecResponse `pulumi:"containerSpec"` + // Timestamp when this Model was uploaded into Vertex AI. + CreateTime string `pulumi:"createTime"` + // The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. + DeployedModels []GoogleCloudAiplatformV1DeployedModelRefResponse `pulumi:"deployedModels"` + // The description of the Model. + Description string `pulumi:"description"` + // The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + DisplayName string `pulumi:"displayName"` + // Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + EncryptionSpec GoogleCloudAiplatformV1EncryptionSpecResponse `pulumi:"encryptionSpec"` + // Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + Etag string `pulumi:"etag"` + // The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + ExplanationSpec GoogleCloudAiplatformV1ExplanationSpecResponse `pulumi:"explanationSpec"` + // The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + Labels map[string]string `pulumi:"labels"` + // Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. 
+ Metadata interface{} `pulumi:"metadata"` + // The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. + MetadataArtifact string `pulumi:"metadataArtifact"` + // Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + MetadataSchemaUri string `pulumi:"metadataSchemaUri"` + // Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model. + ModelSourceInfo GoogleCloudAiplatformV1ModelSourceInfoResponse `pulumi:"modelSourceInfo"` + // The resource name of the Model. + Name string `pulumi:"name"` + // If this Model is a copy of another Model, this contains info about the original. + OriginalModelInfo GoogleCloudAiplatformV1ModelOriginalModelInfoResponse `pulumi:"originalModelInfo"` + // Optional. This field is populated if the model is produced by a pipeline job. + PipelineJob string `pulumi:"pipelineJob"` + // The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + PredictSchemata GoogleCloudAiplatformV1PredictSchemataResponse `pulumi:"predictSchemata"` + // When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. + SupportedDeploymentResourcesTypes []string `pulumi:"supportedDeploymentResourcesTypes"` + // The formats in which this Model may be exported. If empty, this Model is not available for export. + SupportedExportFormats []GoogleCloudAiplatformV1ModelExportFormatResponse `pulumi:"supportedExportFormats"` + // The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. 
* `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + SupportedInputStorageFormats []string `pulumi:"supportedInputStorageFormats"` + // The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + SupportedOutputStorageFormats []string `pulumi:"supportedOutputStorageFormats"` + // The resource name of the TrainingPipeline that uploaded this Model, if any. + TrainingPipeline string `pulumi:"trainingPipeline"` + // Timestamp when this Model was most recently updated. + UpdateTime string `pulumi:"updateTime"` + // User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + VersionAliases []string `pulumi:"versionAliases"` + // Timestamp when this version was created. + VersionCreateTime string `pulumi:"versionCreateTime"` + // The description of this version. + VersionDescription string `pulumi:"versionDescription"` + // Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation. + VersionId string `pulumi:"versionId"` + // Timestamp when this version was most recently updated. + VersionUpdateTime string `pulumi:"versionUpdateTime"` +} + +func LookupModelOutput(ctx *pulumi.Context, args LookupModelOutputArgs, opts ...pulumi.InvokeOption) LookupModelResultOutput { + return pulumi.ToOutputWithContext(context.Background(), args). + ApplyT(func(v interface{}) (LookupModelResult, error) { + args := v.(LookupModelArgs) + r, err := LookupModel(ctx, &args, opts...) 
+ var s LookupModelResult + if r != nil { + s = *r + } + return s, err + }).(LookupModelResultOutput) +} + +type LookupModelOutputArgs struct { + Location pulumi.StringInput `pulumi:"location"` + ModelId pulumi.StringInput `pulumi:"modelId"` + Project pulumi.StringPtrInput `pulumi:"project"` +} + +func (LookupModelOutputArgs) ElementType() reflect.Type { + return reflect.TypeOf((*LookupModelArgs)(nil)).Elem() +} + +type LookupModelResultOutput struct{ *pulumi.OutputState } + +func (LookupModelResultOutput) ElementType() reflect.Type { + return reflect.TypeOf((*LookupModelResult)(nil)).Elem() +} + +func (o LookupModelResultOutput) ToLookupModelResultOutput() LookupModelResultOutput { + return o +} + +func (o LookupModelResultOutput) ToLookupModelResultOutputWithContext(ctx context.Context) LookupModelResultOutput { + return o +} + +// Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. +func (o LookupModelResultOutput) ArtifactUri() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.ArtifactUri }).(pulumi.StringOutput) +} + +// Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. +func (o LookupModelResultOutput) ContainerSpec() GoogleCloudAiplatformV1ModelContainerSpecResponseOutput { + return o.ApplyT(func(v LookupModelResult) GoogleCloudAiplatformV1ModelContainerSpecResponse { return v.ContainerSpec }).(GoogleCloudAiplatformV1ModelContainerSpecResponseOutput) +} + +// Timestamp when this Model was uploaded into Vertex AI. +func (o LookupModelResultOutput) CreateTime() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.CreateTime }).(pulumi.StringOutput) +} + +// The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. +func (o LookupModelResultOutput) DeployedModels() GoogleCloudAiplatformV1DeployedModelRefResponseArrayOutput { + return o.ApplyT(func(v LookupModelResult) []GoogleCloudAiplatformV1DeployedModelRefResponse { return v.DeployedModels }).(GoogleCloudAiplatformV1DeployedModelRefResponseArrayOutput) +} + +// The description of the Model. +func (o LookupModelResultOutput) Description() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.Description }).(pulumi.StringOutput) +} + +// The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. +func (o LookupModelResultOutput) DisplayName() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.DisplayName }).(pulumi.StringOutput) +} + +// Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. +func (o LookupModelResultOutput) EncryptionSpec() GoogleCloudAiplatformV1EncryptionSpecResponseOutput { + return o.ApplyT(func(v LookupModelResult) GoogleCloudAiplatformV1EncryptionSpecResponse { return v.EncryptionSpec }).(GoogleCloudAiplatformV1EncryptionSpecResponseOutput) +} + +// Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. 
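For reference, a minimal sketch of consuming the new getModel invoke from a Pulumi Go program; the project, location, and model ID are placeholders:

package main

import (
	aiplatform "github.com/pulumi/pulumi-google-native/sdk/go/google/aiplatform/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Look up an existing Vertex AI Model by location and model ID.
		model, err := aiplatform.LookupModel(ctx, &aiplatform.LookupModelArgs{
			Location: "us-central1",
			ModelId:  "my-model",
			Project:  pulumi.StringRef("my-project"),
		})
		if err != nil {
			return err
		}
		// Export a couple of the returned fields.
		ctx.Export("modelName", pulumi.String(model.Name))
		ctx.Export("versionId", pulumi.String(model.VersionId))
		return nil
	})
}

LookupModelOutput is the variant to reach for when the arguments are themselves outputs of other resources rather than plain strings.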
+func (o LookupModelResultOutput) Etag() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.Etag }).(pulumi.StringOutput) +} + +// The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. +func (o LookupModelResultOutput) ExplanationSpec() GoogleCloudAiplatformV1ExplanationSpecResponseOutput { + return o.ApplyT(func(v LookupModelResult) GoogleCloudAiplatformV1ExplanationSpecResponse { return v.ExplanationSpec }).(GoogleCloudAiplatformV1ExplanationSpecResponseOutput) +} + +// The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. +func (o LookupModelResultOutput) Labels() pulumi.StringMapOutput { + return o.ApplyT(func(v LookupModelResult) map[string]string { return v.Labels }).(pulumi.StringMapOutput) +} + +// Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. +func (o LookupModelResultOutput) Metadata() pulumi.AnyOutput { + return o.ApplyT(func(v LookupModelResult) interface{} { return v.Metadata }).(pulumi.AnyOutput) +} + +// The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. +func (o LookupModelResultOutput) MetadataArtifact() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.MetadataArtifact }).(pulumi.StringOutput) +} + +// Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. +func (o LookupModelResultOutput) MetadataSchemaUri() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.MetadataSchemaUri }).(pulumi.StringOutput) +} + +// Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model. 
+func (o LookupModelResultOutput) ModelSourceInfo() GoogleCloudAiplatformV1ModelSourceInfoResponseOutput { + return o.ApplyT(func(v LookupModelResult) GoogleCloudAiplatformV1ModelSourceInfoResponse { return v.ModelSourceInfo }).(GoogleCloudAiplatformV1ModelSourceInfoResponseOutput) +} + +// The resource name of the Model. +func (o LookupModelResultOutput) Name() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.Name }).(pulumi.StringOutput) +} + +// If this Model is a copy of another Model, this contains info about the original. +func (o LookupModelResultOutput) OriginalModelInfo() GoogleCloudAiplatformV1ModelOriginalModelInfoResponseOutput { + return o.ApplyT(func(v LookupModelResult) GoogleCloudAiplatformV1ModelOriginalModelInfoResponse { + return v.OriginalModelInfo + }).(GoogleCloudAiplatformV1ModelOriginalModelInfoResponseOutput) +} + +// Optional. This field is populated if the model is produced by a pipeline job. +func (o LookupModelResultOutput) PipelineJob() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.PipelineJob }).(pulumi.StringOutput) +} + +// The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. +func (o LookupModelResultOutput) PredictSchemata() GoogleCloudAiplatformV1PredictSchemataResponseOutput { + return o.ApplyT(func(v LookupModelResult) GoogleCloudAiplatformV1PredictSchemataResponse { return v.PredictSchemata }).(GoogleCloudAiplatformV1PredictSchemataResponseOutput) +} + +// When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. +func (o LookupModelResultOutput) SupportedDeploymentResourcesTypes() pulumi.StringArrayOutput { + return o.ApplyT(func(v LookupModelResult) []string { return v.SupportedDeploymentResourcesTypes }).(pulumi.StringArrayOutput) +} + +// The formats in which this Model may be exported. If empty, this Model is not available for export. +func (o LookupModelResultOutput) SupportedExportFormats() GoogleCloudAiplatformV1ModelExportFormatResponseArrayOutput { + return o.ApplyT(func(v LookupModelResult) []GoogleCloudAiplatformV1ModelExportFormatResponse { + return v.SupportedExportFormats + }).(GoogleCloudAiplatformV1ModelExportFormatResponseArrayOutput) +} + +// The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. 
* `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. +func (o LookupModelResultOutput) SupportedInputStorageFormats() pulumi.StringArrayOutput { + return o.ApplyT(func(v LookupModelResult) []string { return v.SupportedInputStorageFormats }).(pulumi.StringArrayOutput) +} + +// The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. +func (o LookupModelResultOutput) SupportedOutputStorageFormats() pulumi.StringArrayOutput { + return o.ApplyT(func(v LookupModelResult) []string { return v.SupportedOutputStorageFormats }).(pulumi.StringArrayOutput) +} + +// The resource name of the TrainingPipeline that uploaded this Model, if any. +func (o LookupModelResultOutput) TrainingPipeline() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.TrainingPipeline }).(pulumi.StringOutput) +} + +// Timestamp when this Model was most recently updated. +func (o LookupModelResultOutput) UpdateTime() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.UpdateTime }).(pulumi.StringOutput) +} + +// User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. +func (o LookupModelResultOutput) VersionAliases() pulumi.StringArrayOutput { + return o.ApplyT(func(v LookupModelResult) []string { return v.VersionAliases }).(pulumi.StringArrayOutput) +} + +// Timestamp when this version was created. +func (o LookupModelResultOutput) VersionCreateTime() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.VersionCreateTime }).(pulumi.StringOutput) +} + +// The description of this version. 
+func (o LookupModelResultOutput) VersionDescription() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.VersionDescription }).(pulumi.StringOutput) +} + +// Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation. +func (o LookupModelResultOutput) VersionId() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.VersionId }).(pulumi.StringOutput) +} + +// Timestamp when this version was most recently updated. +func (o LookupModelResultOutput) VersionUpdateTime() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.VersionUpdateTime }).(pulumi.StringOutput) +} + +func init() { + pulumi.RegisterOutputType(LookupModelResultOutput{}) +} diff --git a/sdk/go/google/aiplatform/v1/init.go b/sdk/go/google/aiplatform/v1/init.go index 51d446b21f..994f327b17 100644 --- a/sdk/go/google/aiplatform/v1/init.go +++ b/sdk/go/google/aiplatform/v1/init.go @@ -79,6 +79,8 @@ func (m *module) Construct(ctx *pulumi.Context, name, typ, urn string) (r pulumi r = &MetadataSchema{} case "google-native:aiplatform/v1:MetadataStore": r = &MetadataStore{} + case "google-native:aiplatform/v1:Model": + r = &Model{} case "google-native:aiplatform/v1:ModelDeploymentMonitoringJob": r = &ModelDeploymentMonitoringJob{} case "google-native:aiplatform/v1:NasJob": diff --git a/sdk/go/google/aiplatform/v1/model.go b/sdk/go/google/aiplatform/v1/model.go new file mode 100644 index 0000000000..972591b075 --- /dev/null +++ b/sdk/go/google/aiplatform/v1/model.go @@ -0,0 +1,406 @@ +// Code generated by the Pulumi SDK Generator DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! *** + +package v1 + +import ( + "context" + "reflect" + + "errors" + "github.com/pulumi/pulumi-google-native/sdk/go/google/internal" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +// Uploads a Model artifact into Vertex AI. +type Model struct { + pulumi.CustomResourceState + + // Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + ArtifactUri pulumi.StringOutput `pulumi:"artifactUri"` + // Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + ContainerSpec GoogleCloudAiplatformV1ModelContainerSpecResponseOutput `pulumi:"containerSpec"` + // Timestamp when this Model was uploaded into Vertex AI. + CreateTime pulumi.StringOutput `pulumi:"createTime"` + // The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. + DeployedModels GoogleCloudAiplatformV1DeployedModelRefResponseArrayOutput `pulumi:"deployedModels"` + // The description of the Model. + Description pulumi.StringOutput `pulumi:"description"` + // The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + DisplayName pulumi.StringOutput `pulumi:"displayName"` + // Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. 
+ EncryptionSpec GoogleCloudAiplatformV1EncryptionSpecResponseOutput `pulumi:"encryptionSpec"` + // Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + Etag pulumi.StringOutput `pulumi:"etag"` + // The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + ExplanationSpec GoogleCloudAiplatformV1ExplanationSpecResponseOutput `pulumi:"explanationSpec"` + // The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + Labels pulumi.StringMapOutput `pulumi:"labels"` + Location pulumi.StringOutput `pulumi:"location"` + // Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + Metadata pulumi.AnyOutput `pulumi:"metadata"` + // The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. + MetadataArtifact pulumi.StringOutput `pulumi:"metadataArtifact"` + // Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + MetadataSchemaUri pulumi.StringOutput `pulumi:"metadataSchemaUri"` + // Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model. + ModelSourceInfo GoogleCloudAiplatformV1ModelSourceInfoResponseOutput `pulumi:"modelSourceInfo"` + // The resource name of the Model. + Name pulumi.StringOutput `pulumi:"name"` + // If this Model is a copy of another Model, this contains info about the original. + OriginalModelInfo GoogleCloudAiplatformV1ModelOriginalModelInfoResponseOutput `pulumi:"originalModelInfo"` + // Optional. This field is populated if the model is produced by a pipeline job. + PipelineJob pulumi.StringOutput `pulumi:"pipelineJob"` + // The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. 
+ PredictSchemata GoogleCloudAiplatformV1PredictSchemataResponseOutput `pulumi:"predictSchemata"` + Project pulumi.StringOutput `pulumi:"project"` + // When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. + SupportedDeploymentResourcesTypes pulumi.StringArrayOutput `pulumi:"supportedDeploymentResourcesTypes"` + // The formats in which this Model may be exported. If empty, this Model is not available for export. + SupportedExportFormats GoogleCloudAiplatformV1ModelExportFormatResponseArrayOutput `pulumi:"supportedExportFormats"` + // The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + SupportedInputStorageFormats pulumi.StringArrayOutput `pulumi:"supportedInputStorageFormats"` + // The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + SupportedOutputStorageFormats pulumi.StringArrayOutput `pulumi:"supportedOutputStorageFormats"` + // The resource name of the TrainingPipeline that uploaded this Model, if any. 
+ TrainingPipeline pulumi.StringOutput `pulumi:"trainingPipeline"` + // Timestamp when this Model was most recently updated. + UpdateTime pulumi.StringOutput `pulumi:"updateTime"` + // User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + VersionAliases pulumi.StringArrayOutput `pulumi:"versionAliases"` + // Timestamp when this version was created. + VersionCreateTime pulumi.StringOutput `pulumi:"versionCreateTime"` + // The description of this version. + VersionDescription pulumi.StringOutput `pulumi:"versionDescription"` + // Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation. + VersionId pulumi.StringOutput `pulumi:"versionId"` + // Timestamp when this version was most recently updated. + VersionUpdateTime pulumi.StringOutput `pulumi:"versionUpdateTime"` +} + +// NewModel registers a new resource with the given unique name, arguments, and options. +func NewModel(ctx *pulumi.Context, + name string, args *ModelArgs, opts ...pulumi.ResourceOption) (*Model, error) { + if args == nil { + return nil, errors.New("missing one or more required arguments") + } + + if args.DisplayName == nil { + return nil, errors.New("invalid value for required argument 'DisplayName'") + } + replaceOnChanges := pulumi.ReplaceOnChanges([]string{ + "location", + "project", + }) + opts = append(opts, replaceOnChanges) + opts = internal.PkgResourceDefaultOpts(opts) + var resource Model + err := ctx.RegisterResource("google-native:aiplatform/v1:Model", name, args, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// GetModel gets an existing Model resource's state with the given name, ID, and optional +// state properties that are used to uniquely qualify the lookup (nil if not required). +func GetModel(ctx *pulumi.Context, + name string, id pulumi.IDInput, state *ModelState, opts ...pulumi.ResourceOption) (*Model, error) { + var resource Model + err := ctx.ReadResource("google-native:aiplatform/v1:Model", name, id, state, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// Input properties used for looking up and filtering Model resources. +type modelState struct { +} + +type ModelState struct { +} + +func (ModelState) ElementType() reflect.Type { + return reflect.TypeOf((*modelState)(nil)).Elem() +} + +type modelArgs struct { + // Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + ArtifactUri *string `pulumi:"artifactUri"` + // Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + ContainerSpec *GoogleCloudAiplatformV1ModelContainerSpec `pulumi:"containerSpec"` + // The description of the Model. 
+ Description *string `pulumi:"description"` + // The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + DisplayName string `pulumi:"displayName"` + // Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + EncryptionSpec *GoogleCloudAiplatformV1EncryptionSpec `pulumi:"encryptionSpec"` + // Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + Etag *string `pulumi:"etag"` + // The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + ExplanationSpec *GoogleCloudAiplatformV1ExplanationSpec `pulumi:"explanationSpec"` + // The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + Labels map[string]string `pulumi:"labels"` + Location *string `pulumi:"location"` + // Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + Metadata interface{} `pulumi:"metadata"` + // Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + MetadataSchemaUri *string `pulumi:"metadataSchemaUri"` + // Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. + ModelId *string `pulumi:"modelId"` + // The resource name of the Model. + Name *string `pulumi:"name"` + // Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version. + ParentModel *string `pulumi:"parentModel"` + // Optional. This field is populated if the model is produced by a pipeline job. + PipelineJob *string `pulumi:"pipelineJob"` + // The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. 
+ PredictSchemata *GoogleCloudAiplatformV1PredictSchemata `pulumi:"predictSchemata"` + Project *string `pulumi:"project"` + // Optional. The user-provided custom service account to use to do the model upload. If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.). + ServiceAccount *string `pulumi:"serviceAccount"` + // User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + VersionAliases []string `pulumi:"versionAliases"` + // The description of this version. + VersionDescription *string `pulumi:"versionDescription"` +} + +// The set of arguments for constructing a Model resource. +type ModelArgs struct { + // Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + ArtifactUri pulumi.StringPtrInput + // Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + ContainerSpec GoogleCloudAiplatformV1ModelContainerSpecPtrInput + // The description of the Model. + Description pulumi.StringPtrInput + // The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + DisplayName pulumi.StringInput + // Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + EncryptionSpec GoogleCloudAiplatformV1EncryptionSpecPtrInput + // Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + Etag pulumi.StringPtrInput + // The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + ExplanationSpec GoogleCloudAiplatformV1ExplanationSpecPtrInput + // The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. 
+ Labels pulumi.StringMapInput + Location pulumi.StringPtrInput + // Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + Metadata pulumi.Input + // Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + MetadataSchemaUri pulumi.StringPtrInput + // Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. + ModelId pulumi.StringPtrInput + // The resource name of the Model. + Name pulumi.StringPtrInput + // Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version. + ParentModel pulumi.StringPtrInput + // Optional. This field is populated if the model is produced by a pipeline job. + PipelineJob pulumi.StringPtrInput + // The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + PredictSchemata GoogleCloudAiplatformV1PredictSchemataPtrInput + Project pulumi.StringPtrInput + // Optional. The user-provided custom service account to use to do the model upload. If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.). + ServiceAccount pulumi.StringPtrInput + // User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + VersionAliases pulumi.StringArrayInput + // The description of this version. 
+ VersionDescription pulumi.StringPtrInput +} + +func (ModelArgs) ElementType() reflect.Type { + return reflect.TypeOf((*modelArgs)(nil)).Elem() +} + +type ModelInput interface { + pulumi.Input + + ToModelOutput() ModelOutput + ToModelOutputWithContext(ctx context.Context) ModelOutput +} + +func (*Model) ElementType() reflect.Type { + return reflect.TypeOf((**Model)(nil)).Elem() +} + +func (i *Model) ToModelOutput() ModelOutput { + return i.ToModelOutputWithContext(context.Background()) +} + +func (i *Model) ToModelOutputWithContext(ctx context.Context) ModelOutput { + return pulumi.ToOutputWithContext(ctx, i).(ModelOutput) +} + +type ModelOutput struct{ *pulumi.OutputState } + +func (ModelOutput) ElementType() reflect.Type { + return reflect.TypeOf((**Model)(nil)).Elem() +} + +func (o ModelOutput) ToModelOutput() ModelOutput { + return o +} + +func (o ModelOutput) ToModelOutputWithContext(ctx context.Context) ModelOutput { + return o +} + +// Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. +func (o ModelOutput) ArtifactUri() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.ArtifactUri }).(pulumi.StringOutput) +} + +// Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. +func (o ModelOutput) ContainerSpec() GoogleCloudAiplatformV1ModelContainerSpecResponseOutput { + return o.ApplyT(func(v *Model) GoogleCloudAiplatformV1ModelContainerSpecResponseOutput { return v.ContainerSpec }).(GoogleCloudAiplatformV1ModelContainerSpecResponseOutput) +} + +// Timestamp when this Model was uploaded into Vertex AI. +func (o ModelOutput) CreateTime() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.CreateTime }).(pulumi.StringOutput) +} + +// The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. +func (o ModelOutput) DeployedModels() GoogleCloudAiplatformV1DeployedModelRefResponseArrayOutput { + return o.ApplyT(func(v *Model) GoogleCloudAiplatformV1DeployedModelRefResponseArrayOutput { return v.DeployedModels }).(GoogleCloudAiplatformV1DeployedModelRefResponseArrayOutput) +} + +// The description of the Model. +func (o ModelOutput) Description() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.Description }).(pulumi.StringOutput) +} + +// The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. +func (o ModelOutput) DisplayName() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.DisplayName }).(pulumi.StringOutput) +} + +// Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. +func (o ModelOutput) EncryptionSpec() GoogleCloudAiplatformV1EncryptionSpecResponseOutput { + return o.ApplyT(func(v *Model) GoogleCloudAiplatformV1EncryptionSpecResponseOutput { return v.EncryptionSpec }).(GoogleCloudAiplatformV1EncryptionSpecResponseOutput) +} + +// Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. 
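For reference, a minimal sketch of creating the new Model resource (backed by ModelService.UploadModel) from a Pulumi Go program; the bucket, serving image, and resource names are placeholders, and the container spec assumes the generated GoogleCloudAiplatformV1ModelContainerSpecArgs type with its ImageUri field:

package main

import (
	aiplatform "github.com/pulumi/pulumi-google-native/sdk/go/google/aiplatform/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// DisplayName is the only required argument; Project falls back to the
		// provider's default project when omitted.
		model, err := aiplatform.NewModel(ctx, "example-model", &aiplatform.ModelArgs{
			DisplayName: pulumi.String("example-model"),
			ArtifactUri: pulumi.String("gs://my-bucket/model/"),
			ContainerSpec: &aiplatform.GoogleCloudAiplatformV1ModelContainerSpecArgs{
				ImageUri: pulumi.String("us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-0:latest"),
			},
			Location: pulumi.String("us-central1"),
		})
		if err != nil {
			return err
		}
		ctx.Export("modelName", model.Name)
		return nil
	})
}

Because location and project are registered as replace-on-changes in NewModel above, changing either of them forces a new upload rather than an in-place update.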
+func (o ModelOutput) Etag() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.Etag }).(pulumi.StringOutput) +} + +// The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. +func (o ModelOutput) ExplanationSpec() GoogleCloudAiplatformV1ExplanationSpecResponseOutput { + return o.ApplyT(func(v *Model) GoogleCloudAiplatformV1ExplanationSpecResponseOutput { return v.ExplanationSpec }).(GoogleCloudAiplatformV1ExplanationSpecResponseOutput) +} + +// The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. +func (o ModelOutput) Labels() pulumi.StringMapOutput { + return o.ApplyT(func(v *Model) pulumi.StringMapOutput { return v.Labels }).(pulumi.StringMapOutput) +} + +func (o ModelOutput) Location() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.Location }).(pulumi.StringOutput) +} + +// Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. +func (o ModelOutput) Metadata() pulumi.AnyOutput { + return o.ApplyT(func(v *Model) pulumi.AnyOutput { return v.Metadata }).(pulumi.AnyOutput) +} + +// The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. +func (o ModelOutput) MetadataArtifact() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.MetadataArtifact }).(pulumi.StringOutput) +} + +// Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. +func (o ModelOutput) MetadataSchemaUri() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.MetadataSchemaUri }).(pulumi.StringOutput) +} + +// Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model. 
+func (o ModelOutput) ModelSourceInfo() GoogleCloudAiplatformV1ModelSourceInfoResponseOutput { + return o.ApplyT(func(v *Model) GoogleCloudAiplatformV1ModelSourceInfoResponseOutput { return v.ModelSourceInfo }).(GoogleCloudAiplatformV1ModelSourceInfoResponseOutput) +} + +// The resource name of the Model. +func (o ModelOutput) Name() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput) +} + +// If this Model is a copy of another Model, this contains info about the original. +func (o ModelOutput) OriginalModelInfo() GoogleCloudAiplatformV1ModelOriginalModelInfoResponseOutput { + return o.ApplyT(func(v *Model) GoogleCloudAiplatformV1ModelOriginalModelInfoResponseOutput { return v.OriginalModelInfo }).(GoogleCloudAiplatformV1ModelOriginalModelInfoResponseOutput) +} + +// Optional. This field is populated if the model is produced by a pipeline job. +func (o ModelOutput) PipelineJob() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.PipelineJob }).(pulumi.StringOutput) +} + +// The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. +func (o ModelOutput) PredictSchemata() GoogleCloudAiplatformV1PredictSchemataResponseOutput { + return o.ApplyT(func(v *Model) GoogleCloudAiplatformV1PredictSchemataResponseOutput { return v.PredictSchemata }).(GoogleCloudAiplatformV1PredictSchemataResponseOutput) +} + +func (o ModelOutput) Project() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.Project }).(pulumi.StringOutput) +} + +// When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. +func (o ModelOutput) SupportedDeploymentResourcesTypes() pulumi.StringArrayOutput { + return o.ApplyT(func(v *Model) pulumi.StringArrayOutput { return v.SupportedDeploymentResourcesTypes }).(pulumi.StringArrayOutput) +} + +// The formats in which this Model may be exported. If empty, this Model is not available for export. +func (o ModelOutput) SupportedExportFormats() GoogleCloudAiplatformV1ModelExportFormatResponseArrayOutput { + return o.ApplyT(func(v *Model) GoogleCloudAiplatformV1ModelExportFormatResponseArrayOutput { + return v.SupportedExportFormats + }).(GoogleCloudAiplatformV1ModelExportFormatResponseArrayOutput) +} + +// The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. 
* `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. +func (o ModelOutput) SupportedInputStorageFormats() pulumi.StringArrayOutput { + return o.ApplyT(func(v *Model) pulumi.StringArrayOutput { return v.SupportedInputStorageFormats }).(pulumi.StringArrayOutput) +} + +// The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. +func (o ModelOutput) SupportedOutputStorageFormats() pulumi.StringArrayOutput { + return o.ApplyT(func(v *Model) pulumi.StringArrayOutput { return v.SupportedOutputStorageFormats }).(pulumi.StringArrayOutput) +} + +// The resource name of the TrainingPipeline that uploaded this Model, if any. +func (o ModelOutput) TrainingPipeline() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.TrainingPipeline }).(pulumi.StringOutput) +} + +// Timestamp when this Model was most recently updated. +func (o ModelOutput) UpdateTime() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.UpdateTime }).(pulumi.StringOutput) +} + +// User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. +func (o ModelOutput) VersionAliases() pulumi.StringArrayOutput { + return o.ApplyT(func(v *Model) pulumi.StringArrayOutput { return v.VersionAliases }).(pulumi.StringArrayOutput) +} + +// Timestamp when this version was created. +func (o ModelOutput) VersionCreateTime() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.VersionCreateTime }).(pulumi.StringOutput) +} + +// The description of this version. 
+func (o ModelOutput) VersionDescription() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.VersionDescription }).(pulumi.StringOutput) +} + +// Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation. +func (o ModelOutput) VersionId() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.VersionId }).(pulumi.StringOutput) +} + +// Timestamp when this version was most recently updated. +func (o ModelOutput) VersionUpdateTime() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.VersionUpdateTime }).(pulumi.StringOutput) +} + +func init() { + pulumi.RegisterInputType(reflect.TypeOf((*ModelInput)(nil)).Elem(), &Model{}) + pulumi.RegisterOutputType(ModelOutput{}) +} diff --git a/sdk/go/google/aiplatform/v1beta1/getModel.go b/sdk/go/google/aiplatform/v1beta1/getModel.go new file mode 100644 index 0000000000..8130aae6e4 --- /dev/null +++ b/sdk/go/google/aiplatform/v1beta1/getModel.go @@ -0,0 +1,283 @@ +// Code generated by the Pulumi SDK Generator DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! *** + +package v1beta1 + +import ( + "context" + "reflect" + + "github.com/pulumi/pulumi-google-native/sdk/go/google/internal" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +// Gets a Model. +func LookupModel(ctx *pulumi.Context, args *LookupModelArgs, opts ...pulumi.InvokeOption) (*LookupModelResult, error) { + opts = internal.PkgInvokeDefaultOpts(opts) + var rv LookupModelResult + err := ctx.Invoke("google-native:aiplatform/v1beta1:getModel", args, &rv, opts...) + if err != nil { + return nil, err + } + return &rv, nil +} + +type LookupModelArgs struct { + Location string `pulumi:"location"` + ModelId string `pulumi:"modelId"` + Project *string `pulumi:"project"` +} + +type LookupModelResult struct { + // Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + ArtifactUri string `pulumi:"artifactUri"` + // Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + ContainerSpec GoogleCloudAiplatformV1beta1ModelContainerSpecResponse `pulumi:"containerSpec"` + // Timestamp when this Model was uploaded into Vertex AI. + CreateTime string `pulumi:"createTime"` + // The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. + DeployedModels []GoogleCloudAiplatformV1beta1DeployedModelRefResponse `pulumi:"deployedModels"` + // The description of the Model. + Description string `pulumi:"description"` + // The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + DisplayName string `pulumi:"displayName"` + // Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + EncryptionSpec GoogleCloudAiplatformV1beta1EncryptionSpecResponse `pulumi:"encryptionSpec"` + // Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. 
+ Etag string `pulumi:"etag"` + // The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + ExplanationSpec GoogleCloudAiplatformV1beta1ExplanationSpecResponse `pulumi:"explanationSpec"` + // The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + Labels map[string]string `pulumi:"labels"` + // Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + Metadata interface{} `pulumi:"metadata"` + // The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. + MetadataArtifact string `pulumi:"metadataArtifact"` + // Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + MetadataSchemaUri string `pulumi:"metadataSchemaUri"` + // Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model. + ModelSourceInfo GoogleCloudAiplatformV1beta1ModelSourceInfoResponse `pulumi:"modelSourceInfo"` + // The resource name of the Model. + Name string `pulumi:"name"` + // If this Model is a copy of another Model, this contains info about the original. + OriginalModelInfo GoogleCloudAiplatformV1beta1ModelOriginalModelInfoResponse `pulumi:"originalModelInfo"` + // The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + PredictSchemata GoogleCloudAiplatformV1beta1PredictSchemataResponse `pulumi:"predictSchemata"` + // When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. 
If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. + SupportedDeploymentResourcesTypes []string `pulumi:"supportedDeploymentResourcesTypes"` + // The formats in which this Model may be exported. If empty, this Model is not available for export. + SupportedExportFormats []GoogleCloudAiplatformV1beta1ModelExportFormatResponse `pulumi:"supportedExportFormats"` + // The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + SupportedInputStorageFormats []string `pulumi:"supportedInputStorageFormats"` + // The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + SupportedOutputStorageFormats []string `pulumi:"supportedOutputStorageFormats"` + // The resource name of the TrainingPipeline that uploaded this Model, if any. + TrainingPipeline string `pulumi:"trainingPipeline"` + // Timestamp when this Model was most recently updated. + UpdateTime string `pulumi:"updateTime"` + // User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. 
A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + VersionAliases []string `pulumi:"versionAliases"` + // Timestamp when this version was created. + VersionCreateTime string `pulumi:"versionCreateTime"` + // The description of this version. + VersionDescription string `pulumi:"versionDescription"` + // Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation. + VersionId string `pulumi:"versionId"` + // Timestamp when this version was most recently updated. + VersionUpdateTime string `pulumi:"versionUpdateTime"` +} + +func LookupModelOutput(ctx *pulumi.Context, args LookupModelOutputArgs, opts ...pulumi.InvokeOption) LookupModelResultOutput { + return pulumi.ToOutputWithContext(context.Background(), args). + ApplyT(func(v interface{}) (LookupModelResult, error) { + args := v.(LookupModelArgs) + r, err := LookupModel(ctx, &args, opts...) + var s LookupModelResult + if r != nil { + s = *r + } + return s, err + }).(LookupModelResultOutput) +} + +type LookupModelOutputArgs struct { + Location pulumi.StringInput `pulumi:"location"` + ModelId pulumi.StringInput `pulumi:"modelId"` + Project pulumi.StringPtrInput `pulumi:"project"` +} + +func (LookupModelOutputArgs) ElementType() reflect.Type { + return reflect.TypeOf((*LookupModelArgs)(nil)).Elem() +} + +type LookupModelResultOutput struct{ *pulumi.OutputState } + +func (LookupModelResultOutput) ElementType() reflect.Type { + return reflect.TypeOf((*LookupModelResult)(nil)).Elem() +} + +func (o LookupModelResultOutput) ToLookupModelResultOutput() LookupModelResultOutput { + return o +} + +func (o LookupModelResultOutput) ToLookupModelResultOutputWithContext(ctx context.Context) LookupModelResultOutput { + return o +} + +// Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. +func (o LookupModelResultOutput) ArtifactUri() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.ArtifactUri }).(pulumi.StringOutput) +} + +// Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. +func (o LookupModelResultOutput) ContainerSpec() GoogleCloudAiplatformV1beta1ModelContainerSpecResponseOutput { + return o.ApplyT(func(v LookupModelResult) GoogleCloudAiplatformV1beta1ModelContainerSpecResponse { + return v.ContainerSpec + }).(GoogleCloudAiplatformV1beta1ModelContainerSpecResponseOutput) +} + +// Timestamp when this Model was uploaded into Vertex AI. +func (o LookupModelResultOutput) CreateTime() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.CreateTime }).(pulumi.StringOutput) +} + +// The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. 
+func (o LookupModelResultOutput) DeployedModels() GoogleCloudAiplatformV1beta1DeployedModelRefResponseArrayOutput { + return o.ApplyT(func(v LookupModelResult) []GoogleCloudAiplatformV1beta1DeployedModelRefResponse { + return v.DeployedModels + }).(GoogleCloudAiplatformV1beta1DeployedModelRefResponseArrayOutput) +} + +// The description of the Model. +func (o LookupModelResultOutput) Description() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.Description }).(pulumi.StringOutput) +} + +// The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. +func (o LookupModelResultOutput) DisplayName() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.DisplayName }).(pulumi.StringOutput) +} + +// Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. +func (o LookupModelResultOutput) EncryptionSpec() GoogleCloudAiplatformV1beta1EncryptionSpecResponseOutput { + return o.ApplyT(func(v LookupModelResult) GoogleCloudAiplatformV1beta1EncryptionSpecResponse { return v.EncryptionSpec }).(GoogleCloudAiplatformV1beta1EncryptionSpecResponseOutput) +} + +// Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. +func (o LookupModelResultOutput) Etag() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.Etag }).(pulumi.StringOutput) +} + +// The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. +func (o LookupModelResultOutput) ExplanationSpec() GoogleCloudAiplatformV1beta1ExplanationSpecResponseOutput { + return o.ApplyT(func(v LookupModelResult) GoogleCloudAiplatformV1beta1ExplanationSpecResponse { + return v.ExplanationSpec + }).(GoogleCloudAiplatformV1beta1ExplanationSpecResponseOutput) +} + +// The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. +func (o LookupModelResultOutput) Labels() pulumi.StringMapOutput { + return o.ApplyT(func(v LookupModelResult) map[string]string { return v.Labels }).(pulumi.StringMapOutput) +} + +// Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. +func (o LookupModelResultOutput) Metadata() pulumi.AnyOutput { + return o.ApplyT(func(v LookupModelResult) interface{} { return v.Metadata }).(pulumi.AnyOutput) +} + +// The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. 
+func (o LookupModelResultOutput) MetadataArtifact() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.MetadataArtifact }).(pulumi.StringOutput) +} + +// Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. +func (o LookupModelResultOutput) MetadataSchemaUri() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.MetadataSchemaUri }).(pulumi.StringOutput) +} + +// Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model. +func (o LookupModelResultOutput) ModelSourceInfo() GoogleCloudAiplatformV1beta1ModelSourceInfoResponseOutput { + return o.ApplyT(func(v LookupModelResult) GoogleCloudAiplatformV1beta1ModelSourceInfoResponse { + return v.ModelSourceInfo + }).(GoogleCloudAiplatformV1beta1ModelSourceInfoResponseOutput) +} + +// The resource name of the Model. +func (o LookupModelResultOutput) Name() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.Name }).(pulumi.StringOutput) +} + +// If this Model is a copy of another Model, this contains info about the original. +func (o LookupModelResultOutput) OriginalModelInfo() GoogleCloudAiplatformV1beta1ModelOriginalModelInfoResponseOutput { + return o.ApplyT(func(v LookupModelResult) GoogleCloudAiplatformV1beta1ModelOriginalModelInfoResponse { + return v.OriginalModelInfo + }).(GoogleCloudAiplatformV1beta1ModelOriginalModelInfoResponseOutput) +} + +// The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. +func (o LookupModelResultOutput) PredictSchemata() GoogleCloudAiplatformV1beta1PredictSchemataResponseOutput { + return o.ApplyT(func(v LookupModelResult) GoogleCloudAiplatformV1beta1PredictSchemataResponse { + return v.PredictSchemata + }).(GoogleCloudAiplatformV1beta1PredictSchemataResponseOutput) +} + +// When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. +func (o LookupModelResultOutput) SupportedDeploymentResourcesTypes() pulumi.StringArrayOutput { + return o.ApplyT(func(v LookupModelResult) []string { return v.SupportedDeploymentResourcesTypes }).(pulumi.StringArrayOutput) +} + +// The formats in which this Model may be exported. If empty, this Model is not available for export. 
+func (o LookupModelResultOutput) SupportedExportFormats() GoogleCloudAiplatformV1beta1ModelExportFormatResponseArrayOutput { + return o.ApplyT(func(v LookupModelResult) []GoogleCloudAiplatformV1beta1ModelExportFormatResponse { + return v.SupportedExportFormats + }).(GoogleCloudAiplatformV1beta1ModelExportFormatResponseArrayOutput) +} + +// The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. +func (o LookupModelResultOutput) SupportedInputStorageFormats() pulumi.StringArrayOutput { + return o.ApplyT(func(v LookupModelResult) []string { return v.SupportedInputStorageFormats }).(pulumi.StringArrayOutput) +} + +// The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. +func (o LookupModelResultOutput) SupportedOutputStorageFormats() pulumi.StringArrayOutput { + return o.ApplyT(func(v LookupModelResult) []string { return v.SupportedOutputStorageFormats }).(pulumi.StringArrayOutput) +} + +// The resource name of the TrainingPipeline that uploaded this Model, if any. +func (o LookupModelResultOutput) TrainingPipeline() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.TrainingPipeline }).(pulumi.StringOutput) +} + +// Timestamp when this Model was most recently updated. +func (o LookupModelResultOutput) UpdateTime() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.UpdateTime }).(pulumi.StringOutput) +} + +// User provided version aliases so that a model version can be referenced via alias (i.e. 
`projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. +func (o LookupModelResultOutput) VersionAliases() pulumi.StringArrayOutput { + return o.ApplyT(func(v LookupModelResult) []string { return v.VersionAliases }).(pulumi.StringArrayOutput) +} + +// Timestamp when this version was created. +func (o LookupModelResultOutput) VersionCreateTime() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.VersionCreateTime }).(pulumi.StringOutput) +} + +// The description of this version. +func (o LookupModelResultOutput) VersionDescription() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.VersionDescription }).(pulumi.StringOutput) +} + +// Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation. +func (o LookupModelResultOutput) VersionId() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.VersionId }).(pulumi.StringOutput) +} + +// Timestamp when this version was most recently updated. +func (o LookupModelResultOutput) VersionUpdateTime() pulumi.StringOutput { + return o.ApplyT(func(v LookupModelResult) string { return v.VersionUpdateTime }).(pulumi.StringOutput) +} + +func init() { + pulumi.RegisterOutputType(LookupModelResultOutput{}) +} diff --git a/sdk/go/google/aiplatform/v1beta1/init.go b/sdk/go/google/aiplatform/v1beta1/init.go index ce7e8f9db2..35ab7c2984 100644 --- a/sdk/go/google/aiplatform/v1beta1/init.go +++ b/sdk/go/google/aiplatform/v1beta1/init.go @@ -85,6 +85,8 @@ func (m *module) Construct(ctx *pulumi.Context, name, typ, urn string) (r pulumi r = &MetadataSchema{} case "google-native:aiplatform/v1beta1:MetadataStore": r = &MetadataStore{} + case "google-native:aiplatform/v1beta1:Model": + r = &Model{} case "google-native:aiplatform/v1beta1:ModelDeploymentMonitoringJob": r = &ModelDeploymentMonitoringJob{} case "google-native:aiplatform/v1beta1:ModelIamBinding": diff --git a/sdk/go/google/aiplatform/v1beta1/model.go b/sdk/go/google/aiplatform/v1beta1/model.go new file mode 100644 index 0000000000..60c007822e --- /dev/null +++ b/sdk/go/google/aiplatform/v1beta1/model.go @@ -0,0 +1,399 @@ +// Code generated by the Pulumi SDK Generator DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! *** + +package v1beta1 + +import ( + "context" + "reflect" + + "errors" + "github.com/pulumi/pulumi-google-native/sdk/go/google/internal" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +// Uploads a Model artifact into Vertex AI. +type Model struct { + pulumi.CustomResourceState + + // Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + ArtifactUri pulumi.StringOutput `pulumi:"artifactUri"` + // Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. 
Not present for AutoML Models or Large Models. + ContainerSpec GoogleCloudAiplatformV1beta1ModelContainerSpecResponseOutput `pulumi:"containerSpec"` + // Timestamp when this Model was uploaded into Vertex AI. + CreateTime pulumi.StringOutput `pulumi:"createTime"` + // The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. + DeployedModels GoogleCloudAiplatformV1beta1DeployedModelRefResponseArrayOutput `pulumi:"deployedModels"` + // The description of the Model. + Description pulumi.StringOutput `pulumi:"description"` + // The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + DisplayName pulumi.StringOutput `pulumi:"displayName"` + // Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + EncryptionSpec GoogleCloudAiplatformV1beta1EncryptionSpecResponseOutput `pulumi:"encryptionSpec"` + // Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + Etag pulumi.StringOutput `pulumi:"etag"` + // The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + ExplanationSpec GoogleCloudAiplatformV1beta1ExplanationSpecResponseOutput `pulumi:"explanationSpec"` + // The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + Labels pulumi.StringMapOutput `pulumi:"labels"` + Location pulumi.StringOutput `pulumi:"location"` + // Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + Metadata pulumi.AnyOutput `pulumi:"metadata"` + // The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. + MetadataArtifact pulumi.StringOutput `pulumi:"metadataArtifact"` + // Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. 
The output URI will point to a location where the user only has a read access. + MetadataSchemaUri pulumi.StringOutput `pulumi:"metadataSchemaUri"` + // Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model. + ModelSourceInfo GoogleCloudAiplatformV1beta1ModelSourceInfoResponseOutput `pulumi:"modelSourceInfo"` + // The resource name of the Model. + Name pulumi.StringOutput `pulumi:"name"` + // If this Model is a copy of another Model, this contains info about the original. + OriginalModelInfo GoogleCloudAiplatformV1beta1ModelOriginalModelInfoResponseOutput `pulumi:"originalModelInfo"` + // The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + PredictSchemata GoogleCloudAiplatformV1beta1PredictSchemataResponseOutput `pulumi:"predictSchemata"` + Project pulumi.StringOutput `pulumi:"project"` + // When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. + SupportedDeploymentResourcesTypes pulumi.StringArrayOutput `pulumi:"supportedDeploymentResourcesTypes"` + // The formats in which this Model may be exported. If empty, this Model is not available for export. + SupportedExportFormats GoogleCloudAiplatformV1beta1ModelExportFormatResponseArrayOutput `pulumi:"supportedExportFormats"` + // The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + SupportedInputStorageFormats pulumi.StringArrayOutput `pulumi:"supportedInputStorageFormats"` + // The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). 
The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + SupportedOutputStorageFormats pulumi.StringArrayOutput `pulumi:"supportedOutputStorageFormats"` + // The resource name of the TrainingPipeline that uploaded this Model, if any. + TrainingPipeline pulumi.StringOutput `pulumi:"trainingPipeline"` + // Timestamp when this Model was most recently updated. + UpdateTime pulumi.StringOutput `pulumi:"updateTime"` + // User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + VersionAliases pulumi.StringArrayOutput `pulumi:"versionAliases"` + // Timestamp when this version was created. + VersionCreateTime pulumi.StringOutput `pulumi:"versionCreateTime"` + // The description of this version. + VersionDescription pulumi.StringOutput `pulumi:"versionDescription"` + // Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation. + VersionId pulumi.StringOutput `pulumi:"versionId"` + // Timestamp when this version was most recently updated. + VersionUpdateTime pulumi.StringOutput `pulumi:"versionUpdateTime"` +} + +// NewModel registers a new resource with the given unique name, arguments, and options. +func NewModel(ctx *pulumi.Context, + name string, args *ModelArgs, opts ...pulumi.ResourceOption) (*Model, error) { + if args == nil { + return nil, errors.New("missing one or more required arguments") + } + + if args.DisplayName == nil { + return nil, errors.New("invalid value for required argument 'DisplayName'") + } + replaceOnChanges := pulumi.ReplaceOnChanges([]string{ + "location", + "project", + }) + opts = append(opts, replaceOnChanges) + opts = internal.PkgResourceDefaultOpts(opts) + var resource Model + err := ctx.RegisterResource("google-native:aiplatform/v1beta1:Model", name, args, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// GetModel gets an existing Model resource's state with the given name, ID, and optional +// state properties that are used to uniquely qualify the lookup (nil if not required). +func GetModel(ctx *pulumi.Context, + name string, id pulumi.IDInput, state *ModelState, opts ...pulumi.ResourceOption) (*Model, error) { + var resource Model + err := ctx.ReadResource("google-native:aiplatform/v1beta1:Model", name, id, state, &resource, opts...) 
+ if err != nil { + return nil, err + } + return &resource, nil +} + +// Input properties used for looking up and filtering Model resources. +type modelState struct { +} + +type ModelState struct { +} + +func (ModelState) ElementType() reflect.Type { + return reflect.TypeOf((*modelState)(nil)).Elem() +} + +type modelArgs struct { + // Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + ArtifactUri *string `pulumi:"artifactUri"` + // Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + ContainerSpec *GoogleCloudAiplatformV1beta1ModelContainerSpec `pulumi:"containerSpec"` + // The description of the Model. + Description *string `pulumi:"description"` + // The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + DisplayName string `pulumi:"displayName"` + // Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + EncryptionSpec *GoogleCloudAiplatformV1beta1EncryptionSpec `pulumi:"encryptionSpec"` + // Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + Etag *string `pulumi:"etag"` + // The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + ExplanationSpec *GoogleCloudAiplatformV1beta1ExplanationSpec `pulumi:"explanationSpec"` + // The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + Labels map[string]string `pulumi:"labels"` + Location *string `pulumi:"location"` + // Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + Metadata interface{} `pulumi:"metadata"` + // Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. 
The output URI will point to a location where the user only has a read access. + MetadataSchemaUri *string `pulumi:"metadataSchemaUri"` + // Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. + ModelId *string `pulumi:"modelId"` + // The resource name of the Model. + Name *string `pulumi:"name"` + // Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version. + ParentModel *string `pulumi:"parentModel"` + // The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + PredictSchemata *GoogleCloudAiplatformV1beta1PredictSchemata `pulumi:"predictSchemata"` + Project *string `pulumi:"project"` + // Optional. The user-provided custom service account to use to do the model upload. If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.). + ServiceAccount *string `pulumi:"serviceAccount"` + // User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + VersionAliases []string `pulumi:"versionAliases"` + // The description of this version. + VersionDescription *string `pulumi:"versionDescription"` +} + +// The set of arguments for constructing a Model resource. +type ModelArgs struct { + // Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + ArtifactUri pulumi.StringPtrInput + // Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + ContainerSpec GoogleCloudAiplatformV1beta1ModelContainerSpecPtrInput + // The description of the Model. + Description pulumi.StringPtrInput + // The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + DisplayName pulumi.StringInput + // Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + EncryptionSpec GoogleCloudAiplatformV1beta1EncryptionSpecPtrInput + // Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + Etag pulumi.StringPtrInput + // The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. 
The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + ExplanationSpec GoogleCloudAiplatformV1beta1ExplanationSpecPtrInput + // The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + Labels pulumi.StringMapInput + Location pulumi.StringPtrInput + // Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + Metadata pulumi.Input + // Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + MetadataSchemaUri pulumi.StringPtrInput + // Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. + ModelId pulumi.StringPtrInput + // The resource name of the Model. + Name pulumi.StringPtrInput + // Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version. + ParentModel pulumi.StringPtrInput + // The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + PredictSchemata GoogleCloudAiplatformV1beta1PredictSchemataPtrInput + Project pulumi.StringPtrInput + // Optional. The user-provided custom service account to use to do the model upload. If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.). + ServiceAccount pulumi.StringPtrInput + // User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. 
The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + VersionAliases pulumi.StringArrayInput + // The description of this version. + VersionDescription pulumi.StringPtrInput +} + +func (ModelArgs) ElementType() reflect.Type { + return reflect.TypeOf((*modelArgs)(nil)).Elem() +} + +type ModelInput interface { + pulumi.Input + + ToModelOutput() ModelOutput + ToModelOutputWithContext(ctx context.Context) ModelOutput +} + +func (*Model) ElementType() reflect.Type { + return reflect.TypeOf((**Model)(nil)).Elem() +} + +func (i *Model) ToModelOutput() ModelOutput { + return i.ToModelOutputWithContext(context.Background()) +} + +func (i *Model) ToModelOutputWithContext(ctx context.Context) ModelOutput { + return pulumi.ToOutputWithContext(ctx, i).(ModelOutput) +} + +type ModelOutput struct{ *pulumi.OutputState } + +func (ModelOutput) ElementType() reflect.Type { + return reflect.TypeOf((**Model)(nil)).Elem() +} + +func (o ModelOutput) ToModelOutput() ModelOutput { + return o +} + +func (o ModelOutput) ToModelOutputWithContext(ctx context.Context) ModelOutput { + return o +} + +// Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. +func (o ModelOutput) ArtifactUri() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.ArtifactUri }).(pulumi.StringOutput) +} + +// Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. +func (o ModelOutput) ContainerSpec() GoogleCloudAiplatformV1beta1ModelContainerSpecResponseOutput { + return o.ApplyT(func(v *Model) GoogleCloudAiplatformV1beta1ModelContainerSpecResponseOutput { return v.ContainerSpec }).(GoogleCloudAiplatformV1beta1ModelContainerSpecResponseOutput) +} + +// Timestamp when this Model was uploaded into Vertex AI. +func (o ModelOutput) CreateTime() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.CreateTime }).(pulumi.StringOutput) +} + +// The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. +func (o ModelOutput) DeployedModels() GoogleCloudAiplatformV1beta1DeployedModelRefResponseArrayOutput { + return o.ApplyT(func(v *Model) GoogleCloudAiplatformV1beta1DeployedModelRefResponseArrayOutput { + return v.DeployedModels + }).(GoogleCloudAiplatformV1beta1DeployedModelRefResponseArrayOutput) +} + +// The description of the Model. +func (o ModelOutput) Description() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.Description }).(pulumi.StringOutput) +} + +// The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. +func (o ModelOutput) DisplayName() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.DisplayName }).(pulumi.StringOutput) +} + +// Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. 
+func (o ModelOutput) EncryptionSpec() GoogleCloudAiplatformV1beta1EncryptionSpecResponseOutput { + return o.ApplyT(func(v *Model) GoogleCloudAiplatformV1beta1EncryptionSpecResponseOutput { return v.EncryptionSpec }).(GoogleCloudAiplatformV1beta1EncryptionSpecResponseOutput) +} + +// Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. +func (o ModelOutput) Etag() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.Etag }).(pulumi.StringOutput) +} + +// The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. +func (o ModelOutput) ExplanationSpec() GoogleCloudAiplatformV1beta1ExplanationSpecResponseOutput { + return o.ApplyT(func(v *Model) GoogleCloudAiplatformV1beta1ExplanationSpecResponseOutput { return v.ExplanationSpec }).(GoogleCloudAiplatformV1beta1ExplanationSpecResponseOutput) +} + +// The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. +func (o ModelOutput) Labels() pulumi.StringMapOutput { + return o.ApplyT(func(v *Model) pulumi.StringMapOutput { return v.Labels }).(pulumi.StringMapOutput) +} + +func (o ModelOutput) Location() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.Location }).(pulumi.StringOutput) +} + +// Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. +func (o ModelOutput) Metadata() pulumi.AnyOutput { + return o.ApplyT(func(v *Model) pulumi.AnyOutput { return v.Metadata }).(pulumi.AnyOutput) +} + +// The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. +func (o ModelOutput) MetadataArtifact() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.MetadataArtifact }).(pulumi.StringOutput) +} + +// Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. 
+func (o ModelOutput) MetadataSchemaUri() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.MetadataSchemaUri }).(pulumi.StringOutput) +} + +// Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model. +func (o ModelOutput) ModelSourceInfo() GoogleCloudAiplatformV1beta1ModelSourceInfoResponseOutput { + return o.ApplyT(func(v *Model) GoogleCloudAiplatformV1beta1ModelSourceInfoResponseOutput { return v.ModelSourceInfo }).(GoogleCloudAiplatformV1beta1ModelSourceInfoResponseOutput) +} + +// The resource name of the Model. +func (o ModelOutput) Name() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput) +} + +// If this Model is a copy of another Model, this contains info about the original. +func (o ModelOutput) OriginalModelInfo() GoogleCloudAiplatformV1beta1ModelOriginalModelInfoResponseOutput { + return o.ApplyT(func(v *Model) GoogleCloudAiplatformV1beta1ModelOriginalModelInfoResponseOutput { + return v.OriginalModelInfo + }).(GoogleCloudAiplatformV1beta1ModelOriginalModelInfoResponseOutput) +} + +// The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. +func (o ModelOutput) PredictSchemata() GoogleCloudAiplatformV1beta1PredictSchemataResponseOutput { + return o.ApplyT(func(v *Model) GoogleCloudAiplatformV1beta1PredictSchemataResponseOutput { return v.PredictSchemata }).(GoogleCloudAiplatformV1beta1PredictSchemataResponseOutput) +} + +func (o ModelOutput) Project() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.Project }).(pulumi.StringOutput) +} + +// When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. +func (o ModelOutput) SupportedDeploymentResourcesTypes() pulumi.StringArrayOutput { + return o.ApplyT(func(v *Model) pulumi.StringArrayOutput { return v.SupportedDeploymentResourcesTypes }).(pulumi.StringArrayOutput) +} + +// The formats in which this Model may be exported. If empty, this Model is not available for export. +func (o ModelOutput) SupportedExportFormats() GoogleCloudAiplatformV1beta1ModelExportFormatResponseArrayOutput { + return o.ApplyT(func(v *Model) GoogleCloudAiplatformV1beta1ModelExportFormatResponseArrayOutput { + return v.SupportedExportFormats + }).(GoogleCloudAiplatformV1beta1ModelExportFormatResponseArrayOutput) +} + +// The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. 
* `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. +func (o ModelOutput) SupportedInputStorageFormats() pulumi.StringArrayOutput { + return o.ApplyT(func(v *Model) pulumi.StringArrayOutput { return v.SupportedInputStorageFormats }).(pulumi.StringArrayOutput) +} + +// The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. +func (o ModelOutput) SupportedOutputStorageFormats() pulumi.StringArrayOutput { + return o.ApplyT(func(v *Model) pulumi.StringArrayOutput { return v.SupportedOutputStorageFormats }).(pulumi.StringArrayOutput) +} + +// The resource name of the TrainingPipeline that uploaded this Model, if any. +func (o ModelOutput) TrainingPipeline() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.TrainingPipeline }).(pulumi.StringOutput) +} + +// Timestamp when this Model was most recently updated. +func (o ModelOutput) UpdateTime() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.UpdateTime }).(pulumi.StringOutput) +} + +// User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. +func (o ModelOutput) VersionAliases() pulumi.StringArrayOutput { + return o.ApplyT(func(v *Model) pulumi.StringArrayOutput { return v.VersionAliases }).(pulumi.StringArrayOutput) +} + +// Timestamp when this version was created. +func (o ModelOutput) VersionCreateTime() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.VersionCreateTime }).(pulumi.StringOutput) +} + +// The description of this version. 
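A minimal usage sketch of the ModelOutput getters in this file, assuming the standard generated GetModel reader (the same shape as GetItem later in this patch) and a placeholder model ID; the exported fields (Name, VersionAliases) are the plain output fields that these getters read, not part of this hunk.

// Hypothetical sketch: read an existing Vertex AI Model by ID and export a few fields.
package main

import (
	aiplatform "github.com/pulumi/pulumi-google-native/sdk/go/google/aiplatform/v1beta1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// GetModel is assumed to follow the generated reader pattern; the ID is a placeholder.
		model, err := aiplatform.GetModel(ctx, "existing-model",
			pulumi.ID("projects/my-project/locations/us-central1/models/1234567890"), nil)
		if err != nil {
			return err
		}
		ctx.Export("modelName", model.Name)
		ctx.Export("versionAliases", model.VersionAliases)
		return nil
	})
}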
+func (o ModelOutput) VersionDescription() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.VersionDescription }).(pulumi.StringOutput) +} + +// Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation. +func (o ModelOutput) VersionId() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.VersionId }).(pulumi.StringOutput) +} + +// Timestamp when this version was most recently updated. +func (o ModelOutput) VersionUpdateTime() pulumi.StringOutput { + return o.ApplyT(func(v *Model) pulumi.StringOutput { return v.VersionUpdateTime }).(pulumi.StringOutput) +} + +func init() { + pulumi.RegisterInputType(reflect.TypeOf((*ModelInput)(nil)).Elem(), &Model{}) + pulumi.RegisterOutputType(ModelOutput{}) +} diff --git a/sdk/go/google/cloudsearch/v1/getItem.go b/sdk/go/google/cloudsearch/v1/getItem.go new file mode 100644 index 0000000000..2dd6842dbc --- /dev/null +++ b/sdk/go/google/cloudsearch/v1/getItem.go @@ -0,0 +1,145 @@ +// Code generated by the Pulumi SDK Generator DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! *** + +package v1 + +import ( + "context" + "reflect" + + "github.com/pulumi/pulumi-google-native/sdk/go/google/internal" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +// Gets Item resource by item name. This API requires an admin or service account to execute. The service account used is the one whitelisted in the corresponding data source. +func LookupItem(ctx *pulumi.Context, args *LookupItemArgs, opts ...pulumi.InvokeOption) (*LookupItemResult, error) { + opts = internal.PkgInvokeDefaultOpts(opts) + var rv LookupItemResult + err := ctx.Invoke("google-native:cloudsearch/v1:getItem", args, &rv, opts...) + if err != nil { + return nil, err + } + return &rv, nil +} + +type LookupItemArgs struct { + ConnectorName *string `pulumi:"connectorName"` + DatasourceId string `pulumi:"datasourceId"` + DebugOptionsEnableDebugging *bool `pulumi:"debugOptionsEnableDebugging"` + ItemId string `pulumi:"itemId"` +} + +type LookupItemResult struct { + // Access control list for this item. + Acl ItemAclResponse `pulumi:"acl"` + // Item content to be indexed and made text searchable. + Content ItemContentResponse `pulumi:"content"` + // The type for this item. + ItemType string `pulumi:"itemType"` + // The metadata information. + Metadata ItemMetadataResponse `pulumi:"metadata"` + // The name of the Item. Format: datasources/{source_id}/items/{item_id} This is a required field. The maximum length is 1536 characters. + Name string `pulumi:"name"` + // Additional state connector can store for this item. The maximum length is 10000 bytes. + Payload string `pulumi:"payload"` + // Queue this item belongs to. The maximum length is 100 characters. + Queue string `pulumi:"queue"` + // Status of the item. Output only field. + Status ItemStatusResponse `pulumi:"status"` + // The structured data for the item that should conform to a registered object definition in the schema for the data source. + StructuredData ItemStructuredDataResponse `pulumi:"structuredData"` + // The indexing system stores the version from the datasource as a byte string and compares the Item version in the index to the version of the queued Item using lexical ordering. 
Cloud Search Indexing won't index or delete any queued item with a version value that is less than or equal to the version of the currently indexed item. The maximum length for this field is 1024 bytes. For information on how item version affects the deletion process, refer to [Handle revisions after manual deletes](https://developers.google.com/cloud-search/docs/guides/operations). + Version string `pulumi:"version"` +} + +func LookupItemOutput(ctx *pulumi.Context, args LookupItemOutputArgs, opts ...pulumi.InvokeOption) LookupItemResultOutput { + return pulumi.ToOutputWithContext(context.Background(), args). + ApplyT(func(v interface{}) (LookupItemResult, error) { + args := v.(LookupItemArgs) + r, err := LookupItem(ctx, &args, opts...) + var s LookupItemResult + if r != nil { + s = *r + } + return s, err + }).(LookupItemResultOutput) +} + +type LookupItemOutputArgs struct { + ConnectorName pulumi.StringPtrInput `pulumi:"connectorName"` + DatasourceId pulumi.StringInput `pulumi:"datasourceId"` + DebugOptionsEnableDebugging pulumi.BoolPtrInput `pulumi:"debugOptionsEnableDebugging"` + ItemId pulumi.StringInput `pulumi:"itemId"` +} + +func (LookupItemOutputArgs) ElementType() reflect.Type { + return reflect.TypeOf((*LookupItemArgs)(nil)).Elem() +} + +type LookupItemResultOutput struct{ *pulumi.OutputState } + +func (LookupItemResultOutput) ElementType() reflect.Type { + return reflect.TypeOf((*LookupItemResult)(nil)).Elem() +} + +func (o LookupItemResultOutput) ToLookupItemResultOutput() LookupItemResultOutput { + return o +} + +func (o LookupItemResultOutput) ToLookupItemResultOutputWithContext(ctx context.Context) LookupItemResultOutput { + return o +} + +// Access control list for this item. +func (o LookupItemResultOutput) Acl() ItemAclResponseOutput { + return o.ApplyT(func(v LookupItemResult) ItemAclResponse { return v.Acl }).(ItemAclResponseOutput) +} + +// Item content to be indexed and made text searchable. +func (o LookupItemResultOutput) Content() ItemContentResponseOutput { + return o.ApplyT(func(v LookupItemResult) ItemContentResponse { return v.Content }).(ItemContentResponseOutput) +} + +// The type for this item. +func (o LookupItemResultOutput) ItemType() pulumi.StringOutput { + return o.ApplyT(func(v LookupItemResult) string { return v.ItemType }).(pulumi.StringOutput) +} + +// The metadata information. +func (o LookupItemResultOutput) Metadata() ItemMetadataResponseOutput { + return o.ApplyT(func(v LookupItemResult) ItemMetadataResponse { return v.Metadata }).(ItemMetadataResponseOutput) +} + +// The name of the Item. Format: datasources/{source_id}/items/{item_id} This is a required field. The maximum length is 1536 characters. +func (o LookupItemResultOutput) Name() pulumi.StringOutput { + return o.ApplyT(func(v LookupItemResult) string { return v.Name }).(pulumi.StringOutput) +} + +// Additional state connector can store for this item. The maximum length is 10000 bytes. +func (o LookupItemResultOutput) Payload() pulumi.StringOutput { + return o.ApplyT(func(v LookupItemResult) string { return v.Payload }).(pulumi.StringOutput) +} + +// Queue this item belongs to. The maximum length is 100 characters. +func (o LookupItemResultOutput) Queue() pulumi.StringOutput { + return o.ApplyT(func(v LookupItemResult) string { return v.Queue }).(pulumi.StringOutput) +} + +// Status of the item. Output only field. 
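A minimal sketch of calling the LookupItem invoke above from a Pulumi Go program; the datasource and item IDs are placeholders, not values taken from this patch.

// Hypothetical sketch: look up an existing Cloud Search item and export its resource name.
package main

import (
	cloudsearch "github.com/pulumi/pulumi-google-native/sdk/go/google/cloudsearch/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// DatasourceId and ItemId are placeholders.
		item, err := cloudsearch.LookupItem(ctx, &cloudsearch.LookupItemArgs{
			DatasourceId: "example-datasource",
			ItemId:       "example-item-id",
		})
		if err != nil {
			return err
		}
		ctx.Export("itemName", pulumi.String(item.Name))
		return nil
	})
}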
+func (o LookupItemResultOutput) Status() ItemStatusResponseOutput { + return o.ApplyT(func(v LookupItemResult) ItemStatusResponse { return v.Status }).(ItemStatusResponseOutput) +} + +// The structured data for the item that should conform to a registered object definition in the schema for the data source. +func (o LookupItemResultOutput) StructuredData() ItemStructuredDataResponseOutput { + return o.ApplyT(func(v LookupItemResult) ItemStructuredDataResponse { return v.StructuredData }).(ItemStructuredDataResponseOutput) +} + +// The indexing system stores the version from the datasource as a byte string and compares the Item version in the index to the version of the queued Item using lexical ordering. Cloud Search Indexing won't index or delete any queued item with a version value that is less than or equal to the version of the currently indexed item. The maximum length for this field is 1024 bytes. For information on how item version affects the deletion process, refer to [Handle revisions after manual deletes](https://developers.google.com/cloud-search/docs/guides/operations). +func (o LookupItemResultOutput) Version() pulumi.StringOutput { + return o.ApplyT(func(v LookupItemResult) string { return v.Version }).(pulumi.StringOutput) +} + +func init() { + pulumi.RegisterOutputType(LookupItemResultOutput{}) +} diff --git a/sdk/go/google/cloudsearch/v1/init.go b/sdk/go/google/cloudsearch/v1/init.go index 163998ab0c..b04d2eaef3 100644 --- a/sdk/go/google/cloudsearch/v1/init.go +++ b/sdk/go/google/cloudsearch/v1/init.go @@ -23,6 +23,8 @@ func (m *module) Construct(ctx *pulumi.Context, name, typ, urn string) (r pulumi switch typ { case "google-native:cloudsearch/v1:DataSource": r = &DataSource{} + case "google-native:cloudsearch/v1:Item": + r = &Item{} case "google-native:cloudsearch/v1:SearchApplication": r = &SearchApplication{} default: diff --git a/sdk/go/google/cloudsearch/v1/item.go b/sdk/go/google/cloudsearch/v1/item.go new file mode 100644 index 0000000000..bc6b9aeade --- /dev/null +++ b/sdk/go/google/cloudsearch/v1/item.go @@ -0,0 +1,211 @@ +// Code generated by the Pulumi SDK Generator DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! *** + +package v1 + +import ( + "context" + "reflect" + + "errors" + "github.com/pulumi/pulumi-google-native/sdk/go/google/internal" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +// Creates an upload session for uploading item content. For items smaller than 100 KB, it's easier to embed the content inline within an index request. This API requires an admin or service account to execute. The service account used is the one whitelisted in the corresponding data source. +// Auto-naming is currently not supported for this resource. +type Item struct { + pulumi.CustomResourceState + + // Access control list for this item. + Acl ItemAclResponseOutput `pulumi:"acl"` + // Item content to be indexed and made text searchable. + Content ItemContentResponseOutput `pulumi:"content"` + DatasourceId pulumi.StringOutput `pulumi:"datasourceId"` + ItemId pulumi.StringOutput `pulumi:"itemId"` + // The type for this item. + ItemType pulumi.StringOutput `pulumi:"itemType"` + // The metadata information. + Metadata ItemMetadataResponseOutput `pulumi:"metadata"` + // The name of the Item. Format: datasources/{source_id}/items/{item_id} This is a required field. The maximum length is 1536 characters. + Name pulumi.StringOutput `pulumi:"name"` + // Additional state connector can store for this item. 
The maximum length is 10000 bytes. + Payload pulumi.StringOutput `pulumi:"payload"` + // Queue this item belongs to. The maximum length is 100 characters. + Queue pulumi.StringOutput `pulumi:"queue"` + // Status of the item. Output only field. + Status ItemStatusResponseOutput `pulumi:"status"` + // The structured data for the item that should conform to a registered object definition in the schema for the data source. + StructuredData ItemStructuredDataResponseOutput `pulumi:"structuredData"` + // The indexing system stores the version from the datasource as a byte string and compares the Item version in the index to the version of the queued Item using lexical ordering. Cloud Search Indexing won't index or delete any queued item with a version value that is less than or equal to the version of the currently indexed item. The maximum length for this field is 1024 bytes. For information on how item version affects the deletion process, refer to [Handle revisions after manual deletes](https://developers.google.com/cloud-search/docs/guides/operations). + Version pulumi.StringOutput `pulumi:"version"` +} + +// NewItem registers a new resource with the given unique name, arguments, and options. +func NewItem(ctx *pulumi.Context, + name string, args *ItemArgs, opts ...pulumi.ResourceOption) (*Item, error) { + if args == nil { + return nil, errors.New("missing one or more required arguments") + } + + if args.DatasourceId == nil { + return nil, errors.New("invalid value for required argument 'DatasourceId'") + } + if args.ItemId == nil { + return nil, errors.New("invalid value for required argument 'ItemId'") + } + replaceOnChanges := pulumi.ReplaceOnChanges([]string{ + "datasourceId", + "itemId", + }) + opts = append(opts, replaceOnChanges) + opts = internal.PkgResourceDefaultOpts(opts) + var resource Item + err := ctx.RegisterResource("google-native:cloudsearch/v1:Item", name, args, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// GetItem gets an existing Item resource's state with the given name, ID, and optional +// state properties that are used to uniquely qualify the lookup (nil if not required). +func GetItem(ctx *pulumi.Context, + name string, id pulumi.IDInput, state *ItemState, opts ...pulumi.ResourceOption) (*Item, error) { + var resource Item + err := ctx.ReadResource("google-native:cloudsearch/v1:Item", name, id, state, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// Input properties used for looking up and filtering Item resources. +type itemState struct { +} + +type ItemState struct { +} + +func (ItemState) ElementType() reflect.Type { + return reflect.TypeOf((*itemState)(nil)).Elem() +} + +type itemArgs struct { + // The name of the connector making this call. Format: datasources/{source_id}/connectors/{ID} + ConnectorName *string `pulumi:"connectorName"` + DatasourceId string `pulumi:"datasourceId"` + // Common debug options. + DebugOptions *DebugOptions `pulumi:"debugOptions"` + ItemId string `pulumi:"itemId"` +} + +// The set of arguments for constructing an Item resource. +type ItemArgs struct { + // The name of the connector making this call. Format: datasources/{source_id}/connectors/{ID} + ConnectorName pulumi.StringPtrInput + DatasourceId pulumi.StringInput + // Common debug options.
+ DebugOptions DebugOptionsPtrInput + ItemId pulumi.StringInput +} + +func (ItemArgs) ElementType() reflect.Type { + return reflect.TypeOf((*itemArgs)(nil)).Elem() +} + +type ItemInput interface { + pulumi.Input + + ToItemOutput() ItemOutput + ToItemOutputWithContext(ctx context.Context) ItemOutput +} + +func (*Item) ElementType() reflect.Type { + return reflect.TypeOf((**Item)(nil)).Elem() +} + +func (i *Item) ToItemOutput() ItemOutput { + return i.ToItemOutputWithContext(context.Background()) +} + +func (i *Item) ToItemOutputWithContext(ctx context.Context) ItemOutput { + return pulumi.ToOutputWithContext(ctx, i).(ItemOutput) +} + +type ItemOutput struct{ *pulumi.OutputState } + +func (ItemOutput) ElementType() reflect.Type { + return reflect.TypeOf((**Item)(nil)).Elem() +} + +func (o ItemOutput) ToItemOutput() ItemOutput { + return o +} + +func (o ItemOutput) ToItemOutputWithContext(ctx context.Context) ItemOutput { + return o +} + +// Access control list for this item. +func (o ItemOutput) Acl() ItemAclResponseOutput { + return o.ApplyT(func(v *Item) ItemAclResponseOutput { return v.Acl }).(ItemAclResponseOutput) +} + +// Item content to be indexed and made text searchable. +func (o ItemOutput) Content() ItemContentResponseOutput { + return o.ApplyT(func(v *Item) ItemContentResponseOutput { return v.Content }).(ItemContentResponseOutput) +} + +func (o ItemOutput) DatasourceId() pulumi.StringOutput { + return o.ApplyT(func(v *Item) pulumi.StringOutput { return v.DatasourceId }).(pulumi.StringOutput) +} + +func (o ItemOutput) ItemId() pulumi.StringOutput { + return o.ApplyT(func(v *Item) pulumi.StringOutput { return v.ItemId }).(pulumi.StringOutput) +} + +// The type for this item. +func (o ItemOutput) ItemType() pulumi.StringOutput { + return o.ApplyT(func(v *Item) pulumi.StringOutput { return v.ItemType }).(pulumi.StringOutput) +} + +// The metadata information. +func (o ItemOutput) Metadata() ItemMetadataResponseOutput { + return o.ApplyT(func(v *Item) ItemMetadataResponseOutput { return v.Metadata }).(ItemMetadataResponseOutput) +} + +// The name of the Item. Format: datasources/{source_id}/items/{item_id} This is a required field. The maximum length is 1536 characters. +func (o ItemOutput) Name() pulumi.StringOutput { + return o.ApplyT(func(v *Item) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput) +} + +// Additional state connector can store for this item. The maximum length is 10000 bytes. +func (o ItemOutput) Payload() pulumi.StringOutput { + return o.ApplyT(func(v *Item) pulumi.StringOutput { return v.Payload }).(pulumi.StringOutput) +} + +// Queue this item belongs to. The maximum length is 100 characters. +func (o ItemOutput) Queue() pulumi.StringOutput { + return o.ApplyT(func(v *Item) pulumi.StringOutput { return v.Queue }).(pulumi.StringOutput) +} + +// Status of the item. Output only field. +func (o ItemOutput) Status() ItemStatusResponseOutput { + return o.ApplyT(func(v *Item) ItemStatusResponseOutput { return v.Status }).(ItemStatusResponseOutput) +} + +// The structured data for the item that should conform to a registered object definition in the schema for the data source. +func (o ItemOutput) StructuredData() ItemStructuredDataResponseOutput { + return o.ApplyT(func(v *Item) ItemStructuredDataResponseOutput { return v.StructuredData }).(ItemStructuredDataResponseOutput) +} + +// The indexing system stores the version from the datasource as a byte string and compares the Item version in the index to the version of the queued Item using lexical ordering. 
Cloud Search Indexing won't index or delete any queued item with a version value that is less than or equal to the version of the currently indexed item. The maximum length for this field is 1024 bytes. For information on how item version affects the deletion process, refer to [Handle revisions after manual deletes](https://developers.google.com/cloud-search/docs/guides/operations). +func (o ItemOutput) Version() pulumi.StringOutput { + return o.ApplyT(func(v *Item) pulumi.StringOutput { return v.Version }).(pulumi.StringOutput) +} + +func init() { + pulumi.RegisterInputType(reflect.TypeOf((*ItemInput)(nil)).Elem(), &Item{}) + pulumi.RegisterOutputType(ItemOutput{}) +} diff --git a/sdk/go/google/cloudsearch/v1/pulumiTypes.go b/sdk/go/google/cloudsearch/v1/pulumiTypes.go index 5ebd59569c..08727cdff3 100644 --- a/sdk/go/google/cloudsearch/v1/pulumiTypes.go +++ b/sdk/go/google/cloudsearch/v1/pulumiTypes.go @@ -200,6 +200,59 @@ func (o CompositeFilterResponseOutput) SubFilters() FilterResponseArrayOutput { return o.ApplyT(func(v CompositeFilterResponse) []FilterResponse { return v.SubFilters }).(FilterResponseArrayOutput) } +// A named attribute associated with an item which can be used for influencing the ranking of the item based on the context in the request. +type ContextAttributeResponse struct { + // The name of the attribute. It should not be empty. The maximum length is 32 characters. The name must start with a letter and can only contain letters (A-Z, a-z) or numbers (0-9). The name will be normalized (lower-cased) before being matched. + Name string `pulumi:"name"` + // Text values of the attribute. The maximum number of elements is 10. The maximum length of an element in the array is 32 characters. The value will be normalized (lower-cased) before being matched. + Values []string `pulumi:"values"` +} + +// A named attribute associated with an item which can be used for influencing the ranking of the item based on the context in the request. +type ContextAttributeResponseOutput struct{ *pulumi.OutputState } + +func (ContextAttributeResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*ContextAttributeResponse)(nil)).Elem() +} + +func (o ContextAttributeResponseOutput) ToContextAttributeResponseOutput() ContextAttributeResponseOutput { + return o +} + +func (o ContextAttributeResponseOutput) ToContextAttributeResponseOutputWithContext(ctx context.Context) ContextAttributeResponseOutput { + return o +} + +// The name of the attribute. It should not be empty. The maximum length is 32 characters. The name must start with a letter and can only contain letters (A-Z, a-z) or numbers (0-9). The name will be normalized (lower-cased) before being matched. +func (o ContextAttributeResponseOutput) Name() pulumi.StringOutput { + return o.ApplyT(func(v ContextAttributeResponse) string { return v.Name }).(pulumi.StringOutput) +} + +// Text values of the attribute. The maximum number of elements is 10. The maximum length of an element in the array is 32 characters. The value will be normalized (lower-cased) before being matched. 
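A minimal sketch of registering the new Item resource defined above, with placeholder datasource and item IDs; the DebugOptions value is optional and included only to illustrate the DebugOptionsArgs input type that follows in pulumiTypes.go.

// Hypothetical sketch: create an upload session for a datasource item via the Item resource.
package main

import (
	cloudsearch "github.com/pulumi/pulumi-google-native/sdk/go/google/cloudsearch/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// DatasourceId and ItemId are placeholders; both force replacement on change.
		_, err := cloudsearch.NewItem(ctx, "example-item", &cloudsearch.ItemArgs{
			DatasourceId: pulumi.String("example-datasource"),
			ItemId:       pulumi.String("example-item-id"),
			DebugOptions: &cloudsearch.DebugOptionsArgs{
				EnableDebugging: pulumi.Bool(false),
			},
		})
		return err
	})
}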
+func (o ContextAttributeResponseOutput) Values() pulumi.StringArrayOutput { + return o.ApplyT(func(v ContextAttributeResponse) []string { return v.Values }).(pulumi.StringArrayOutput) +} + +type ContextAttributeResponseArrayOutput struct{ *pulumi.OutputState } + +func (ContextAttributeResponseArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]ContextAttributeResponse)(nil)).Elem() +} + +func (o ContextAttributeResponseArrayOutput) ToContextAttributeResponseArrayOutput() ContextAttributeResponseArrayOutput { + return o +} + +func (o ContextAttributeResponseArrayOutput) ToContextAttributeResponseArrayOutputWithContext(ctx context.Context) ContextAttributeResponseArrayOutput { + return o +} + +func (o ContextAttributeResponseArrayOutput) Index(i pulumi.IntInput) ContextAttributeResponseOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) ContextAttributeResponse { + return vs[0].([]ContextAttributeResponse)[vs[1].(int)] + }).(ContextAttributeResponseOutput) +} + // Restriction on Datasource. type DataSourceRestriction struct { // Filter options restricting the results. If multiple filters are present, they are grouped by object type before joining. Filters with the same object type are joined conjunctively, then the resulting expressions are joined disjunctively. The maximum number of elements is 20. NOTE: Suggest API supports only few filters at the moment: "objecttype", "type" and "mimetype". For now, schema specific filters cannot be used to filter suggestions. @@ -580,6 +633,240 @@ func (o DateResponseOutput) Year() pulumi.IntOutput { return o.ApplyT(func(v DateResponse) int { return v.Year }).(pulumi.IntOutput) } +type DateResponseArrayOutput struct{ *pulumi.OutputState } + +func (DateResponseArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]DateResponse)(nil)).Elem() +} + +func (o DateResponseArrayOutput) ToDateResponseArrayOutput() DateResponseArrayOutput { + return o +} + +func (o DateResponseArrayOutput) ToDateResponseArrayOutputWithContext(ctx context.Context) DateResponseArrayOutput { + return o +} + +func (o DateResponseArrayOutput) Index(i pulumi.IntInput) DateResponseOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) DateResponse { + return vs[0].([]DateResponse)[vs[1].(int)] + }).(DateResponseOutput) +} + +// List of date values. +type DateValuesResponse struct { + Values []DateResponse `pulumi:"values"` +} + +// List of date values. +type DateValuesResponseOutput struct{ *pulumi.OutputState } + +func (DateValuesResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*DateValuesResponse)(nil)).Elem() +} + +func (o DateValuesResponseOutput) ToDateValuesResponseOutput() DateValuesResponseOutput { + return o +} + +func (o DateValuesResponseOutput) ToDateValuesResponseOutputWithContext(ctx context.Context) DateValuesResponseOutput { + return o +} + +func (o DateValuesResponseOutput) Values() DateResponseArrayOutput { + return o.ApplyT(func(v DateValuesResponse) []DateResponse { return v.Values }).(DateResponseArrayOutput) +} + +// Shared request debug options for all cloudsearch RPC methods. +type DebugOptions struct { + // If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field. + EnableDebugging *bool `pulumi:"enableDebugging"` +} + +// DebugOptionsInput is an input type that accepts DebugOptionsArgs and DebugOptionsOutput values. 
+// You can construct a concrete instance of `DebugOptionsInput` via: +// +// DebugOptionsArgs{...} +type DebugOptionsInput interface { + pulumi.Input + + ToDebugOptionsOutput() DebugOptionsOutput + ToDebugOptionsOutputWithContext(context.Context) DebugOptionsOutput +} + +// Shared request debug options for all cloudsearch RPC methods. +type DebugOptionsArgs struct { + // If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field. + EnableDebugging pulumi.BoolPtrInput `pulumi:"enableDebugging"` +} + +func (DebugOptionsArgs) ElementType() reflect.Type { + return reflect.TypeOf((*DebugOptions)(nil)).Elem() +} + +func (i DebugOptionsArgs) ToDebugOptionsOutput() DebugOptionsOutput { + return i.ToDebugOptionsOutputWithContext(context.Background()) +} + +func (i DebugOptionsArgs) ToDebugOptionsOutputWithContext(ctx context.Context) DebugOptionsOutput { + return pulumi.ToOutputWithContext(ctx, i).(DebugOptionsOutput) +} + +func (i DebugOptionsArgs) ToDebugOptionsPtrOutput() DebugOptionsPtrOutput { + return i.ToDebugOptionsPtrOutputWithContext(context.Background()) +} + +func (i DebugOptionsArgs) ToDebugOptionsPtrOutputWithContext(ctx context.Context) DebugOptionsPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(DebugOptionsOutput).ToDebugOptionsPtrOutputWithContext(ctx) +} + +// DebugOptionsPtrInput is an input type that accepts DebugOptionsArgs, DebugOptionsPtr and DebugOptionsPtrOutput values. +// You can construct a concrete instance of `DebugOptionsPtrInput` via: +// +// DebugOptionsArgs{...} +// +// or: +// +// nil +type DebugOptionsPtrInput interface { + pulumi.Input + + ToDebugOptionsPtrOutput() DebugOptionsPtrOutput + ToDebugOptionsPtrOutputWithContext(context.Context) DebugOptionsPtrOutput +} + +type debugOptionsPtrType DebugOptionsArgs + +func DebugOptionsPtr(v *DebugOptionsArgs) DebugOptionsPtrInput { + return (*debugOptionsPtrType)(v) +} + +func (*debugOptionsPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**DebugOptions)(nil)).Elem() +} + +func (i *debugOptionsPtrType) ToDebugOptionsPtrOutput() DebugOptionsPtrOutput { + return i.ToDebugOptionsPtrOutputWithContext(context.Background()) +} + +func (i *debugOptionsPtrType) ToDebugOptionsPtrOutputWithContext(ctx context.Context) DebugOptionsPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(DebugOptionsPtrOutput) +} + +// Shared request debug options for all cloudsearch RPC methods. +type DebugOptionsOutput struct{ *pulumi.OutputState } + +func (DebugOptionsOutput) ElementType() reflect.Type { + return reflect.TypeOf((*DebugOptions)(nil)).Elem() +} + +func (o DebugOptionsOutput) ToDebugOptionsOutput() DebugOptionsOutput { + return o +} + +func (o DebugOptionsOutput) ToDebugOptionsOutputWithContext(ctx context.Context) DebugOptionsOutput { + return o +} + +func (o DebugOptionsOutput) ToDebugOptionsPtrOutput() DebugOptionsPtrOutput { + return o.ToDebugOptionsPtrOutputWithContext(context.Background()) +} + +func (o DebugOptionsOutput) ToDebugOptionsPtrOutputWithContext(ctx context.Context) DebugOptionsPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v DebugOptions) *DebugOptions { + return &v + }).(DebugOptionsPtrOutput) +} + +// If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field. 
+func (o DebugOptionsOutput) EnableDebugging() pulumi.BoolPtrOutput { + return o.ApplyT(func(v DebugOptions) *bool { return v.EnableDebugging }).(pulumi.BoolPtrOutput) +} + +type DebugOptionsPtrOutput struct{ *pulumi.OutputState } + +func (DebugOptionsPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**DebugOptions)(nil)).Elem() +} + +func (o DebugOptionsPtrOutput) ToDebugOptionsPtrOutput() DebugOptionsPtrOutput { + return o +} + +func (o DebugOptionsPtrOutput) ToDebugOptionsPtrOutputWithContext(ctx context.Context) DebugOptionsPtrOutput { + return o +} + +func (o DebugOptionsPtrOutput) Elem() DebugOptionsOutput { + return o.ApplyT(func(v *DebugOptions) DebugOptions { + if v != nil { + return *v + } + var ret DebugOptions + return ret + }).(DebugOptionsOutput) +} + +// If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field. +func (o DebugOptionsPtrOutput) EnableDebugging() pulumi.BoolPtrOutput { + return o.ApplyT(func(v *DebugOptions) *bool { + if v == nil { + return nil + } + return v.EnableDebugging + }).(pulumi.BoolPtrOutput) +} + +// List of double values. +type DoubleValuesResponse struct { + Values []float64 `pulumi:"values"` +} + +// List of double values. +type DoubleValuesResponseOutput struct{ *pulumi.OutputState } + +func (DoubleValuesResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*DoubleValuesResponse)(nil)).Elem() +} + +func (o DoubleValuesResponseOutput) ToDoubleValuesResponseOutput() DoubleValuesResponseOutput { + return o +} + +func (o DoubleValuesResponseOutput) ToDoubleValuesResponseOutputWithContext(ctx context.Context) DoubleValuesResponseOutput { + return o +} + +func (o DoubleValuesResponseOutput) Values() pulumi.Float64ArrayOutput { + return o.ApplyT(func(v DoubleValuesResponse) []float64 { return v.Values }).(pulumi.Float64ArrayOutput) +} + +// List of enum values. +type EnumValuesResponse struct { + // The maximum allowable length for string values is 32 characters. + Values []string `pulumi:"values"` +} + +// List of enum values. +type EnumValuesResponseOutput struct{ *pulumi.OutputState } + +func (EnumValuesResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*EnumValuesResponse)(nil)).Elem() +} + +func (o EnumValuesResponseOutput) ToEnumValuesResponseOutput() EnumValuesResponseOutput { + return o +} + +func (o EnumValuesResponseOutput) ToEnumValuesResponseOutputWithContext(ctx context.Context) EnumValuesResponseOutput { + return o +} + +// The maximum allowable length for string values is 32 characters. +func (o EnumValuesResponseOutput) Values() pulumi.StringArrayOutput { + return o.ApplyT(func(v EnumValuesResponse) []string { return v.Values }).(pulumi.StringArrayOutput) +} + // Specifies operators to return facet results for. There will be one FacetResult for every source_name/object_type/operator_name combination. type FacetOptions struct { // If set, describes integer faceting options for the given integer property. The corresponding integer property in the schema should be marked isFacetable. The number of buckets returned would be minimum of this and num_facet_buckets. @@ -790,6 +1077,57 @@ func (o FacetOptionsResponseArrayOutput) Index(i pulumi.IntInput) FacetOptionsRe }).(FacetOptionsResponseOutput) } +type FieldViolationResponse struct { + // The description of the error. + Description string `pulumi:"description"` + // Path of field with violation. 
+ Field string `pulumi:"field"` +} + +type FieldViolationResponseOutput struct{ *pulumi.OutputState } + +func (FieldViolationResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*FieldViolationResponse)(nil)).Elem() +} + +func (o FieldViolationResponseOutput) ToFieldViolationResponseOutput() FieldViolationResponseOutput { + return o +} + +func (o FieldViolationResponseOutput) ToFieldViolationResponseOutputWithContext(ctx context.Context) FieldViolationResponseOutput { + return o +} + +// The description of the error. +func (o FieldViolationResponseOutput) Description() pulumi.StringOutput { + return o.ApplyT(func(v FieldViolationResponse) string { return v.Description }).(pulumi.StringOutput) +} + +// Path of field with violation. +func (o FieldViolationResponseOutput) Field() pulumi.StringOutput { + return o.ApplyT(func(v FieldViolationResponse) string { return v.Field }).(pulumi.StringOutput) +} + +type FieldViolationResponseArrayOutput struct{ *pulumi.OutputState } + +func (FieldViolationResponseArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]FieldViolationResponse)(nil)).Elem() +} + +func (o FieldViolationResponseArrayOutput) ToFieldViolationResponseArrayOutput() FieldViolationResponseArrayOutput { + return o +} + +func (o FieldViolationResponseArrayOutput) ToFieldViolationResponseArrayOutputWithContext(ctx context.Context) FieldViolationResponseArrayOutput { + return o +} + +func (o FieldViolationResponseArrayOutput) Index(i pulumi.IntInput) FieldViolationResponseOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) FieldViolationResponse { + return vs[0].([]FieldViolationResponse)[vs[1].(int)] + }).(FieldViolationResponseOutput) +} + // A generic way of expressing filters in a query, which supports two approaches: **1. Setting a ValueFilter.** The name must match an operator_name defined in the schema for your data source. **2. Setting a CompositeFilter.** The filters are evaluated using the logical operator. The top-level operators can only be either an AND or a NOT. AND can appear only at the top-most level. OR can appear only under a top-level AND. type Filter struct { CompositeFilter *CompositeFilter `pulumi:"compositeFilter"` @@ -1292,248 +1630,871 @@ func (o GSuitePrincipalOutput) GsuiteUserEmail() pulumi.StringPtrOutput { return o.ApplyT(func(v GSuitePrincipal) *string { return v.GsuiteUserEmail }).(pulumi.StringPtrOutput) } -type GSuitePrincipalArrayOutput struct{ *pulumi.OutputState } +type GSuitePrincipalArrayOutput struct{ *pulumi.OutputState } + +func (GSuitePrincipalArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]GSuitePrincipal)(nil)).Elem() +} + +func (o GSuitePrincipalArrayOutput) ToGSuitePrincipalArrayOutput() GSuitePrincipalArrayOutput { + return o +} + +func (o GSuitePrincipalArrayOutput) ToGSuitePrincipalArrayOutputWithContext(ctx context.Context) GSuitePrincipalArrayOutput { + return o +} + +func (o GSuitePrincipalArrayOutput) Index(i pulumi.IntInput) GSuitePrincipalOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) GSuitePrincipal { + return vs[0].([]GSuitePrincipal)[vs[1].(int)] + }).(GSuitePrincipalOutput) +} + +type GSuitePrincipalResponse struct { + // This principal represents all users of the Google Workspace domain of the customer. + GsuiteDomain bool `pulumi:"gsuiteDomain"` + // This principal references a Google Workspace group name. + GsuiteGroupEmail string `pulumi:"gsuiteGroupEmail"` + // This principal references a Google Workspace user account. 
+ GsuiteUserEmail string `pulumi:"gsuiteUserEmail"` +} + +type GSuitePrincipalResponseOutput struct{ *pulumi.OutputState } + +func (GSuitePrincipalResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GSuitePrincipalResponse)(nil)).Elem() +} + +func (o GSuitePrincipalResponseOutput) ToGSuitePrincipalResponseOutput() GSuitePrincipalResponseOutput { + return o +} + +func (o GSuitePrincipalResponseOutput) ToGSuitePrincipalResponseOutputWithContext(ctx context.Context) GSuitePrincipalResponseOutput { + return o +} + +// This principal represents all users of the Google Workspace domain of the customer. +func (o GSuitePrincipalResponseOutput) GsuiteDomain() pulumi.BoolOutput { + return o.ApplyT(func(v GSuitePrincipalResponse) bool { return v.GsuiteDomain }).(pulumi.BoolOutput) +} + +// This principal references a Google Workspace group name. +func (o GSuitePrincipalResponseOutput) GsuiteGroupEmail() pulumi.StringOutput { + return o.ApplyT(func(v GSuitePrincipalResponse) string { return v.GsuiteGroupEmail }).(pulumi.StringOutput) +} + +// This principal references a Google Workspace user account. +func (o GSuitePrincipalResponseOutput) GsuiteUserEmail() pulumi.StringOutput { + return o.ApplyT(func(v GSuitePrincipalResponse) string { return v.GsuiteUserEmail }).(pulumi.StringOutput) +} + +type GSuitePrincipalResponseArrayOutput struct{ *pulumi.OutputState } + +func (GSuitePrincipalResponseArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]GSuitePrincipalResponse)(nil)).Elem() +} + +func (o GSuitePrincipalResponseArrayOutput) ToGSuitePrincipalResponseArrayOutput() GSuitePrincipalResponseArrayOutput { + return o +} + +func (o GSuitePrincipalResponseArrayOutput) ToGSuitePrincipalResponseArrayOutputWithContext(ctx context.Context) GSuitePrincipalResponseArrayOutput { + return o +} + +func (o GSuitePrincipalResponseArrayOutput) Index(i pulumi.IntInput) GSuitePrincipalResponseOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) GSuitePrincipalResponse { + return vs[0].([]GSuitePrincipalResponse)[vs[1].(int)] + }).(GSuitePrincipalResponseOutput) +} + +// List of html values. +type HtmlValuesResponse struct { + // The maximum allowable length for html values is 2048 characters. + Values []string `pulumi:"values"` +} + +// List of html values. +type HtmlValuesResponseOutput struct{ *pulumi.OutputState } + +func (HtmlValuesResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*HtmlValuesResponse)(nil)).Elem() +} + +func (o HtmlValuesResponseOutput) ToHtmlValuesResponseOutput() HtmlValuesResponseOutput { + return o +} + +func (o HtmlValuesResponseOutput) ToHtmlValuesResponseOutputWithContext(ctx context.Context) HtmlValuesResponseOutput { + return o +} + +// The maximum allowable length for html values is 2048 characters. +func (o HtmlValuesResponseOutput) Values() pulumi.StringArrayOutput { + return o.ApplyT(func(v HtmlValuesResponse) []string { return v.Values }).(pulumi.StringArrayOutput) +} + +// Used to specify integer faceting options. +type IntegerFacetingOptions struct { + // Buckets for given integer values should be in strictly ascending order. For example, if values supplied are (1,5,10,100), the following facet buckets will be formed {<1, [1,5), [5-10), [10-100), >=100}. + IntegerBuckets []string `pulumi:"integerBuckets"` +} + +// IntegerFacetingOptionsInput is an input type that accepts IntegerFacetingOptionsArgs and IntegerFacetingOptionsOutput values. 
+// You can construct a concrete instance of `IntegerFacetingOptionsInput` via: +// +// IntegerFacetingOptionsArgs{...} +type IntegerFacetingOptionsInput interface { + pulumi.Input + + ToIntegerFacetingOptionsOutput() IntegerFacetingOptionsOutput + ToIntegerFacetingOptionsOutputWithContext(context.Context) IntegerFacetingOptionsOutput +} + +// Used to specify integer faceting options. +type IntegerFacetingOptionsArgs struct { + // Buckets for given integer values should be in strictly ascending order. For example, if values supplied are (1,5,10,100), the following facet buckets will be formed {<1, [1,5), [5-10), [10-100), >=100}. + IntegerBuckets pulumi.StringArrayInput `pulumi:"integerBuckets"` +} + +func (IntegerFacetingOptionsArgs) ElementType() reflect.Type { + return reflect.TypeOf((*IntegerFacetingOptions)(nil)).Elem() +} + +func (i IntegerFacetingOptionsArgs) ToIntegerFacetingOptionsOutput() IntegerFacetingOptionsOutput { + return i.ToIntegerFacetingOptionsOutputWithContext(context.Background()) +} + +func (i IntegerFacetingOptionsArgs) ToIntegerFacetingOptionsOutputWithContext(ctx context.Context) IntegerFacetingOptionsOutput { + return pulumi.ToOutputWithContext(ctx, i).(IntegerFacetingOptionsOutput) +} + +func (i IntegerFacetingOptionsArgs) ToIntegerFacetingOptionsPtrOutput() IntegerFacetingOptionsPtrOutput { + return i.ToIntegerFacetingOptionsPtrOutputWithContext(context.Background()) +} + +func (i IntegerFacetingOptionsArgs) ToIntegerFacetingOptionsPtrOutputWithContext(ctx context.Context) IntegerFacetingOptionsPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(IntegerFacetingOptionsOutput).ToIntegerFacetingOptionsPtrOutputWithContext(ctx) +} + +// IntegerFacetingOptionsPtrInput is an input type that accepts IntegerFacetingOptionsArgs, IntegerFacetingOptionsPtr and IntegerFacetingOptionsPtrOutput values. +// You can construct a concrete instance of `IntegerFacetingOptionsPtrInput` via: +// +// IntegerFacetingOptionsArgs{...} +// +// or: +// +// nil +type IntegerFacetingOptionsPtrInput interface { + pulumi.Input + + ToIntegerFacetingOptionsPtrOutput() IntegerFacetingOptionsPtrOutput + ToIntegerFacetingOptionsPtrOutputWithContext(context.Context) IntegerFacetingOptionsPtrOutput +} + +type integerFacetingOptionsPtrType IntegerFacetingOptionsArgs + +func IntegerFacetingOptionsPtr(v *IntegerFacetingOptionsArgs) IntegerFacetingOptionsPtrInput { + return (*integerFacetingOptionsPtrType)(v) +} + +func (*integerFacetingOptionsPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**IntegerFacetingOptions)(nil)).Elem() +} + +func (i *integerFacetingOptionsPtrType) ToIntegerFacetingOptionsPtrOutput() IntegerFacetingOptionsPtrOutput { + return i.ToIntegerFacetingOptionsPtrOutputWithContext(context.Background()) +} + +func (i *integerFacetingOptionsPtrType) ToIntegerFacetingOptionsPtrOutputWithContext(ctx context.Context) IntegerFacetingOptionsPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(IntegerFacetingOptionsPtrOutput) +} + +// Used to specify integer faceting options. 
+type IntegerFacetingOptionsOutput struct{ *pulumi.OutputState } + +func (IntegerFacetingOptionsOutput) ElementType() reflect.Type { + return reflect.TypeOf((*IntegerFacetingOptions)(nil)).Elem() +} + +func (o IntegerFacetingOptionsOutput) ToIntegerFacetingOptionsOutput() IntegerFacetingOptionsOutput { + return o +} + +func (o IntegerFacetingOptionsOutput) ToIntegerFacetingOptionsOutputWithContext(ctx context.Context) IntegerFacetingOptionsOutput { + return o +} + +func (o IntegerFacetingOptionsOutput) ToIntegerFacetingOptionsPtrOutput() IntegerFacetingOptionsPtrOutput { + return o.ToIntegerFacetingOptionsPtrOutputWithContext(context.Background()) +} + +func (o IntegerFacetingOptionsOutput) ToIntegerFacetingOptionsPtrOutputWithContext(ctx context.Context) IntegerFacetingOptionsPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v IntegerFacetingOptions) *IntegerFacetingOptions { + return &v + }).(IntegerFacetingOptionsPtrOutput) +} + +// Buckets for given integer values should be in strictly ascending order. For example, if values supplied are (1,5,10,100), the following facet buckets will be formed {<1, [1,5), [5-10), [10-100), >=100}. +func (o IntegerFacetingOptionsOutput) IntegerBuckets() pulumi.StringArrayOutput { + return o.ApplyT(func(v IntegerFacetingOptions) []string { return v.IntegerBuckets }).(pulumi.StringArrayOutput) +} + +type IntegerFacetingOptionsPtrOutput struct{ *pulumi.OutputState } + +func (IntegerFacetingOptionsPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**IntegerFacetingOptions)(nil)).Elem() +} + +func (o IntegerFacetingOptionsPtrOutput) ToIntegerFacetingOptionsPtrOutput() IntegerFacetingOptionsPtrOutput { + return o +} + +func (o IntegerFacetingOptionsPtrOutput) ToIntegerFacetingOptionsPtrOutputWithContext(ctx context.Context) IntegerFacetingOptionsPtrOutput { + return o +} + +func (o IntegerFacetingOptionsPtrOutput) Elem() IntegerFacetingOptionsOutput { + return o.ApplyT(func(v *IntegerFacetingOptions) IntegerFacetingOptions { + if v != nil { + return *v + } + var ret IntegerFacetingOptions + return ret + }).(IntegerFacetingOptionsOutput) +} + +// Buckets for given integer values should be in strictly ascending order. For example, if values supplied are (1,5,10,100), the following facet buckets will be formed {<1, [1,5), [5-10), [10-100), >=100}. +func (o IntegerFacetingOptionsPtrOutput) IntegerBuckets() pulumi.StringArrayOutput { + return o.ApplyT(func(v *IntegerFacetingOptions) []string { + if v == nil { + return nil + } + return v.IntegerBuckets + }).(pulumi.StringArrayOutput) +} + +// Used to specify integer faceting options. +type IntegerFacetingOptionsResponse struct { + // Buckets for given integer values should be in strictly ascending order. For example, if values supplied are (1,5,10,100), the following facet buckets will be formed {<1, [1,5), [5-10), [10-100), >=100}. + IntegerBuckets []string `pulumi:"integerBuckets"` +} + +// Used to specify integer faceting options. 
+type IntegerFacetingOptionsResponseOutput struct{ *pulumi.OutputState } + +func (IntegerFacetingOptionsResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*IntegerFacetingOptionsResponse)(nil)).Elem() +} + +func (o IntegerFacetingOptionsResponseOutput) ToIntegerFacetingOptionsResponseOutput() IntegerFacetingOptionsResponseOutput { + return o +} + +func (o IntegerFacetingOptionsResponseOutput) ToIntegerFacetingOptionsResponseOutputWithContext(ctx context.Context) IntegerFacetingOptionsResponseOutput { + return o +} + +// Buckets for given integer values should be in strictly ascending order. For example, if values supplied are (1,5,10,100), the following facet buckets will be formed {<1, [1,5), [5-10), [10-100), >=100}. +func (o IntegerFacetingOptionsResponseOutput) IntegerBuckets() pulumi.StringArrayOutput { + return o.ApplyT(func(v IntegerFacetingOptionsResponse) []string { return v.IntegerBuckets }).(pulumi.StringArrayOutput) +} + +// List of integer values. +type IntegerValuesResponse struct { + Values []string `pulumi:"values"` +} + +// List of integer values. +type IntegerValuesResponseOutput struct{ *pulumi.OutputState } + +func (IntegerValuesResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*IntegerValuesResponse)(nil)).Elem() +} + +func (o IntegerValuesResponseOutput) ToIntegerValuesResponseOutput() IntegerValuesResponseOutput { + return o +} + +func (o IntegerValuesResponseOutput) ToIntegerValuesResponseOutputWithContext(ctx context.Context) IntegerValuesResponseOutput { + return o +} + +func (o IntegerValuesResponseOutput) Values() pulumi.StringArrayOutput { + return o.ApplyT(func(v IntegerValuesResponse) []string { return v.Values }).(pulumi.StringArrayOutput) +} + +// Represents an interaction between a user and an item. +type InteractionResponse struct { + // The time when the user acted on the item. If multiple actions of the same type exist for a single user, only the most recent action is recorded. + InteractionTime string `pulumi:"interactionTime"` + // The user that acted on the item. + Principal PrincipalResponse `pulumi:"principal"` + Type string `pulumi:"type"` +} + +// Represents an interaction between a user and an item. +type InteractionResponseOutput struct{ *pulumi.OutputState } + +func (InteractionResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*InteractionResponse)(nil)).Elem() +} + +func (o InteractionResponseOutput) ToInteractionResponseOutput() InteractionResponseOutput { + return o +} + +func (o InteractionResponseOutput) ToInteractionResponseOutputWithContext(ctx context.Context) InteractionResponseOutput { + return o +} + +// The time when the user acted on the item. If multiple actions of the same type exist for a single user, only the most recent action is recorded. +func (o InteractionResponseOutput) InteractionTime() pulumi.StringOutput { + return o.ApplyT(func(v InteractionResponse) string { return v.InteractionTime }).(pulumi.StringOutput) +} + +// The user that acted on the item. 
+func (o InteractionResponseOutput) Principal() PrincipalResponseOutput { + return o.ApplyT(func(v InteractionResponse) PrincipalResponse { return v.Principal }).(PrincipalResponseOutput) +} + +func (o InteractionResponseOutput) Type() pulumi.StringOutput { + return o.ApplyT(func(v InteractionResponse) string { return v.Type }).(pulumi.StringOutput) +} + +type InteractionResponseArrayOutput struct{ *pulumi.OutputState } + +func (InteractionResponseArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]InteractionResponse)(nil)).Elem() +} + +func (o InteractionResponseArrayOutput) ToInteractionResponseArrayOutput() InteractionResponseArrayOutput { + return o +} + +func (o InteractionResponseArrayOutput) ToInteractionResponseArrayOutputWithContext(ctx context.Context) InteractionResponseArrayOutput { + return o +} + +func (o InteractionResponseArrayOutput) Index(i pulumi.IntInput) InteractionResponseOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) InteractionResponse { + return vs[0].([]InteractionResponse)[vs[1].(int)] + }).(InteractionResponseOutput) +} + +// Access control list information for the item. For more information see [Map ACLs](https://developers.google.com/cloud-search/docs/guides/acls). +type ItemAclResponse struct { + // Sets the type of access rules to apply when an item inherits its ACL from a parent. This should always be set in tandem with the inheritAclFrom field. Also, when the inheritAclFrom field is set, this field should be set to a valid AclInheritanceType. + AclInheritanceType string `pulumi:"aclInheritanceType"` + // List of principals who are explicitly denied access to the item in search results. While principals are denied access by default, use denied readers to handle exceptions and override the list of allowed readers. The maximum number of elements is 100. + DeniedReaders []PrincipalResponse `pulumi:"deniedReaders"` + // The name of the item to inherit the Access Permission List (ACL) from. Note: ACL inheritance *only* provides access permissions to child items and does not define structural relationships, nor does it provide convenient ways to delete large groups of items. Deleting an ACL parent from the index only alters the access permissions of child items that reference the parent in the inheritAclFrom field. The item is still in the index, but may not be visible in search results. By contrast, deletion of a container item also deletes all items that reference the container via the containerName field. The maximum length for this field is 1536 characters. + InheritAclFrom string `pulumi:"inheritAclFrom"` + // Optional. List of owners for the item. This field has no bearing on document access permissions. It does, however, offer a slight ranking boost to items where the querying user is an owner. The maximum number of elements is 5. + Owners []PrincipalResponse `pulumi:"owners"` + // List of principals who are allowed to see the item in search results. Optional if inheriting permissions from another item or if the item is not intended to be visible, such as virtual containers. The maximum number of elements is 1000. + Readers []PrincipalResponse `pulumi:"readers"` +} + +// Access control list information for the item. For more information see [Map ACLs](https://developers.google.com/cloud-search/docs/guides/acls).
+type ItemAclResponseOutput struct{ *pulumi.OutputState } + +func (ItemAclResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*ItemAclResponse)(nil)).Elem() +} + +func (o ItemAclResponseOutput) ToItemAclResponseOutput() ItemAclResponseOutput { + return o +} + +func (o ItemAclResponseOutput) ToItemAclResponseOutputWithContext(ctx context.Context) ItemAclResponseOutput { + return o +} + +// Sets the type of access rules to apply when an item inherits its ACL from a parent. This should always be set in tandem with the inheritAclFrom field. Also, when the inheritAclFrom field is set, this field should be set to a valid AclInheritanceType. +func (o ItemAclResponseOutput) AclInheritanceType() pulumi.StringOutput { + return o.ApplyT(func(v ItemAclResponse) string { return v.AclInheritanceType }).(pulumi.StringOutput) +} + +// List of principals who are explicitly denied access to the item in search results. While principals are denied access by default, use denied readers to handle exceptions and override the list of allowed readers. The maximum number of elements is 100. +func (o ItemAclResponseOutput) DeniedReaders() PrincipalResponseArrayOutput { + return o.ApplyT(func(v ItemAclResponse) []PrincipalResponse { return v.DeniedReaders }).(PrincipalResponseArrayOutput) +} + +// The name of the item to inherit the Access Permission List (ACL) from. Note: ACL inheritance *only* provides access permissions to child items and does not define structural relationships, nor does it provide convenient ways to delete large groups of items. Deleting an ACL parent from the index only alters the access permissions of child items that reference the parent in the inheritAclFrom field. The item is still in the index, but may not be visible in search results. By contrast, deletion of a container item also deletes all items that reference the container via the containerName field. The maximum length for this field is 1536 characters. +func (o ItemAclResponseOutput) InheritAclFrom() pulumi.StringOutput { + return o.ApplyT(func(v ItemAclResponse) string { return v.InheritAclFrom }).(pulumi.StringOutput) +} + +// Optional. List of owners for the item. This field has no bearing on document access permissions. It does, however, offer a slight ranking boost to items where the querying user is an owner. The maximum number of elements is 5. +func (o ItemAclResponseOutput) Owners() PrincipalResponseArrayOutput { + return o.ApplyT(func(v ItemAclResponse) []PrincipalResponse { return v.Owners }).(PrincipalResponseArrayOutput) +} + +// List of principals who are allowed to see the item in search results. Optional if inheriting permissions from another item or if the item is not intended to be visible, such as virtual containers. The maximum number of elements is 1000. +func (o ItemAclResponseOutput) Readers() PrincipalResponseArrayOutput { + return o.ApplyT(func(v ItemAclResponse) []PrincipalResponse { return v.Readers }).(PrincipalResponseArrayOutput) +} + +// Content of an item to be indexed and surfaced by Cloud Search. Only UTF-8 encoded strings are allowed as inlineContent. If the content is uploaded and not binary, it must be UTF-8 encoded. +type ItemContentResponse struct { + // Upload reference ID of a previously uploaded content via write method. + ContentDataRef UploadItemRefResponse `pulumi:"contentDataRef"` + ContentFormat string `pulumi:"contentFormat"` + // Hashing info calculated and provided by the API client for content. Can be used with the items.push method to calculate modified state.
The maximum length is 2048 characters. + Hash string `pulumi:"hash"` + // Content that is supplied inlined within the update method. The maximum length is 102400 bytes (100 KiB). + InlineContent string `pulumi:"inlineContent"` +} + +// Content of an item to be indexed and surfaced by Cloud Search. Only UTF-8 encoded strings are allowed as inlineContent. If the content is uploaded and not binary, it must be UTF-8 encoded. +type ItemContentResponseOutput struct{ *pulumi.OutputState } + +func (ItemContentResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*ItemContentResponse)(nil)).Elem() +} + +func (o ItemContentResponseOutput) ToItemContentResponseOutput() ItemContentResponseOutput { + return o +} + +func (o ItemContentResponseOutput) ToItemContentResponseOutputWithContext(ctx context.Context) ItemContentResponseOutput { + return o +} + +// Upload reference ID of a previously uploaded content via write method. +func (o ItemContentResponseOutput) ContentDataRef() UploadItemRefResponseOutput { + return o.ApplyT(func(v ItemContentResponse) UploadItemRefResponse { return v.ContentDataRef }).(UploadItemRefResponseOutput) +} + +func (o ItemContentResponseOutput) ContentFormat() pulumi.StringOutput { + return o.ApplyT(func(v ItemContentResponse) string { return v.ContentFormat }).(pulumi.StringOutput) +} + +// Hashing info calculated and provided by the API client for content. Can be used with the items.push method to calculate modified state. The maximum length is 2048 characters. +func (o ItemContentResponseOutput) Hash() pulumi.StringOutput { + return o.ApplyT(func(v ItemContentResponse) string { return v.Hash }).(pulumi.StringOutput) +} + +// Content that is supplied inlined within the update method. The maximum length is 102400 bytes (100 KiB). +func (o ItemContentResponseOutput) InlineContent() pulumi.StringOutput { + return o.ApplyT(func(v ItemContentResponse) string { return v.InlineContent }).(pulumi.StringOutput) +} + +// Available metadata fields for the item. +type ItemMetadataResponse struct { + // The name of the container for this item. Deletion of the container item leads to automatic deletion of this item. Note: ACLs are not inherited from a container item. To provide ACL inheritance for an item, use the inheritAclFrom field. The maximum length is 1536 characters. + ContainerName string `pulumi:"containerName"` + // The BCP-47 language code for the item, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. The maximum length is 32 characters. + ContentLanguage string `pulumi:"contentLanguage"` + // A set of named attributes associated with the item. This can be used for influencing the ranking of the item based on the context in the request. The maximum number of elements is 10. + ContextAttributes []ContextAttributeResponse `pulumi:"contextAttributes"` + // The time when the item was created in the source repository. + CreateTime string `pulumi:"createTime"` + // Hashing value provided by the API caller. This can be used with the items.push method to calculate modified state. The maximum length is 2048 characters. + Hash string `pulumi:"hash"` + // A list of interactions for the item. Interactions are used to improve Search quality, but are not exposed to end users. The maximum number of elements is 1000. + Interactions []InteractionResponse `pulumi:"interactions"` + // Additional keywords or phrases that should match the item. Used internally for user generated content. 
The maximum number of elements is 100. The maximum length is 8192 characters.
+ Keywords []string `pulumi:"keywords"`
+ // The original mime-type of ItemContent.content in the source repository. The maximum length is 256 characters.
+ MimeType string `pulumi:"mimeType"`
+ // The type of the item. This should correspond to the name of an object definition in the schema registered for the data source. For example, if the schema for the data source contains an object definition with name 'document', then item indexing requests for objects of that type should set objectType to 'document'. The maximum length is 256 characters.
+ ObjectType string `pulumi:"objectType"`
+ // Additional search quality metadata of the item
+ SearchQualityMetadata SearchQualityMetadataResponse `pulumi:"searchQualityMetadata"`
+ // Link to the source repository serving the data. Search results apply this link to the title. Whitespace or special characters may cause Cloud Search result links to trigger a redirect notice; to avoid this, encode the URL. The maximum length is 2048 characters.
+ SourceRepositoryUrl string `pulumi:"sourceRepositoryUrl"`
+ // The title of the item. If given, this will be the displayed title of the Search result. The maximum length is 2048 characters.
+ Title string `pulumi:"title"`
+ // The time when the item was last modified in the source repository.
+ UpdateTime string `pulumi:"updateTime"`
+}
+
+// Available metadata fields for the item.
+type ItemMetadataResponseOutput struct{ *pulumi.OutputState }
+
+func (ItemMetadataResponseOutput) ElementType() reflect.Type {
+ return reflect.TypeOf((*ItemMetadataResponse)(nil)).Elem()
+}
+
+func (o ItemMetadataResponseOutput) ToItemMetadataResponseOutput() ItemMetadataResponseOutput {
+ return o
+}
+
+func (o ItemMetadataResponseOutput) ToItemMetadataResponseOutputWithContext(ctx context.Context) ItemMetadataResponseOutput {
+ return o
+}
+
+// The name of the container for this item. Deletion of the container item leads to automatic deletion of this item. Note: ACLs are not inherited from a container item. To provide ACL inheritance for an item, use the inheritAclFrom field. The maximum length is 1536 characters.
+func (o ItemMetadataResponseOutput) ContainerName() pulumi.StringOutput {
+ return o.ApplyT(func(v ItemMetadataResponse) string { return v.ContainerName }).(pulumi.StringOutput)
+}
+
+// The BCP-47 language code for the item, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. The maximum length is 32 characters.
+func (o ItemMetadataResponseOutput) ContentLanguage() pulumi.StringOutput {
+ return o.ApplyT(func(v ItemMetadataResponse) string { return v.ContentLanguage }).(pulumi.StringOutput)
+}
+
+// A set of named attributes associated with the item. This can be used for influencing the ranking of the item based on the context in the request. The maximum number of elements is 10.
+func (o ItemMetadataResponseOutput) ContextAttributes() ContextAttributeResponseArrayOutput {
+ return o.ApplyT(func(v ItemMetadataResponse) []ContextAttributeResponse { return v.ContextAttributes }).(ContextAttributeResponseArrayOutput)
+}
+
+// The time when the item was created in the source repository.
+func (o ItemMetadataResponseOutput) CreateTime() pulumi.StringOutput {
+ return o.ApplyT(func(v ItemMetadataResponse) string { return v.CreateTime }).(pulumi.StringOutput)
+}
+
+// Hashing value provided by the API caller. This can be used with the items.push method to calculate modified state. The maximum length is 2048 characters.
+func (o ItemMetadataResponseOutput) Hash() pulumi.StringOutput {
+ return o.ApplyT(func(v ItemMetadataResponse) string { return v.Hash }).(pulumi.StringOutput)
+}
+
+// A list of interactions for the item. Interactions are used to improve Search quality, but are not exposed to end users. The maximum number of elements is 1000.
+func (o ItemMetadataResponseOutput) Interactions() InteractionResponseArrayOutput {
+ return o.ApplyT(func(v ItemMetadataResponse) []InteractionResponse { return v.Interactions }).(InteractionResponseArrayOutput)
+}
+
+// Additional keywords or phrases that should match the item. Used internally for user generated content. The maximum number of elements is 100. The maximum length is 8192 characters.
+func (o ItemMetadataResponseOutput) Keywords() pulumi.StringArrayOutput {
+ return o.ApplyT(func(v ItemMetadataResponse) []string { return v.Keywords }).(pulumi.StringArrayOutput)
+}
+
+// The original mime-type of ItemContent.content in the source repository. The maximum length is 256 characters.
+func (o ItemMetadataResponseOutput) MimeType() pulumi.StringOutput {
+ return o.ApplyT(func(v ItemMetadataResponse) string { return v.MimeType }).(pulumi.StringOutput)
+}
+
+// The type of the item. This should correspond to the name of an object definition in the schema registered for the data source. For example, if the schema for the data source contains an object definition with name 'document', then item indexing requests for objects of that type should set objectType to 'document'. The maximum length is 256 characters.
+func (o ItemMetadataResponseOutput) ObjectType() pulumi.StringOutput {
+ return o.ApplyT(func(v ItemMetadataResponse) string { return v.ObjectType }).(pulumi.StringOutput)
+}
+
+// Additional search quality metadata of the item
+func (o ItemMetadataResponseOutput) SearchQualityMetadata() SearchQualityMetadataResponseOutput {
+ return o.ApplyT(func(v ItemMetadataResponse) SearchQualityMetadataResponse { return v.SearchQualityMetadata }).(SearchQualityMetadataResponseOutput)
+}
+
+// Link to the source repository serving the data. Search results apply this link to the title. Whitespace or special characters may cause Cloud Search result links to trigger a redirect notice; to avoid this, encode the URL. The maximum length is 2048 characters.
+func (o ItemMetadataResponseOutput) SourceRepositoryUrl() pulumi.StringOutput {
+ return o.ApplyT(func(v ItemMetadataResponse) string { return v.SourceRepositoryUrl }).(pulumi.StringOutput)
+}
+
+// The title of the item. If given, this will be the displayed title of the Search result. The maximum length is 2048 characters.
+func (o ItemMetadataResponseOutput) Title() pulumi.StringOutput {
+ return o.ApplyT(func(v ItemMetadataResponse) string { return v.Title }).(pulumi.StringOutput)
+}
+
+// The time when the item was last modified in the source repository.
+func (o ItemMetadataResponseOutput) UpdateTime() pulumi.StringOutput {
+ return o.ApplyT(func(v ItemMetadataResponse) string { return v.UpdateTime }).(pulumi.StringOutput)
+}
+
+// This contains item's status and any errors.
+type ItemStatusResponse struct {
+ // Status code.
+ Code string `pulumi:"code"`
+ // Error details in case the item is in ERROR state.
+ ProcessingErrors []ProcessingErrorResponse `pulumi:"processingErrors"`
+ // Repository error reported by connector.
+ RepositoryErrors []RepositoryErrorResponse `pulumi:"repositoryErrors"` +} + +// This contains item's status and any errors. +type ItemStatusResponseOutput struct{ *pulumi.OutputState } + +func (ItemStatusResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*ItemStatusResponse)(nil)).Elem() +} + +func (o ItemStatusResponseOutput) ToItemStatusResponseOutput() ItemStatusResponseOutput { + return o +} + +func (o ItemStatusResponseOutput) ToItemStatusResponseOutputWithContext(ctx context.Context) ItemStatusResponseOutput { + return o +} + +// Status code. +func (o ItemStatusResponseOutput) Code() pulumi.StringOutput { + return o.ApplyT(func(v ItemStatusResponse) string { return v.Code }).(pulumi.StringOutput) +} + +// Error details in case the item is in ERROR state. +func (o ItemStatusResponseOutput) ProcessingErrors() ProcessingErrorResponseArrayOutput { + return o.ApplyT(func(v ItemStatusResponse) []ProcessingErrorResponse { return v.ProcessingErrors }).(ProcessingErrorResponseArrayOutput) +} + +// Repository error reported by connector. +func (o ItemStatusResponseOutput) RepositoryErrors() RepositoryErrorResponseArrayOutput { + return o.ApplyT(func(v ItemStatusResponse) []RepositoryErrorResponse { return v.RepositoryErrors }).(RepositoryErrorResponseArrayOutput) +} + +// Available structured data fields for the item. +type ItemStructuredDataResponse struct { + // Hashing value provided by the API caller. This can be used with the items.push method to calculate modified state. The maximum length is 2048 characters. + Hash string `pulumi:"hash"` + // The structured data object that should conform to a registered object definition in the schema for the data source. + Object StructuredDataObjectResponse `pulumi:"object"` +} + +// Available structured data fields for the item. +type ItemStructuredDataResponseOutput struct{ *pulumi.OutputState } + +func (ItemStructuredDataResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*ItemStructuredDataResponse)(nil)).Elem() +} + +func (o ItemStructuredDataResponseOutput) ToItemStructuredDataResponseOutput() ItemStructuredDataResponseOutput { + return o +} + +func (o ItemStructuredDataResponseOutput) ToItemStructuredDataResponseOutputWithContext(ctx context.Context) ItemStructuredDataResponseOutput { + return o +} + +// Hashing value provided by the API caller. This can be used with the items.push method to calculate modified state. The maximum length is 2048 characters. +func (o ItemStructuredDataResponseOutput) Hash() pulumi.StringOutput { + return o.ApplyT(func(v ItemStructuredDataResponse) string { return v.Hash }).(pulumi.StringOutput) +} + +// The structured data object that should conform to a registered object definition in the schema for the data source. +func (o ItemStructuredDataResponseOutput) Object() StructuredDataObjectResponseOutput { + return o.ApplyT(func(v ItemStructuredDataResponse) StructuredDataObjectResponse { return v.Object }).(StructuredDataObjectResponseOutput) +} + +// A typed name-value pair for structured data. The type of the value should be the same as the registered type for the `name` property in the object definition of `objectType`. 
+type NamedPropertyResponse struct { + BooleanValue bool `pulumi:"booleanValue"` + DateValues DateValuesResponse `pulumi:"dateValues"` + DoubleValues DoubleValuesResponse `pulumi:"doubleValues"` + EnumValues EnumValuesResponse `pulumi:"enumValues"` + HtmlValues HtmlValuesResponse `pulumi:"htmlValues"` + IntegerValues IntegerValuesResponse `pulumi:"integerValues"` + // The name of the property. This name should correspond to the name of the property that was registered for object definition in the schema. The maximum allowable length for this property is 256 characters. + Name string `pulumi:"name"` + ObjectValues ObjectValuesResponse `pulumi:"objectValues"` + TextValues TextValuesResponse `pulumi:"textValues"` + TimestampValues TimestampValuesResponse `pulumi:"timestampValues"` +} -func (GSuitePrincipalArrayOutput) ElementType() reflect.Type { - return reflect.TypeOf((*[]GSuitePrincipal)(nil)).Elem() +// A typed name-value pair for structured data. The type of the value should be the same as the registered type for the `name` property in the object definition of `objectType`. +type NamedPropertyResponseOutput struct{ *pulumi.OutputState } + +func (NamedPropertyResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*NamedPropertyResponse)(nil)).Elem() } -func (o GSuitePrincipalArrayOutput) ToGSuitePrincipalArrayOutput() GSuitePrincipalArrayOutput { +func (o NamedPropertyResponseOutput) ToNamedPropertyResponseOutput() NamedPropertyResponseOutput { return o } -func (o GSuitePrincipalArrayOutput) ToGSuitePrincipalArrayOutputWithContext(ctx context.Context) GSuitePrincipalArrayOutput { +func (o NamedPropertyResponseOutput) ToNamedPropertyResponseOutputWithContext(ctx context.Context) NamedPropertyResponseOutput { return o } -func (o GSuitePrincipalArrayOutput) Index(i pulumi.IntInput) GSuitePrincipalOutput { - return pulumi.All(o, i).ApplyT(func(vs []interface{}) GSuitePrincipal { - return vs[0].([]GSuitePrincipal)[vs[1].(int)] - }).(GSuitePrincipalOutput) +func (o NamedPropertyResponseOutput) BooleanValue() pulumi.BoolOutput { + return o.ApplyT(func(v NamedPropertyResponse) bool { return v.BooleanValue }).(pulumi.BoolOutput) } -type GSuitePrincipalResponse struct { - // This principal represents all users of the Google Workspace domain of the customer. - GsuiteDomain bool `pulumi:"gsuiteDomain"` - // This principal references a Google Workspace group name. - GsuiteGroupEmail string `pulumi:"gsuiteGroupEmail"` - // This principal references a Google Workspace user account. 
- GsuiteUserEmail string `pulumi:"gsuiteUserEmail"` +func (o NamedPropertyResponseOutput) DateValues() DateValuesResponseOutput { + return o.ApplyT(func(v NamedPropertyResponse) DateValuesResponse { return v.DateValues }).(DateValuesResponseOutput) } -type GSuitePrincipalResponseOutput struct{ *pulumi.OutputState } +func (o NamedPropertyResponseOutput) DoubleValues() DoubleValuesResponseOutput { + return o.ApplyT(func(v NamedPropertyResponse) DoubleValuesResponse { return v.DoubleValues }).(DoubleValuesResponseOutput) +} -func (GSuitePrincipalResponseOutput) ElementType() reflect.Type { - return reflect.TypeOf((*GSuitePrincipalResponse)(nil)).Elem() +func (o NamedPropertyResponseOutput) EnumValues() EnumValuesResponseOutput { + return o.ApplyT(func(v NamedPropertyResponse) EnumValuesResponse { return v.EnumValues }).(EnumValuesResponseOutput) } -func (o GSuitePrincipalResponseOutput) ToGSuitePrincipalResponseOutput() GSuitePrincipalResponseOutput { - return o +func (o NamedPropertyResponseOutput) HtmlValues() HtmlValuesResponseOutput { + return o.ApplyT(func(v NamedPropertyResponse) HtmlValuesResponse { return v.HtmlValues }).(HtmlValuesResponseOutput) } -func (o GSuitePrincipalResponseOutput) ToGSuitePrincipalResponseOutputWithContext(ctx context.Context) GSuitePrincipalResponseOutput { - return o +func (o NamedPropertyResponseOutput) IntegerValues() IntegerValuesResponseOutput { + return o.ApplyT(func(v NamedPropertyResponse) IntegerValuesResponse { return v.IntegerValues }).(IntegerValuesResponseOutput) } -// This principal represents all users of the Google Workspace domain of the customer. -func (o GSuitePrincipalResponseOutput) GsuiteDomain() pulumi.BoolOutput { - return o.ApplyT(func(v GSuitePrincipalResponse) bool { return v.GsuiteDomain }).(pulumi.BoolOutput) +// The name of the property. This name should correspond to the name of the property that was registered for object definition in the schema. The maximum allowable length for this property is 256 characters. +func (o NamedPropertyResponseOutput) Name() pulumi.StringOutput { + return o.ApplyT(func(v NamedPropertyResponse) string { return v.Name }).(pulumi.StringOutput) } -// This principal references a Google Workspace group name. -func (o GSuitePrincipalResponseOutput) GsuiteGroupEmail() pulumi.StringOutput { - return o.ApplyT(func(v GSuitePrincipalResponse) string { return v.GsuiteGroupEmail }).(pulumi.StringOutput) +func (o NamedPropertyResponseOutput) ObjectValues() ObjectValuesResponseOutput { + return o.ApplyT(func(v NamedPropertyResponse) ObjectValuesResponse { return v.ObjectValues }).(ObjectValuesResponseOutput) } -// This principal references a Google Workspace user account. 
-func (o GSuitePrincipalResponseOutput) GsuiteUserEmail() pulumi.StringOutput { - return o.ApplyT(func(v GSuitePrincipalResponse) string { return v.GsuiteUserEmail }).(pulumi.StringOutput) +func (o NamedPropertyResponseOutput) TextValues() TextValuesResponseOutput { + return o.ApplyT(func(v NamedPropertyResponse) TextValuesResponse { return v.TextValues }).(TextValuesResponseOutput) } -type GSuitePrincipalResponseArrayOutput struct{ *pulumi.OutputState } +func (o NamedPropertyResponseOutput) TimestampValues() TimestampValuesResponseOutput { + return o.ApplyT(func(v NamedPropertyResponse) TimestampValuesResponse { return v.TimestampValues }).(TimestampValuesResponseOutput) +} -func (GSuitePrincipalResponseArrayOutput) ElementType() reflect.Type { - return reflect.TypeOf((*[]GSuitePrincipalResponse)(nil)).Elem() +type NamedPropertyResponseArrayOutput struct{ *pulumi.OutputState } + +func (NamedPropertyResponseArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]NamedPropertyResponse)(nil)).Elem() } -func (o GSuitePrincipalResponseArrayOutput) ToGSuitePrincipalResponseArrayOutput() GSuitePrincipalResponseArrayOutput { +func (o NamedPropertyResponseArrayOutput) ToNamedPropertyResponseArrayOutput() NamedPropertyResponseArrayOutput { return o } -func (o GSuitePrincipalResponseArrayOutput) ToGSuitePrincipalResponseArrayOutputWithContext(ctx context.Context) GSuitePrincipalResponseArrayOutput { +func (o NamedPropertyResponseArrayOutput) ToNamedPropertyResponseArrayOutputWithContext(ctx context.Context) NamedPropertyResponseArrayOutput { return o } -func (o GSuitePrincipalResponseArrayOutput) Index(i pulumi.IntInput) GSuitePrincipalResponseOutput { - return pulumi.All(o, i).ApplyT(func(vs []interface{}) GSuitePrincipalResponse { - return vs[0].([]GSuitePrincipalResponse)[vs[1].(int)] - }).(GSuitePrincipalResponseOutput) +func (o NamedPropertyResponseArrayOutput) Index(i pulumi.IntInput) NamedPropertyResponseOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) NamedPropertyResponse { + return vs[0].([]NamedPropertyResponse)[vs[1].(int)] + }).(NamedPropertyResponseOutput) } -// Used to specify integer faceting options. -type IntegerFacetingOptions struct { - // Buckets for given integer values should be in strictly ascending order. For example, if values supplied are (1,5,10,100), the following facet buckets will be formed {<1, [1,5), [5-10), [10-100), >=100}. - IntegerBuckets []string `pulumi:"integerBuckets"` +// List of object values. +type ObjectValuesResponse struct { + Values []StructuredDataObjectResponse `pulumi:"values"` } -// IntegerFacetingOptionsInput is an input type that accepts IntegerFacetingOptionsArgs and IntegerFacetingOptionsOutput values. -// You can construct a concrete instance of `IntegerFacetingOptionsInput` via: -// -// IntegerFacetingOptionsArgs{...} -type IntegerFacetingOptionsInput interface { - pulumi.Input +// List of object values. +type ObjectValuesResponseOutput struct{ *pulumi.OutputState } - ToIntegerFacetingOptionsOutput() IntegerFacetingOptionsOutput - ToIntegerFacetingOptionsOutputWithContext(context.Context) IntegerFacetingOptionsOutput +func (ObjectValuesResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*ObjectValuesResponse)(nil)).Elem() } -// Used to specify integer faceting options. -type IntegerFacetingOptionsArgs struct { - // Buckets for given integer values should be in strictly ascending order. 
For example, if values supplied are (1,5,10,100), the following facet buckets will be formed {<1, [1,5), [5-10), [10-100), >=100}. - IntegerBuckets pulumi.StringArrayInput `pulumi:"integerBuckets"` +func (o ObjectValuesResponseOutput) ToObjectValuesResponseOutput() ObjectValuesResponseOutput { + return o } -func (IntegerFacetingOptionsArgs) ElementType() reflect.Type { - return reflect.TypeOf((*IntegerFacetingOptions)(nil)).Elem() +func (o ObjectValuesResponseOutput) ToObjectValuesResponseOutputWithContext(ctx context.Context) ObjectValuesResponseOutput { + return o } -func (i IntegerFacetingOptionsArgs) ToIntegerFacetingOptionsOutput() IntegerFacetingOptionsOutput { - return i.ToIntegerFacetingOptionsOutputWithContext(context.Background()) +func (o ObjectValuesResponseOutput) Values() StructuredDataObjectResponseArrayOutput { + return o.ApplyT(func(v ObjectValuesResponse) []StructuredDataObjectResponse { return v.Values }).(StructuredDataObjectResponseArrayOutput) } -func (i IntegerFacetingOptionsArgs) ToIntegerFacetingOptionsOutputWithContext(ctx context.Context) IntegerFacetingOptionsOutput { - return pulumi.ToOutputWithContext(ctx, i).(IntegerFacetingOptionsOutput) +// Reference to a user, group, or domain. +type PrincipalResponse struct { + // This principal is a group identified using an external identity. The name field must specify the group resource name with this format: identitysources/{source_id}/groups/{ID} + GroupResourceName string `pulumi:"groupResourceName"` + // This principal is a Google Workspace user, group or domain. + GsuitePrincipal GSuitePrincipalResponse `pulumi:"gsuitePrincipal"` + // This principal is a user identified using an external identity. The name field must specify the user resource name with this format: identitysources/{source_id}/users/{ID} + UserResourceName string `pulumi:"userResourceName"` } -func (i IntegerFacetingOptionsArgs) ToIntegerFacetingOptionsPtrOutput() IntegerFacetingOptionsPtrOutput { - return i.ToIntegerFacetingOptionsPtrOutputWithContext(context.Background()) -} +// Reference to a user, group, or domain. +type PrincipalResponseOutput struct{ *pulumi.OutputState } -func (i IntegerFacetingOptionsArgs) ToIntegerFacetingOptionsPtrOutputWithContext(ctx context.Context) IntegerFacetingOptionsPtrOutput { - return pulumi.ToOutputWithContext(ctx, i).(IntegerFacetingOptionsOutput).ToIntegerFacetingOptionsPtrOutputWithContext(ctx) +func (PrincipalResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*PrincipalResponse)(nil)).Elem() } -// IntegerFacetingOptionsPtrInput is an input type that accepts IntegerFacetingOptionsArgs, IntegerFacetingOptionsPtr and IntegerFacetingOptionsPtrOutput values. 
-// You can construct a concrete instance of `IntegerFacetingOptionsPtrInput` via: -// -// IntegerFacetingOptionsArgs{...} -// -// or: -// -// nil -type IntegerFacetingOptionsPtrInput interface { - pulumi.Input - - ToIntegerFacetingOptionsPtrOutput() IntegerFacetingOptionsPtrOutput - ToIntegerFacetingOptionsPtrOutputWithContext(context.Context) IntegerFacetingOptionsPtrOutput +func (o PrincipalResponseOutput) ToPrincipalResponseOutput() PrincipalResponseOutput { + return o } -type integerFacetingOptionsPtrType IntegerFacetingOptionsArgs - -func IntegerFacetingOptionsPtr(v *IntegerFacetingOptionsArgs) IntegerFacetingOptionsPtrInput { - return (*integerFacetingOptionsPtrType)(v) +func (o PrincipalResponseOutput) ToPrincipalResponseOutputWithContext(ctx context.Context) PrincipalResponseOutput { + return o } -func (*integerFacetingOptionsPtrType) ElementType() reflect.Type { - return reflect.TypeOf((**IntegerFacetingOptions)(nil)).Elem() +// This principal is a group identified using an external identity. The name field must specify the group resource name with this format: identitysources/{source_id}/groups/{ID} +func (o PrincipalResponseOutput) GroupResourceName() pulumi.StringOutput { + return o.ApplyT(func(v PrincipalResponse) string { return v.GroupResourceName }).(pulumi.StringOutput) } -func (i *integerFacetingOptionsPtrType) ToIntegerFacetingOptionsPtrOutput() IntegerFacetingOptionsPtrOutput { - return i.ToIntegerFacetingOptionsPtrOutputWithContext(context.Background()) +// This principal is a Google Workspace user, group or domain. +func (o PrincipalResponseOutput) GsuitePrincipal() GSuitePrincipalResponseOutput { + return o.ApplyT(func(v PrincipalResponse) GSuitePrincipalResponse { return v.GsuitePrincipal }).(GSuitePrincipalResponseOutput) } -func (i *integerFacetingOptionsPtrType) ToIntegerFacetingOptionsPtrOutputWithContext(ctx context.Context) IntegerFacetingOptionsPtrOutput { - return pulumi.ToOutputWithContext(ctx, i).(IntegerFacetingOptionsPtrOutput) +// This principal is a user identified using an external identity. The name field must specify the user resource name with this format: identitysources/{source_id}/users/{ID} +func (o PrincipalResponseOutput) UserResourceName() pulumi.StringOutput { + return o.ApplyT(func(v PrincipalResponse) string { return v.UserResourceName }).(pulumi.StringOutput) } -// Used to specify integer faceting options. 
-type IntegerFacetingOptionsOutput struct{ *pulumi.OutputState } +type PrincipalResponseArrayOutput struct{ *pulumi.OutputState } -func (IntegerFacetingOptionsOutput) ElementType() reflect.Type { - return reflect.TypeOf((*IntegerFacetingOptions)(nil)).Elem() +func (PrincipalResponseArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]PrincipalResponse)(nil)).Elem() } -func (o IntegerFacetingOptionsOutput) ToIntegerFacetingOptionsOutput() IntegerFacetingOptionsOutput { +func (o PrincipalResponseArrayOutput) ToPrincipalResponseArrayOutput() PrincipalResponseArrayOutput { return o } -func (o IntegerFacetingOptionsOutput) ToIntegerFacetingOptionsOutputWithContext(ctx context.Context) IntegerFacetingOptionsOutput { +func (o PrincipalResponseArrayOutput) ToPrincipalResponseArrayOutputWithContext(ctx context.Context) PrincipalResponseArrayOutput { return o } -func (o IntegerFacetingOptionsOutput) ToIntegerFacetingOptionsPtrOutput() IntegerFacetingOptionsPtrOutput { - return o.ToIntegerFacetingOptionsPtrOutputWithContext(context.Background()) -} - -func (o IntegerFacetingOptionsOutput) ToIntegerFacetingOptionsPtrOutputWithContext(ctx context.Context) IntegerFacetingOptionsPtrOutput { - return o.ApplyTWithContext(ctx, func(_ context.Context, v IntegerFacetingOptions) *IntegerFacetingOptions { - return &v - }).(IntegerFacetingOptionsPtrOutput) +func (o PrincipalResponseArrayOutput) Index(i pulumi.IntInput) PrincipalResponseOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) PrincipalResponse { + return vs[0].([]PrincipalResponse)[vs[1].(int)] + }).(PrincipalResponseOutput) } -// Buckets for given integer values should be in strictly ascending order. For example, if values supplied are (1,5,10,100), the following facet buckets will be formed {<1, [1,5), [5-10), [10-100), >=100}. -func (o IntegerFacetingOptionsOutput) IntegerBuckets() pulumi.StringArrayOutput { - return o.ApplyT(func(v IntegerFacetingOptions) []string { return v.IntegerBuckets }).(pulumi.StringArrayOutput) +type ProcessingErrorResponse struct { + // Error code indicating the nature of the error. + Code string `pulumi:"code"` + // The description of the error. + ErrorMessage string `pulumi:"errorMessage"` + // In case the item fields are invalid, this field contains the details about the validation errors. 
+ FieldViolations []FieldViolationResponse `pulumi:"fieldViolations"` } -type IntegerFacetingOptionsPtrOutput struct{ *pulumi.OutputState } +type ProcessingErrorResponseOutput struct{ *pulumi.OutputState } -func (IntegerFacetingOptionsPtrOutput) ElementType() reflect.Type { - return reflect.TypeOf((**IntegerFacetingOptions)(nil)).Elem() +func (ProcessingErrorResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*ProcessingErrorResponse)(nil)).Elem() } -func (o IntegerFacetingOptionsPtrOutput) ToIntegerFacetingOptionsPtrOutput() IntegerFacetingOptionsPtrOutput { +func (o ProcessingErrorResponseOutput) ToProcessingErrorResponseOutput() ProcessingErrorResponseOutput { return o } -func (o IntegerFacetingOptionsPtrOutput) ToIntegerFacetingOptionsPtrOutputWithContext(ctx context.Context) IntegerFacetingOptionsPtrOutput { +func (o ProcessingErrorResponseOutput) ToProcessingErrorResponseOutputWithContext(ctx context.Context) ProcessingErrorResponseOutput { return o } -func (o IntegerFacetingOptionsPtrOutput) Elem() IntegerFacetingOptionsOutput { - return o.ApplyT(func(v *IntegerFacetingOptions) IntegerFacetingOptions { - if v != nil { - return *v - } - var ret IntegerFacetingOptions - return ret - }).(IntegerFacetingOptionsOutput) +// Error code indicating the nature of the error. +func (o ProcessingErrorResponseOutput) Code() pulumi.StringOutput { + return o.ApplyT(func(v ProcessingErrorResponse) string { return v.Code }).(pulumi.StringOutput) } -// Buckets for given integer values should be in strictly ascending order. For example, if values supplied are (1,5,10,100), the following facet buckets will be formed {<1, [1,5), [5-10), [10-100), >=100}. -func (o IntegerFacetingOptionsPtrOutput) IntegerBuckets() pulumi.StringArrayOutput { - return o.ApplyT(func(v *IntegerFacetingOptions) []string { - if v == nil { - return nil - } - return v.IntegerBuckets - }).(pulumi.StringArrayOutput) +// The description of the error. +func (o ProcessingErrorResponseOutput) ErrorMessage() pulumi.StringOutput { + return o.ApplyT(func(v ProcessingErrorResponse) string { return v.ErrorMessage }).(pulumi.StringOutput) } -// Used to specify integer faceting options. -type IntegerFacetingOptionsResponse struct { - // Buckets for given integer values should be in strictly ascending order. For example, if values supplied are (1,5,10,100), the following facet buckets will be formed {<1, [1,5), [5-10), [10-100), >=100}. - IntegerBuckets []string `pulumi:"integerBuckets"` +// In case the item fields are invalid, this field contains the details about the validation errors. +func (o ProcessingErrorResponseOutput) FieldViolations() FieldViolationResponseArrayOutput { + return o.ApplyT(func(v ProcessingErrorResponse) []FieldViolationResponse { return v.FieldViolations }).(FieldViolationResponseArrayOutput) } -// Used to specify integer faceting options. 
-type IntegerFacetingOptionsResponseOutput struct{ *pulumi.OutputState } +type ProcessingErrorResponseArrayOutput struct{ *pulumi.OutputState } -func (IntegerFacetingOptionsResponseOutput) ElementType() reflect.Type { - return reflect.TypeOf((*IntegerFacetingOptionsResponse)(nil)).Elem() +func (ProcessingErrorResponseArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]ProcessingErrorResponse)(nil)).Elem() } -func (o IntegerFacetingOptionsResponseOutput) ToIntegerFacetingOptionsResponseOutput() IntegerFacetingOptionsResponseOutput { +func (o ProcessingErrorResponseArrayOutput) ToProcessingErrorResponseArrayOutput() ProcessingErrorResponseArrayOutput { return o } -func (o IntegerFacetingOptionsResponseOutput) ToIntegerFacetingOptionsResponseOutputWithContext(ctx context.Context) IntegerFacetingOptionsResponseOutput { +func (o ProcessingErrorResponseArrayOutput) ToProcessingErrorResponseArrayOutputWithContext(ctx context.Context) ProcessingErrorResponseArrayOutput { return o } -// Buckets for given integer values should be in strictly ascending order. For example, if values supplied are (1,5,10,100), the following facet buckets will be formed {<1, [1,5), [5-10), [10-100), >=100}. -func (o IntegerFacetingOptionsResponseOutput) IntegerBuckets() pulumi.StringArrayOutput { - return o.ApplyT(func(v IntegerFacetingOptionsResponse) []string { return v.IntegerBuckets }).(pulumi.StringArrayOutput) +func (o ProcessingErrorResponseArrayOutput) Index(i pulumi.IntInput) ProcessingErrorResponseOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) ProcessingErrorResponse { + return vs[0].([]ProcessingErrorResponse)[vs[1].(int)] + }).(ProcessingErrorResponseOutput) } // Default options to interpret user query. @@ -1728,6 +2689,66 @@ func (o QueryInterpretationConfigResponseOutput) ForceVerbatimMode() pulumi.Bool return o.ApplyT(func(v QueryInterpretationConfigResponse) bool { return v.ForceVerbatimMode }).(pulumi.BoolOutput) } +// Errors when the connector is communicating to the source repository. +type RepositoryErrorResponse struct { + // Message that describes the error. The maximum allowable length of the message is 8192 characters. + ErrorMessage string `pulumi:"errorMessage"` + // Error codes. Matches the definition of HTTP status codes. + HttpStatusCode int `pulumi:"httpStatusCode"` + // The type of error. + Type string `pulumi:"type"` +} + +// Errors when the connector is communicating to the source repository. +type RepositoryErrorResponseOutput struct{ *pulumi.OutputState } + +func (RepositoryErrorResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*RepositoryErrorResponse)(nil)).Elem() +} + +func (o RepositoryErrorResponseOutput) ToRepositoryErrorResponseOutput() RepositoryErrorResponseOutput { + return o +} + +func (o RepositoryErrorResponseOutput) ToRepositoryErrorResponseOutputWithContext(ctx context.Context) RepositoryErrorResponseOutput { + return o +} + +// Message that describes the error. The maximum allowable length of the message is 8192 characters. +func (o RepositoryErrorResponseOutput) ErrorMessage() pulumi.StringOutput { + return o.ApplyT(func(v RepositoryErrorResponse) string { return v.ErrorMessage }).(pulumi.StringOutput) +} + +// Error codes. Matches the definition of HTTP status codes. +func (o RepositoryErrorResponseOutput) HttpStatusCode() pulumi.IntOutput { + return o.ApplyT(func(v RepositoryErrorResponse) int { return v.HttpStatusCode }).(pulumi.IntOutput) +} + +// The type of error. 
+func (o RepositoryErrorResponseOutput) Type() pulumi.StringOutput { + return o.ApplyT(func(v RepositoryErrorResponse) string { return v.Type }).(pulumi.StringOutput) +} + +type RepositoryErrorResponseArrayOutput struct{ *pulumi.OutputState } + +func (RepositoryErrorResponseArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]RepositoryErrorResponse)(nil)).Elem() +} + +func (o RepositoryErrorResponseArrayOutput) ToRepositoryErrorResponseArrayOutput() RepositoryErrorResponseArrayOutput { + return o +} + +func (o RepositoryErrorResponseArrayOutput) ToRepositoryErrorResponseArrayOutputWithContext(ctx context.Context) RepositoryErrorResponseArrayOutput { + return o +} + +func (o RepositoryErrorResponseArrayOutput) Index(i pulumi.IntInput) RepositoryErrorResponseOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) RepositoryErrorResponse { + return vs[0].([]RepositoryErrorResponse)[vs[1].(int)] + }).(RepositoryErrorResponseOutput) +} + // Scoring configurations for a source while processing a Search or Suggest request. type ScoringConfig struct { // Whether to use freshness as a ranking signal. By default, freshness is used as a ranking signal. Note that this setting is not available in the Admin UI. @@ -1920,6 +2941,32 @@ func (o ScoringConfigResponseOutput) DisablePersonalization() pulumi.BoolOutput return o.ApplyT(func(v ScoringConfigResponse) bool { return v.DisablePersonalization }).(pulumi.BoolOutput) } +// Additional search quality metadata of the item. +type SearchQualityMetadataResponse struct { + // An indication of the quality of the item, used to influence search quality. Value should be between 0.0 (lowest quality) and 1.0 (highest quality). The default value is 0.0. + Quality float64 `pulumi:"quality"` +} + +// Additional search quality metadata of the item. +type SearchQualityMetadataResponseOutput struct{ *pulumi.OutputState } + +func (SearchQualityMetadataResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*SearchQualityMetadataResponse)(nil)).Elem() +} + +func (o SearchQualityMetadataResponseOutput) ToSearchQualityMetadataResponseOutput() SearchQualityMetadataResponseOutput { + return o +} + +func (o SearchQualityMetadataResponseOutput) ToSearchQualityMetadataResponseOutputWithContext(ctx context.Context) SearchQualityMetadataResponseOutput { + return o +} + +// An indication of the quality of the item, used to influence search quality. Value should be between 0.0 (lowest quality) and 1.0 (highest quality). The default value is 0.0. +func (o SearchQualityMetadataResponseOutput) Quality() pulumi.Float64Output { + return o.ApplyT(func(v SearchQualityMetadataResponse) float64 { return v.Quality }).(pulumi.Float64Output) +} + type SortOptions struct { // The name of the operator corresponding to the field to sort on. The corresponding property must be marked as sortable. OperatorName *string `pulumi:"operatorName"` @@ -2835,6 +3882,128 @@ func (o SourceScoringConfigResponseOutput) SourceImportance() pulumi.StringOutpu return o.ApplyT(func(v SourceScoringConfigResponse) string { return v.SourceImportance }).(pulumi.StringOutput) } +// A structured data object consisting of named properties. +type StructuredDataObjectResponse struct { + // The properties for the object. The maximum number of elements is 1000. + Properties []NamedPropertyResponse `pulumi:"properties"` +} + +// A structured data object consisting of named properties. 
+type StructuredDataObjectResponseOutput struct{ *pulumi.OutputState } + +func (StructuredDataObjectResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*StructuredDataObjectResponse)(nil)).Elem() +} + +func (o StructuredDataObjectResponseOutput) ToStructuredDataObjectResponseOutput() StructuredDataObjectResponseOutput { + return o +} + +func (o StructuredDataObjectResponseOutput) ToStructuredDataObjectResponseOutputWithContext(ctx context.Context) StructuredDataObjectResponseOutput { + return o +} + +// The properties for the object. The maximum number of elements is 1000. +func (o StructuredDataObjectResponseOutput) Properties() NamedPropertyResponseArrayOutput { + return o.ApplyT(func(v StructuredDataObjectResponse) []NamedPropertyResponse { return v.Properties }).(NamedPropertyResponseArrayOutput) +} + +type StructuredDataObjectResponseArrayOutput struct{ *pulumi.OutputState } + +func (StructuredDataObjectResponseArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]StructuredDataObjectResponse)(nil)).Elem() +} + +func (o StructuredDataObjectResponseArrayOutput) ToStructuredDataObjectResponseArrayOutput() StructuredDataObjectResponseArrayOutput { + return o +} + +func (o StructuredDataObjectResponseArrayOutput) ToStructuredDataObjectResponseArrayOutputWithContext(ctx context.Context) StructuredDataObjectResponseArrayOutput { + return o +} + +func (o StructuredDataObjectResponseArrayOutput) Index(i pulumi.IntInput) StructuredDataObjectResponseOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) StructuredDataObjectResponse { + return vs[0].([]StructuredDataObjectResponse)[vs[1].(int)] + }).(StructuredDataObjectResponseOutput) +} + +// List of text values. +type TextValuesResponse struct { + // The maximum allowable length for text values is 2048 characters. + Values []string `pulumi:"values"` +} + +// List of text values. +type TextValuesResponseOutput struct{ *pulumi.OutputState } + +func (TextValuesResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*TextValuesResponse)(nil)).Elem() +} + +func (o TextValuesResponseOutput) ToTextValuesResponseOutput() TextValuesResponseOutput { + return o +} + +func (o TextValuesResponseOutput) ToTextValuesResponseOutputWithContext(ctx context.Context) TextValuesResponseOutput { + return o +} + +// The maximum allowable length for text values is 2048 characters. +func (o TextValuesResponseOutput) Values() pulumi.StringArrayOutput { + return o.ApplyT(func(v TextValuesResponse) []string { return v.Values }).(pulumi.StringArrayOutput) +} + +// List of timestamp values. +type TimestampValuesResponse struct { + Values []string `pulumi:"values"` +} + +// List of timestamp values. +type TimestampValuesResponseOutput struct{ *pulumi.OutputState } + +func (TimestampValuesResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*TimestampValuesResponse)(nil)).Elem() +} + +func (o TimestampValuesResponseOutput) ToTimestampValuesResponseOutput() TimestampValuesResponseOutput { + return o +} + +func (o TimestampValuesResponseOutput) ToTimestampValuesResponseOutputWithContext(ctx context.Context) TimestampValuesResponseOutput { + return o +} + +func (o TimestampValuesResponseOutput) Values() pulumi.StringArrayOutput { + return o.ApplyT(func(v TimestampValuesResponse) []string { return v.Values }).(pulumi.StringArrayOutput) +} + +// Represents an upload session reference. This reference is created via upload method. This reference is valid for 30 days after its creation. 
Updating of item content may refer to this uploaded content via contentDataRef. +type UploadItemRefResponse struct { + // The name of the content reference. The maximum length is 2048 characters. + Name string `pulumi:"name"` +} + +// Represents an upload session reference. This reference is created via upload method. This reference is valid for 30 days after its creation. Updating of item content may refer to this uploaded content via contentDataRef. +type UploadItemRefResponseOutput struct{ *pulumi.OutputState } + +func (UploadItemRefResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*UploadItemRefResponse)(nil)).Elem() +} + +func (o UploadItemRefResponseOutput) ToUploadItemRefResponseOutput() UploadItemRefResponseOutput { + return o +} + +func (o UploadItemRefResponseOutput) ToUploadItemRefResponseOutputWithContext(ctx context.Context) UploadItemRefResponseOutput { + return o +} + +// The name of the content reference. The maximum length is 2048 characters. +func (o UploadItemRefResponseOutput) Name() pulumi.StringOutput { + return o.ApplyT(func(v UploadItemRefResponse) string { return v.Name }).(pulumi.StringOutput) +} + // Definition of a single value with generic type. type Value struct { BooleanValue *bool `pulumi:"booleanValue"` @@ -3289,6 +4458,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*DataSourceRestrictionArrayInput)(nil)).Elem(), DataSourceRestrictionArray{}) pulumi.RegisterInputType(reflect.TypeOf((*DateInput)(nil)).Elem(), DateArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*DatePtrInput)(nil)).Elem(), DateArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*DebugOptionsInput)(nil)).Elem(), DebugOptionsArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*DebugOptionsPtrInput)(nil)).Elem(), DebugOptionsArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*FacetOptionsInput)(nil)).Elem(), FacetOptionsArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*FacetOptionsArrayInput)(nil)).Elem(), FacetOptionsArray{}) pulumi.RegisterInputType(reflect.TypeOf((*FilterInput)(nil)).Elem(), FilterArgs{}) @@ -3321,6 +4492,8 @@ func init() { pulumi.RegisterOutputType(CompositeFilterOutput{}) pulumi.RegisterOutputType(CompositeFilterPtrOutput{}) pulumi.RegisterOutputType(CompositeFilterResponseOutput{}) + pulumi.RegisterOutputType(ContextAttributeResponseOutput{}) + pulumi.RegisterOutputType(ContextAttributeResponseArrayOutput{}) pulumi.RegisterOutputType(DataSourceRestrictionOutput{}) pulumi.RegisterOutputType(DataSourceRestrictionArrayOutput{}) pulumi.RegisterOutputType(DataSourceRestrictionResponseOutput{}) @@ -3328,10 +4501,18 @@ func init() { pulumi.RegisterOutputType(DateOutput{}) pulumi.RegisterOutputType(DatePtrOutput{}) pulumi.RegisterOutputType(DateResponseOutput{}) + pulumi.RegisterOutputType(DateResponseArrayOutput{}) + pulumi.RegisterOutputType(DateValuesResponseOutput{}) + pulumi.RegisterOutputType(DebugOptionsOutput{}) + pulumi.RegisterOutputType(DebugOptionsPtrOutput{}) + pulumi.RegisterOutputType(DoubleValuesResponseOutput{}) + pulumi.RegisterOutputType(EnumValuesResponseOutput{}) pulumi.RegisterOutputType(FacetOptionsOutput{}) pulumi.RegisterOutputType(FacetOptionsArrayOutput{}) pulumi.RegisterOutputType(FacetOptionsResponseOutput{}) pulumi.RegisterOutputType(FacetOptionsResponseArrayOutput{}) + pulumi.RegisterOutputType(FieldViolationResponseOutput{}) + pulumi.RegisterOutputType(FieldViolationResponseArrayOutput{}) pulumi.RegisterOutputType(FilterOutput{}) pulumi.RegisterOutputType(FilterPtrOutput{}) pulumi.RegisterOutputType(FilterArrayOutput{}) 
@@ -3345,15 +4526,34 @@ func init() { pulumi.RegisterOutputType(GSuitePrincipalArrayOutput{}) pulumi.RegisterOutputType(GSuitePrincipalResponseOutput{}) pulumi.RegisterOutputType(GSuitePrincipalResponseArrayOutput{}) + pulumi.RegisterOutputType(HtmlValuesResponseOutput{}) pulumi.RegisterOutputType(IntegerFacetingOptionsOutput{}) pulumi.RegisterOutputType(IntegerFacetingOptionsPtrOutput{}) pulumi.RegisterOutputType(IntegerFacetingOptionsResponseOutput{}) + pulumi.RegisterOutputType(IntegerValuesResponseOutput{}) + pulumi.RegisterOutputType(InteractionResponseOutput{}) + pulumi.RegisterOutputType(InteractionResponseArrayOutput{}) + pulumi.RegisterOutputType(ItemAclResponseOutput{}) + pulumi.RegisterOutputType(ItemContentResponseOutput{}) + pulumi.RegisterOutputType(ItemMetadataResponseOutput{}) + pulumi.RegisterOutputType(ItemStatusResponseOutput{}) + pulumi.RegisterOutputType(ItemStructuredDataResponseOutput{}) + pulumi.RegisterOutputType(NamedPropertyResponseOutput{}) + pulumi.RegisterOutputType(NamedPropertyResponseArrayOutput{}) + pulumi.RegisterOutputType(ObjectValuesResponseOutput{}) + pulumi.RegisterOutputType(PrincipalResponseOutput{}) + pulumi.RegisterOutputType(PrincipalResponseArrayOutput{}) + pulumi.RegisterOutputType(ProcessingErrorResponseOutput{}) + pulumi.RegisterOutputType(ProcessingErrorResponseArrayOutput{}) pulumi.RegisterOutputType(QueryInterpretationConfigOutput{}) pulumi.RegisterOutputType(QueryInterpretationConfigPtrOutput{}) pulumi.RegisterOutputType(QueryInterpretationConfigResponseOutput{}) + pulumi.RegisterOutputType(RepositoryErrorResponseOutput{}) + pulumi.RegisterOutputType(RepositoryErrorResponseArrayOutput{}) pulumi.RegisterOutputType(ScoringConfigOutput{}) pulumi.RegisterOutputType(ScoringConfigPtrOutput{}) pulumi.RegisterOutputType(ScoringConfigResponseOutput{}) + pulumi.RegisterOutputType(SearchQualityMetadataResponseOutput{}) pulumi.RegisterOutputType(SortOptionsOutput{}) pulumi.RegisterOutputType(SortOptionsPtrOutput{}) pulumi.RegisterOutputType(SortOptionsResponseOutput{}) @@ -3370,6 +4570,11 @@ func init() { pulumi.RegisterOutputType(SourceScoringConfigOutput{}) pulumi.RegisterOutputType(SourceScoringConfigPtrOutput{}) pulumi.RegisterOutputType(SourceScoringConfigResponseOutput{}) + pulumi.RegisterOutputType(StructuredDataObjectResponseOutput{}) + pulumi.RegisterOutputType(StructuredDataObjectResponseArrayOutput{}) + pulumi.RegisterOutputType(TextValuesResponseOutput{}) + pulumi.RegisterOutputType(TimestampValuesResponseOutput{}) + pulumi.RegisterOutputType(UploadItemRefResponseOutput{}) pulumi.RegisterOutputType(ValueOutput{}) pulumi.RegisterOutputType(ValuePtrOutput{}) pulumi.RegisterOutputType(ValueFilterOutput{}) diff --git a/sdk/nodejs/aiplatform/v1/getModel.ts b/sdk/nodejs/aiplatform/v1/getModel.ts new file mode 100644 index 0000000000..16e30f49c7 --- /dev/null +++ b/sdk/nodejs/aiplatform/v1/getModel.ts @@ -0,0 +1,158 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +import * as pulumi from "@pulumi/pulumi"; +import * as inputs from "../../types/input"; +import * as outputs from "../../types/output"; +import * as enums from "../../types/enums"; +import * as utilities from "../../utilities"; + +/** + * Gets a Model. 
+ */ +export function getModel(args: GetModelArgs, opts?: pulumi.InvokeOptions): Promise { + + opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts || {}); + return pulumi.runtime.invoke("google-native:aiplatform/v1:getModel", { + "location": args.location, + "modelId": args.modelId, + "project": args.project, + }, opts); +} + +export interface GetModelArgs { + location: string; + modelId: string; + project?: string; +} + +export interface GetModelResult { + /** + * Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + */ + readonly artifactUri: string; + /** + * Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + */ + readonly containerSpec: outputs.aiplatform.v1.GoogleCloudAiplatformV1ModelContainerSpecResponse; + /** + * Timestamp when this Model was uploaded into Vertex AI. + */ + readonly createTime: string; + /** + * The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. + */ + readonly deployedModels: outputs.aiplatform.v1.GoogleCloudAiplatformV1DeployedModelRefResponse[]; + /** + * The description of the Model. + */ + readonly description: string; + /** + * The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + */ + readonly displayName: string; + /** + * Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + */ + readonly encryptionSpec: outputs.aiplatform.v1.GoogleCloudAiplatformV1EncryptionSpecResponse; + /** + * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + */ + readonly etag: string; + /** + * The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + */ + readonly explanationSpec: outputs.aiplatform.v1.GoogleCloudAiplatformV1ExplanationSpecResponse; + /** + * The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + */ + readonly labels: {[key: string]: string}; + /** + * Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + */ + readonly metadata: any; + /** + * The resource name of the Artifact that was created in MetadataStore when creating the Model. 
The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. + */ + readonly metadataArtifact: string; + /** + * Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + */ + readonly metadataSchemaUri: string; + /** + * Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model. + */ + readonly modelSourceInfo: outputs.aiplatform.v1.GoogleCloudAiplatformV1ModelSourceInfoResponse; + /** + * The resource name of the Model. + */ + readonly name: string; + /** + * If this Model is a copy of another Model, this contains info about the original. + */ + readonly originalModelInfo: outputs.aiplatform.v1.GoogleCloudAiplatformV1ModelOriginalModelInfoResponse; + /** + * Optional. This field is populated if the model is produced by a pipeline job. + */ + readonly pipelineJob: string; + /** + * The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + */ + readonly predictSchemata: outputs.aiplatform.v1.GoogleCloudAiplatformV1PredictSchemataResponse; + /** + * When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. + */ + readonly supportedDeploymentResourcesTypes: string[]; + /** + * The formats in which this Model may be exported. If empty, this Model is not available for export. + */ + readonly supportedExportFormats: outputs.aiplatform.v1.GoogleCloudAiplatformV1ModelExportFormatResponse[]; + /** + * The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. 
* `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + */ + readonly supportedInputStorageFormats: string[]; + /** + * The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + */ + readonly supportedOutputStorageFormats: string[]; + /** + * The resource name of the TrainingPipeline that uploaded this Model, if any. + */ + readonly trainingPipeline: string; + /** + * Timestamp when this Model was most recently updated. + */ + readonly updateTime: string; + /** + * User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + */ + readonly versionAliases: string[]; + /** + * Timestamp when this version was created. + */ + readonly versionCreateTime: string; + /** + * The description of this version. + */ + readonly versionDescription: string; + /** + * Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation. + */ + readonly versionId: string; + /** + * Timestamp when this version was most recently updated. + */ + readonly versionUpdateTime: string; +} +/** + * Gets a Model. 
+ */ +export function getModelOutput(args: GetModelOutputArgs, opts?: pulumi.InvokeOptions): pulumi.Output { + return pulumi.output(args).apply((a: any) => getModel(a, opts)) +} + +export interface GetModelOutputArgs { + location: pulumi.Input; + modelId: pulumi.Input; + project?: pulumi.Input; +} diff --git a/sdk/nodejs/aiplatform/v1/index.ts b/sdk/nodejs/aiplatform/v1/index.ts index 75cff93e3d..0223a09a92 100644 --- a/sdk/nodejs/aiplatform/v1/index.ts +++ b/sdk/nodejs/aiplatform/v1/index.ts @@ -250,6 +250,11 @@ export const getMetadataStore: typeof import("./getMetadataStore").getMetadataSt export const getMetadataStoreOutput: typeof import("./getMetadataStore").getMetadataStoreOutput = null as any; utilities.lazyLoad(exports, ["getMetadataStore","getMetadataStoreOutput"], () => require("./getMetadataStore")); +export { GetModelArgs, GetModelResult, GetModelOutputArgs } from "./getModel"; +export const getModel: typeof import("./getModel").getModel = null as any; +export const getModelOutput: typeof import("./getModel").getModelOutput = null as any; +utilities.lazyLoad(exports, ["getModel","getModelOutput"], () => require("./getModel")); + export { GetModelDeploymentMonitoringJobArgs, GetModelDeploymentMonitoringJobResult, GetModelDeploymentMonitoringJobOutputArgs } from "./getModelDeploymentMonitoringJob"; export const getModelDeploymentMonitoringJob: typeof import("./getModelDeploymentMonitoringJob").getModelDeploymentMonitoringJob = null as any; export const getModelDeploymentMonitoringJobOutput: typeof import("./getModelDeploymentMonitoringJob").getModelDeploymentMonitoringJobOutput = null as any; @@ -340,6 +345,11 @@ export type MetadataStore = import("./metadataStore").MetadataStore; export const MetadataStore: typeof import("./metadataStore").MetadataStore = null as any; utilities.lazyLoad(exports, ["MetadataStore"], () => require("./metadataStore")); +export { ModelArgs } from "./model"; +export type Model = import("./model").Model; +export const Model: typeof import("./model").Model = null as any; +utilities.lazyLoad(exports, ["Model"], () => require("./model")); + export { ModelDeploymentMonitoringJobArgs } from "./modelDeploymentMonitoringJob"; export type ModelDeploymentMonitoringJob = import("./modelDeploymentMonitoringJob").ModelDeploymentMonitoringJob; export const ModelDeploymentMonitoringJob: typeof import("./modelDeploymentMonitoringJob").ModelDeploymentMonitoringJob = null as any; @@ -481,6 +491,8 @@ const _module = { return new MetadataSchema(name, undefined, { urn }) case "google-native:aiplatform/v1:MetadataStore": return new MetadataStore(name, undefined, { urn }) + case "google-native:aiplatform/v1:Model": + return new Model(name, undefined, { urn }) case "google-native:aiplatform/v1:ModelDeploymentMonitoringJob": return new ModelDeploymentMonitoringJob(name, undefined, { urn }) case "google-native:aiplatform/v1:NasJob": diff --git a/sdk/nodejs/aiplatform/v1/model.ts b/sdk/nodejs/aiplatform/v1/model.ts new file mode 100644 index 0000000000..3386fd6b6d --- /dev/null +++ b/sdk/nodejs/aiplatform/v1/model.ts @@ -0,0 +1,325 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +import * as pulumi from "@pulumi/pulumi"; +import * as inputs from "../../types/input"; +import * as outputs from "../../types/output"; +import * as enums from "../../types/enums"; +import * as utilities from "../../utilities"; + +/** + * Uploads a Model artifact into Vertex AI. 
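The getModel and getModelOutput data-source functions added above take the model's location, its model ID, and an optional project. A minimal usage sketch, assuming the published @pulumi/google-native Node.js package and placeholder project/model IDs:

    import * as google from "@pulumi/google-native";

    // Look up an existing Vertex AI Model; location, model ID and project are placeholders.
    const model = google.aiplatform.v1.getModelOutput({
        location: "us-central1",
        modelId: "1234567890",
        project: "my-project",
    });

    // Export a couple of the read-only fields returned by the invoke.
    export const modelDisplayName = model.displayName;
    export const modelVersionId = model.versionId;
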
+ */ +export class Model extends pulumi.CustomResource { + /** + * Get an existing Model resource's state with the given name, ID, and optional extra + * properties used to qualify the lookup. + * + * @param name The _unique_ name of the resulting resource. + * @param id The _unique_ provider ID of the resource to lookup. + * @param opts Optional settings to control the behavior of the CustomResource. + */ + public static get(name: string, id: pulumi.Input, opts?: pulumi.CustomResourceOptions): Model { + return new Model(name, undefined as any, { ...opts, id: id }); + } + + /** @internal */ + public static readonly __pulumiType = 'google-native:aiplatform/v1:Model'; + + /** + * Returns true if the given object is an instance of Model. This is designed to work even + * when multiple copies of the Pulumi SDK have been loaded into the same process. + */ + public static isInstance(obj: any): obj is Model { + if (obj === undefined || obj === null) { + return false; + } + return obj['__pulumiType'] === Model.__pulumiType; + } + + /** + * Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + */ + public readonly artifactUri!: pulumi.Output; + /** + * Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + */ + public readonly containerSpec!: pulumi.Output; + /** + * Timestamp when this Model was uploaded into Vertex AI. + */ + public /*out*/ readonly createTime!: pulumi.Output; + /** + * The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. + */ + public /*out*/ readonly deployedModels!: pulumi.Output; + /** + * The description of the Model. + */ + public readonly description!: pulumi.Output; + /** + * The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + */ + public readonly displayName!: pulumi.Output; + /** + * Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + */ + public readonly encryptionSpec!: pulumi.Output; + /** + * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + */ + public readonly etag!: pulumi.Output; + /** + * The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + */ + public readonly explanationSpec!: pulumi.Output; + /** + * The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. 
See https://goo.gl/xmQnxf for more information and examples of labels. + */ + public readonly labels!: pulumi.Output<{[key: string]: string}>; + public readonly location!: pulumi.Output; + /** + * Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + */ + public readonly metadata!: pulumi.Output; + /** + * The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. + */ + public /*out*/ readonly metadataArtifact!: pulumi.Output; + /** + * Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + */ + public readonly metadataSchemaUri!: pulumi.Output; + /** + * Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model. + */ + public /*out*/ readonly modelSourceInfo!: pulumi.Output; + /** + * The resource name of the Model. + */ + public readonly name!: pulumi.Output; + /** + * If this Model is a copy of another Model, this contains info about the original. + */ + public /*out*/ readonly originalModelInfo!: pulumi.Output; + /** + * Optional. This field is populated if the model is produced by a pipeline job. + */ + public readonly pipelineJob!: pulumi.Output; + /** + * The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + */ + public readonly predictSchemata!: pulumi.Output; + public readonly project!: pulumi.Output; + /** + * When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. + */ + public /*out*/ readonly supportedDeploymentResourcesTypes!: pulumi.Output; + /** + * The formats in which this Model may be exported. If empty, this Model is not available for export. + */ + public /*out*/ readonly supportedExportFormats!: pulumi.Output; + /** + * The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. 
* `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + */ + public /*out*/ readonly supportedInputStorageFormats!: pulumi.Output; + /** + * The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + */ + public /*out*/ readonly supportedOutputStorageFormats!: pulumi.Output; + /** + * The resource name of the TrainingPipeline that uploaded this Model, if any. + */ + public /*out*/ readonly trainingPipeline!: pulumi.Output; + /** + * Timestamp when this Model was most recently updated. + */ + public /*out*/ readonly updateTime!: pulumi.Output; + /** + * User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + */ + public readonly versionAliases!: pulumi.Output; + /** + * Timestamp when this version was created. + */ + public /*out*/ readonly versionCreateTime!: pulumi.Output; + /** + * The description of this version. + */ + public readonly versionDescription!: pulumi.Output; + /** + * Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation. + */ + public /*out*/ readonly versionId!: pulumi.Output; + /** + * Timestamp when this version was most recently updated. 
+ */ + public /*out*/ readonly versionUpdateTime!: pulumi.Output; + + /** + * Create a Model resource with the given unique name, arguments, and options. + * + * @param name The _unique_ name of the resource. + * @param args The arguments to use to populate this resource's properties. + * @param opts A bag of options that control this resource's behavior. + */ + constructor(name: string, args: ModelArgs, opts?: pulumi.CustomResourceOptions) { + let resourceInputs: pulumi.Inputs = {}; + opts = opts || {}; + if (!opts.id) { + if ((!args || args.displayName === undefined) && !opts.urn) { + throw new Error("Missing required property 'displayName'"); + } + resourceInputs["artifactUri"] = args ? args.artifactUri : undefined; + resourceInputs["containerSpec"] = args ? args.containerSpec : undefined; + resourceInputs["description"] = args ? args.description : undefined; + resourceInputs["displayName"] = args ? args.displayName : undefined; + resourceInputs["encryptionSpec"] = args ? args.encryptionSpec : undefined; + resourceInputs["etag"] = args ? args.etag : undefined; + resourceInputs["explanationSpec"] = args ? args.explanationSpec : undefined; + resourceInputs["labels"] = args ? args.labels : undefined; + resourceInputs["location"] = args ? args.location : undefined; + resourceInputs["metadata"] = args ? args.metadata : undefined; + resourceInputs["metadataSchemaUri"] = args ? args.metadataSchemaUri : undefined; + resourceInputs["modelId"] = args ? args.modelId : undefined; + resourceInputs["name"] = args ? args.name : undefined; + resourceInputs["parentModel"] = args ? args.parentModel : undefined; + resourceInputs["pipelineJob"] = args ? args.pipelineJob : undefined; + resourceInputs["predictSchemata"] = args ? args.predictSchemata : undefined; + resourceInputs["project"] = args ? args.project : undefined; + resourceInputs["serviceAccount"] = args ? args.serviceAccount : undefined; + resourceInputs["versionAliases"] = args ? args.versionAliases : undefined; + resourceInputs["versionDescription"] = args ? 
args.versionDescription : undefined; + resourceInputs["createTime"] = undefined /*out*/; + resourceInputs["deployedModels"] = undefined /*out*/; + resourceInputs["metadataArtifact"] = undefined /*out*/; + resourceInputs["modelSourceInfo"] = undefined /*out*/; + resourceInputs["originalModelInfo"] = undefined /*out*/; + resourceInputs["supportedDeploymentResourcesTypes"] = undefined /*out*/; + resourceInputs["supportedExportFormats"] = undefined /*out*/; + resourceInputs["supportedInputStorageFormats"] = undefined /*out*/; + resourceInputs["supportedOutputStorageFormats"] = undefined /*out*/; + resourceInputs["trainingPipeline"] = undefined /*out*/; + resourceInputs["updateTime"] = undefined /*out*/; + resourceInputs["versionCreateTime"] = undefined /*out*/; + resourceInputs["versionId"] = undefined /*out*/; + resourceInputs["versionUpdateTime"] = undefined /*out*/; + } else { + resourceInputs["artifactUri"] = undefined /*out*/; + resourceInputs["containerSpec"] = undefined /*out*/; + resourceInputs["createTime"] = undefined /*out*/; + resourceInputs["deployedModels"] = undefined /*out*/; + resourceInputs["description"] = undefined /*out*/; + resourceInputs["displayName"] = undefined /*out*/; + resourceInputs["encryptionSpec"] = undefined /*out*/; + resourceInputs["etag"] = undefined /*out*/; + resourceInputs["explanationSpec"] = undefined /*out*/; + resourceInputs["labels"] = undefined /*out*/; + resourceInputs["location"] = undefined /*out*/; + resourceInputs["metadata"] = undefined /*out*/; + resourceInputs["metadataArtifact"] = undefined /*out*/; + resourceInputs["metadataSchemaUri"] = undefined /*out*/; + resourceInputs["modelSourceInfo"] = undefined /*out*/; + resourceInputs["name"] = undefined /*out*/; + resourceInputs["originalModelInfo"] = undefined /*out*/; + resourceInputs["pipelineJob"] = undefined /*out*/; + resourceInputs["predictSchemata"] = undefined /*out*/; + resourceInputs["project"] = undefined /*out*/; + resourceInputs["supportedDeploymentResourcesTypes"] = undefined /*out*/; + resourceInputs["supportedExportFormats"] = undefined /*out*/; + resourceInputs["supportedInputStorageFormats"] = undefined /*out*/; + resourceInputs["supportedOutputStorageFormats"] = undefined /*out*/; + resourceInputs["trainingPipeline"] = undefined /*out*/; + resourceInputs["updateTime"] = undefined /*out*/; + resourceInputs["versionAliases"] = undefined /*out*/; + resourceInputs["versionCreateTime"] = undefined /*out*/; + resourceInputs["versionDescription"] = undefined /*out*/; + resourceInputs["versionId"] = undefined /*out*/; + resourceInputs["versionUpdateTime"] = undefined /*out*/; + } + opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); + const replaceOnChanges = { replaceOnChanges: ["location", "project"] }; + opts = pulumi.mergeOptions(opts, replaceOnChanges); + super(Model.__pulumiType, name, resourceInputs, opts); + } +} + +/** + * The set of arguments for constructing a Model resource. + */ +export interface ModelArgs { + /** + * Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + */ + artifactUri?: pulumi.Input; + /** + * Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + */ + containerSpec?: pulumi.Input; + /** + * The description of the Model. 
+ */ + description?: pulumi.Input; + /** + * The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + */ + displayName: pulumi.Input; + /** + * Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + */ + encryptionSpec?: pulumi.Input; + /** + * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + */ + etag?: pulumi.Input; + /** + * The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + */ + explanationSpec?: pulumi.Input; + /** + * The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + */ + labels?: pulumi.Input<{[key: string]: pulumi.Input}>; + location?: pulumi.Input; + /** + * Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + */ + metadata?: any; + /** + * Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + */ + metadataSchemaUri?: pulumi.Input; + /** + * Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. + */ + modelId?: pulumi.Input; + /** + * The resource name of the Model. + */ + name?: pulumi.Input; + /** + * Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version. + */ + parentModel?: pulumi.Input; + /** + * Optional. This field is populated if the model is produced by a pipeline job. + */ + pipelineJob?: pulumi.Input; + /** + * The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + */ + predictSchemata?: pulumi.Input; + project?: pulumi.Input; + /** + * Optional. The user-provided custom service account to use to do the model upload. 
If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.). + */ + serviceAccount?: pulumi.Input; + /** + * User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + */ + versionAliases?: pulumi.Input[]>; + /** + * The description of this version. + */ + versionDescription?: pulumi.Input; +} diff --git a/sdk/nodejs/aiplatform/v1beta1/getModel.ts b/sdk/nodejs/aiplatform/v1beta1/getModel.ts new file mode 100644 index 0000000000..2efe82622c --- /dev/null +++ b/sdk/nodejs/aiplatform/v1beta1/getModel.ts @@ -0,0 +1,154 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +import * as pulumi from "@pulumi/pulumi"; +import * as inputs from "../../types/input"; +import * as outputs from "../../types/output"; +import * as enums from "../../types/enums"; +import * as utilities from "../../utilities"; + +/** + * Gets a Model. + */ +export function getModel(args: GetModelArgs, opts?: pulumi.InvokeOptions): Promise { + + opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts || {}); + return pulumi.runtime.invoke("google-native:aiplatform/v1beta1:getModel", { + "location": args.location, + "modelId": args.modelId, + "project": args.project, + }, opts); +} + +export interface GetModelArgs { + location: string; + modelId: string; + project?: string; +} + +export interface GetModelResult { + /** + * Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + */ + readonly artifactUri: string; + /** + * Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + */ + readonly containerSpec: outputs.aiplatform.v1beta1.GoogleCloudAiplatformV1beta1ModelContainerSpecResponse; + /** + * Timestamp when this Model was uploaded into Vertex AI. + */ + readonly createTime: string; + /** + * The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. + */ + readonly deployedModels: outputs.aiplatform.v1beta1.GoogleCloudAiplatformV1beta1DeployedModelRefResponse[]; + /** + * The description of the Model. + */ + readonly description: string; + /** + * The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + */ + readonly displayName: string; + /** + * Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. 
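With ModelService.UploadModel now mapped to Create, the v1 Model resource defined above can be declared like any other Pulumi resource; only displayName is required. A minimal sketch, assuming the @pulumi/google-native package, a prebuilt serving container referenced via the container spec's imageUri field, and placeholder bucket/project names:

    import * as google from "@pulumi/google-native";

    // Upload a model artifact into Vertex AI. The artifact URI, container image,
    // project and labels below are placeholders, not values from this change.
    const model = new google.aiplatform.v1.Model("example-model", {
        displayName: "example-model",
        location: "us-central1",
        project: "my-project",
        artifactUri: "gs://my-bucket/model/",
        containerSpec: {
            imageUri: "us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-0:latest",
        },
        labels: { team: "ml" },
    });

    // location and project are listed in replaceOnChanges, so changing either
    // forces a replacement rather than an in-place update.
    export const modelName = model.name;
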
+ */ + readonly encryptionSpec: outputs.aiplatform.v1beta1.GoogleCloudAiplatformV1beta1EncryptionSpecResponse; + /** + * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + */ + readonly etag: string; + /** + * The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + */ + readonly explanationSpec: outputs.aiplatform.v1beta1.GoogleCloudAiplatformV1beta1ExplanationSpecResponse; + /** + * The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + */ + readonly labels: {[key: string]: string}; + /** + * Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + */ + readonly metadata: any; + /** + * The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. + */ + readonly metadataArtifact: string; + /** + * Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + */ + readonly metadataSchemaUri: string; + /** + * Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model. + */ + readonly modelSourceInfo: outputs.aiplatform.v1beta1.GoogleCloudAiplatformV1beta1ModelSourceInfoResponse; + /** + * The resource name of the Model. + */ + readonly name: string; + /** + * If this Model is a copy of another Model, this contains info about the original. + */ + readonly originalModelInfo: outputs.aiplatform.v1beta1.GoogleCloudAiplatformV1beta1ModelOriginalModelInfoResponse; + /** + * The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. 
+ */ + readonly predictSchemata: outputs.aiplatform.v1beta1.GoogleCloudAiplatformV1beta1PredictSchemataResponse; + /** + * When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. + */ + readonly supportedDeploymentResourcesTypes: string[]; + /** + * The formats in which this Model may be exported. If empty, this Model is not available for export. + */ + readonly supportedExportFormats: outputs.aiplatform.v1beta1.GoogleCloudAiplatformV1beta1ModelExportFormatResponse[]; + /** + * The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + */ + readonly supportedInputStorageFormats: string[]; + /** + * The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + */ + readonly supportedOutputStorageFormats: string[]; + /** + * The resource name of the TrainingPipeline that uploaded this Model, if any. + */ + readonly trainingPipeline: string; + /** + * Timestamp when this Model was most recently updated. 
+ */ + readonly updateTime: string; + /** + * User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + */ + readonly versionAliases: string[]; + /** + * Timestamp when this version was created. + */ + readonly versionCreateTime: string; + /** + * The description of this version. + */ + readonly versionDescription: string; + /** + * Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation. + */ + readonly versionId: string; + /** + * Timestamp when this version was most recently updated. + */ + readonly versionUpdateTime: string; +} +/** + * Gets a Model. + */ +export function getModelOutput(args: GetModelOutputArgs, opts?: pulumi.InvokeOptions): pulumi.Output { + return pulumi.output(args).apply((a: any) => getModel(a, opts)) +} + +export interface GetModelOutputArgs { + location: pulumi.Input; + modelId: pulumi.Input; + project?: pulumi.Input; +} diff --git a/sdk/nodejs/aiplatform/v1beta1/index.ts b/sdk/nodejs/aiplatform/v1beta1/index.ts index 94fff6deda..8e502919a1 100644 --- a/sdk/nodejs/aiplatform/v1beta1/index.ts +++ b/sdk/nodejs/aiplatform/v1beta1/index.ts @@ -270,6 +270,11 @@ export const getMetadataStore: typeof import("./getMetadataStore").getMetadataSt export const getMetadataStoreOutput: typeof import("./getMetadataStore").getMetadataStoreOutput = null as any; utilities.lazyLoad(exports, ["getMetadataStore","getMetadataStoreOutput"], () => require("./getMetadataStore")); +export { GetModelArgs, GetModelResult, GetModelOutputArgs } from "./getModel"; +export const getModel: typeof import("./getModel").getModel = null as any; +export const getModelOutput: typeof import("./getModel").getModelOutput = null as any; +utilities.lazyLoad(exports, ["getModel","getModelOutput"], () => require("./getModel")); + export { GetModelDeploymentMonitoringJobArgs, GetModelDeploymentMonitoringJobResult, GetModelDeploymentMonitoringJobOutputArgs } from "./getModelDeploymentMonitoringJob"; export const getModelDeploymentMonitoringJob: typeof import("./getModelDeploymentMonitoringJob").getModelDeploymentMonitoringJob = null as any; export const getModelDeploymentMonitoringJobOutput: typeof import("./getModelDeploymentMonitoringJob").getModelDeploymentMonitoringJobOutput = null as any; @@ -370,6 +375,11 @@ export type MetadataStore = import("./metadataStore").MetadataStore; export const MetadataStore: typeof import("./metadataStore").MetadataStore = null as any; utilities.lazyLoad(exports, ["MetadataStore"], () => require("./metadataStore")); +export { ModelArgs } from "./model"; +export type Model = import("./model").Model; +export const Model: typeof import("./model").Model = null as any; +utilities.lazyLoad(exports, ["Model"], () => require("./model")); + export { ModelDeploymentMonitoringJobArgs } from "./modelDeploymentMonitoringJob"; export type ModelDeploymentMonitoringJob = import("./modelDeploymentMonitoringJob").ModelDeploymentMonitoringJob; export const ModelDeploymentMonitoringJob: typeof 
import("./modelDeploymentMonitoringJob").ModelDeploymentMonitoringJob = null as any; @@ -537,6 +547,8 @@ const _module = { return new MetadataSchema(name, undefined, { urn }) case "google-native:aiplatform/v1beta1:MetadataStore": return new MetadataStore(name, undefined, { urn }) + case "google-native:aiplatform/v1beta1:Model": + return new Model(name, undefined, { urn }) case "google-native:aiplatform/v1beta1:ModelDeploymentMonitoringJob": return new ModelDeploymentMonitoringJob(name, undefined, { urn }) case "google-native:aiplatform/v1beta1:ModelIamBinding": diff --git a/sdk/nodejs/aiplatform/v1beta1/model.ts b/sdk/nodejs/aiplatform/v1beta1/model.ts new file mode 100644 index 0000000000..7e0b6b3ad2 --- /dev/null +++ b/sdk/nodejs/aiplatform/v1beta1/model.ts @@ -0,0 +1,315 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +import * as pulumi from "@pulumi/pulumi"; +import * as inputs from "../../types/input"; +import * as outputs from "../../types/output"; +import * as enums from "../../types/enums"; +import * as utilities from "../../utilities"; + +/** + * Uploads a Model artifact into Vertex AI. + */ +export class Model extends pulumi.CustomResource { + /** + * Get an existing Model resource's state with the given name, ID, and optional extra + * properties used to qualify the lookup. + * + * @param name The _unique_ name of the resulting resource. + * @param id The _unique_ provider ID of the resource to lookup. + * @param opts Optional settings to control the behavior of the CustomResource. + */ + public static get(name: string, id: pulumi.Input, opts?: pulumi.CustomResourceOptions): Model { + return new Model(name, undefined as any, { ...opts, id: id }); + } + + /** @internal */ + public static readonly __pulumiType = 'google-native:aiplatform/v1beta1:Model'; + + /** + * Returns true if the given object is an instance of Model. This is designed to work even + * when multiple copies of the Pulumi SDK have been loaded into the same process. + */ + public static isInstance(obj: any): obj is Model { + if (obj === undefined || obj === null) { + return false; + } + return obj['__pulumiType'] === Model.__pulumiType; + } + + /** + * Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + */ + public readonly artifactUri!: pulumi.Output; + /** + * Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + */ + public readonly containerSpec!: pulumi.Output; + /** + * Timestamp when this Model was uploaded into Vertex AI. + */ + public /*out*/ readonly createTime!: pulumi.Output; + /** + * The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. + */ + public /*out*/ readonly deployedModels!: pulumi.Output; + /** + * The description of the Model. + */ + public readonly description!: pulumi.Output; + /** + * The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + */ + public readonly displayName!: pulumi.Output; + /** + * Customer-managed encryption key spec for a Model. 
If set, this Model and all sub-resources of this Model will be secured by this key. + */ + public readonly encryptionSpec!: pulumi.Output; + /** + * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + */ + public readonly etag!: pulumi.Output; + /** + * The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + */ + public readonly explanationSpec!: pulumi.Output; + /** + * The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + */ + public readonly labels!: pulumi.Output<{[key: string]: string}>; + public readonly location!: pulumi.Output; + /** + * Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + */ + public readonly metadata!: pulumi.Output; + /** + * The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. + */ + public /*out*/ readonly metadataArtifact!: pulumi.Output; + /** + * Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + */ + public readonly metadataSchemaUri!: pulumi.Output; + /** + * Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model. + */ + public /*out*/ readonly modelSourceInfo!: pulumi.Output; + /** + * The resource name of the Model. + */ + public readonly name!: pulumi.Output; + /** + * If this Model is a copy of another Model, this contains info about the original. + */ + public /*out*/ readonly originalModelInfo!: pulumi.Output; + /** + * The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. 
+ */ + public readonly predictSchemata!: pulumi.Output; + public readonly project!: pulumi.Output; + /** + * When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. + */ + public /*out*/ readonly supportedDeploymentResourcesTypes!: pulumi.Output; + /** + * The formats in which this Model may be exported. If empty, this Model is not available for export. + */ + public /*out*/ readonly supportedExportFormats!: pulumi.Output; + /** + * The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + */ + public /*out*/ readonly supportedInputStorageFormats!: pulumi.Output; + /** + * The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + */ + public /*out*/ readonly supportedOutputStorageFormats!: pulumi.Output; + /** + * The resource name of the TrainingPipeline that uploaded this Model, if any. + */ + public /*out*/ readonly trainingPipeline!: pulumi.Output; + /** + * Timestamp when this Model was most recently updated. 
+ */ + public /*out*/ readonly updateTime!: pulumi.Output; + /** + * User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + */ + public readonly versionAliases!: pulumi.Output; + /** + * Timestamp when this version was created. + */ + public /*out*/ readonly versionCreateTime!: pulumi.Output; + /** + * The description of this version. + */ + public readonly versionDescription!: pulumi.Output; + /** + * Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation. + */ + public /*out*/ readonly versionId!: pulumi.Output; + /** + * Timestamp when this version was most recently updated. + */ + public /*out*/ readonly versionUpdateTime!: pulumi.Output; + + /** + * Create a Model resource with the given unique name, arguments, and options. + * + * @param name The _unique_ name of the resource. + * @param args The arguments to use to populate this resource's properties. + * @param opts A bag of options that control this resource's behavior. + */ + constructor(name: string, args: ModelArgs, opts?: pulumi.CustomResourceOptions) { + let resourceInputs: pulumi.Inputs = {}; + opts = opts || {}; + if (!opts.id) { + if ((!args || args.displayName === undefined) && !opts.urn) { + throw new Error("Missing required property 'displayName'"); + } + resourceInputs["artifactUri"] = args ? args.artifactUri : undefined; + resourceInputs["containerSpec"] = args ? args.containerSpec : undefined; + resourceInputs["description"] = args ? args.description : undefined; + resourceInputs["displayName"] = args ? args.displayName : undefined; + resourceInputs["encryptionSpec"] = args ? args.encryptionSpec : undefined; + resourceInputs["etag"] = args ? args.etag : undefined; + resourceInputs["explanationSpec"] = args ? args.explanationSpec : undefined; + resourceInputs["labels"] = args ? args.labels : undefined; + resourceInputs["location"] = args ? args.location : undefined; + resourceInputs["metadata"] = args ? args.metadata : undefined; + resourceInputs["metadataSchemaUri"] = args ? args.metadataSchemaUri : undefined; + resourceInputs["modelId"] = args ? args.modelId : undefined; + resourceInputs["name"] = args ? args.name : undefined; + resourceInputs["parentModel"] = args ? args.parentModel : undefined; + resourceInputs["predictSchemata"] = args ? args.predictSchemata : undefined; + resourceInputs["project"] = args ? args.project : undefined; + resourceInputs["serviceAccount"] = args ? args.serviceAccount : undefined; + resourceInputs["versionAliases"] = args ? args.versionAliases : undefined; + resourceInputs["versionDescription"] = args ? 
args.versionDescription : undefined; + resourceInputs["createTime"] = undefined /*out*/; + resourceInputs["deployedModels"] = undefined /*out*/; + resourceInputs["metadataArtifact"] = undefined /*out*/; + resourceInputs["modelSourceInfo"] = undefined /*out*/; + resourceInputs["originalModelInfo"] = undefined /*out*/; + resourceInputs["supportedDeploymentResourcesTypes"] = undefined /*out*/; + resourceInputs["supportedExportFormats"] = undefined /*out*/; + resourceInputs["supportedInputStorageFormats"] = undefined /*out*/; + resourceInputs["supportedOutputStorageFormats"] = undefined /*out*/; + resourceInputs["trainingPipeline"] = undefined /*out*/; + resourceInputs["updateTime"] = undefined /*out*/; + resourceInputs["versionCreateTime"] = undefined /*out*/; + resourceInputs["versionId"] = undefined /*out*/; + resourceInputs["versionUpdateTime"] = undefined /*out*/; + } else { + resourceInputs["artifactUri"] = undefined /*out*/; + resourceInputs["containerSpec"] = undefined /*out*/; + resourceInputs["createTime"] = undefined /*out*/; + resourceInputs["deployedModels"] = undefined /*out*/; + resourceInputs["description"] = undefined /*out*/; + resourceInputs["displayName"] = undefined /*out*/; + resourceInputs["encryptionSpec"] = undefined /*out*/; + resourceInputs["etag"] = undefined /*out*/; + resourceInputs["explanationSpec"] = undefined /*out*/; + resourceInputs["labels"] = undefined /*out*/; + resourceInputs["location"] = undefined /*out*/; + resourceInputs["metadata"] = undefined /*out*/; + resourceInputs["metadataArtifact"] = undefined /*out*/; + resourceInputs["metadataSchemaUri"] = undefined /*out*/; + resourceInputs["modelSourceInfo"] = undefined /*out*/; + resourceInputs["name"] = undefined /*out*/; + resourceInputs["originalModelInfo"] = undefined /*out*/; + resourceInputs["predictSchemata"] = undefined /*out*/; + resourceInputs["project"] = undefined /*out*/; + resourceInputs["supportedDeploymentResourcesTypes"] = undefined /*out*/; + resourceInputs["supportedExportFormats"] = undefined /*out*/; + resourceInputs["supportedInputStorageFormats"] = undefined /*out*/; + resourceInputs["supportedOutputStorageFormats"] = undefined /*out*/; + resourceInputs["trainingPipeline"] = undefined /*out*/; + resourceInputs["updateTime"] = undefined /*out*/; + resourceInputs["versionAliases"] = undefined /*out*/; + resourceInputs["versionCreateTime"] = undefined /*out*/; + resourceInputs["versionDescription"] = undefined /*out*/; + resourceInputs["versionId"] = undefined /*out*/; + resourceInputs["versionUpdateTime"] = undefined /*out*/; + } + opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); + const replaceOnChanges = { replaceOnChanges: ["location", "project"] }; + opts = pulumi.mergeOptions(opts, replaceOnChanges); + super(Model.__pulumiType, name, resourceInputs, opts); + } +} + +/** + * The set of arguments for constructing a Model resource. + */ +export interface ModelArgs { + /** + * Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + */ + artifactUri?: pulumi.Input; + /** + * Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + */ + containerSpec?: pulumi.Input; + /** + * The description of the Model. 
+ */ + description?: pulumi.Input; + /** + * The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + */ + displayName: pulumi.Input; + /** + * Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + */ + encryptionSpec?: pulumi.Input; + /** + * Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + */ + etag?: pulumi.Input; + /** + * The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + */ + explanationSpec?: pulumi.Input; + /** + * The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + */ + labels?: pulumi.Input<{[key: string]: pulumi.Input}>; + location?: pulumi.Input; + /** + * Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + */ + metadata?: any; + /** + * Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + */ + metadataSchemaUri?: pulumi.Input; + /** + * Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. + */ + modelId?: pulumi.Input; + /** + * The resource name of the Model. + */ + name?: pulumi.Input; + /** + * Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version. + */ + parentModel?: pulumi.Input; + /** + * The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + */ + predictSchemata?: pulumi.Input; + project?: pulumi.Input; + /** + * Optional. The user-provided custom service account to use to do the model upload. 
If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.). + */ + serviceAccount?: pulumi.Input; + /** + * User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + */ + versionAliases?: pulumi.Input[]>; + /** + * The description of this version. + */ + versionDescription?: pulumi.Input; +} diff --git a/sdk/nodejs/cloudsearch/v1/getItem.ts b/sdk/nodejs/cloudsearch/v1/getItem.ts new file mode 100644 index 0000000000..5620eafd5b --- /dev/null +++ b/sdk/nodejs/cloudsearch/v1/getItem.ts @@ -0,0 +1,85 @@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +import * as pulumi from "@pulumi/pulumi"; +import * as inputs from "../../types/input"; +import * as outputs from "../../types/output"; +import * as enums from "../../types/enums"; +import * as utilities from "../../utilities"; + +/** + * Gets Item resource by item name. This API requires an admin or service account to execute. The service account used is the one whitelisted in the corresponding data source. + */ +export function getItem(args: GetItemArgs, opts?: pulumi.InvokeOptions): Promise { + + opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts || {}); + return pulumi.runtime.invoke("google-native:cloudsearch/v1:getItem", { + "connectorName": args.connectorName, + "datasourceId": args.datasourceId, + "debugOptionsEnableDebugging": args.debugOptionsEnableDebugging, + "itemId": args.itemId, + }, opts); +} + +export interface GetItemArgs { + connectorName?: string; + datasourceId: string; + debugOptionsEnableDebugging?: boolean; + itemId: string; +} + +export interface GetItemResult { + /** + * Access control list for this item. + */ + readonly acl: outputs.cloudsearch.v1.ItemAclResponse; + /** + * Item content to be indexed and made text searchable. + */ + readonly content: outputs.cloudsearch.v1.ItemContentResponse; + /** + * The type for this item. + */ + readonly itemType: string; + /** + * The metadata information. + */ + readonly metadata: outputs.cloudsearch.v1.ItemMetadataResponse; + /** + * The name of the Item. Format: datasources/{source_id}/items/{item_id} This is a required field. The maximum length is 1536 characters. + */ + readonly name: string; + /** + * Additional state connector can store for this item. The maximum length is 10000 bytes. + */ + readonly payload: string; + /** + * Queue this item belongs to. The maximum length is 100 characters. + */ + readonly queue: string; + /** + * Status of the item. Output only field. + */ + readonly status: outputs.cloudsearch.v1.ItemStatusResponse; + /** + * The structured data for the item that should conform to a registered object definition in the schema for the data source. 
+ */ + readonly structuredData: outputs.cloudsearch.v1.ItemStructuredDataResponse; + /** + * The indexing system stores the version from the datasource as a byte string and compares the Item version in the index to the version of the queued Item using lexical ordering. Cloud Search Indexing won't index or delete any queued item with a version value that is less than or equal to the version of the currently indexed item. The maximum length for this field is 1024 bytes. For information on how item version affects the deletion process, refer to [Handle revisions after manual deletes](https://developers.google.com/cloud-search/docs/guides/operations). + */ + readonly version: string; +} +/** + * Gets Item resource by item name. This API requires an admin or service account to execute. The service account used is the one whitelisted in the corresponding data source. + */ +export function getItemOutput(args: GetItemOutputArgs, opts?: pulumi.InvokeOptions): pulumi.Output { + return pulumi.output(args).apply((a: any) => getItem(a, opts)) +} + +export interface GetItemOutputArgs { + connectorName?: pulumi.Input; + datasourceId: pulumi.Input; + debugOptionsEnableDebugging?: pulumi.Input; + itemId: pulumi.Input; +} diff --git a/sdk/nodejs/cloudsearch/v1/index.ts b/sdk/nodejs/cloudsearch/v1/index.ts index eb18697e69..5dd796981d 100644 --- a/sdk/nodejs/cloudsearch/v1/index.ts +++ b/sdk/nodejs/cloudsearch/v1/index.ts @@ -15,11 +15,21 @@ export const getDataSource: typeof import("./getDataSource").getDataSource = nul export const getDataSourceOutput: typeof import("./getDataSource").getDataSourceOutput = null as any; utilities.lazyLoad(exports, ["getDataSource","getDataSourceOutput"], () => require("./getDataSource")); +export { GetItemArgs, GetItemResult, GetItemOutputArgs } from "./getItem"; +export const getItem: typeof import("./getItem").getItem = null as any; +export const getItemOutput: typeof import("./getItem").getItemOutput = null as any; +utilities.lazyLoad(exports, ["getItem","getItemOutput"], () => require("./getItem")); + export { GetSearchApplicationArgs, GetSearchApplicationResult, GetSearchApplicationOutputArgs } from "./getSearchApplication"; export const getSearchApplication: typeof import("./getSearchApplication").getSearchApplication = null as any; export const getSearchApplicationOutput: typeof import("./getSearchApplication").getSearchApplicationOutput = null as any; utilities.lazyLoad(exports, ["getSearchApplication","getSearchApplicationOutput"], () => require("./getSearchApplication")); +export { ItemArgs } from "./item"; +export type Item = import("./item").Item; +export const Item: typeof import("./item").Item = null as any; +utilities.lazyLoad(exports, ["Item"], () => require("./item")); + export { SearchApplicationArgs } from "./searchApplication"; export type SearchApplication = import("./searchApplication").SearchApplication; export const SearchApplication: typeof import("./searchApplication").SearchApplication = null as any; @@ -35,6 +45,8 @@ const _module = { switch (type) { case "google-native:cloudsearch/v1:DataSource": return new DataSource(name, undefined, { urn }) + case "google-native:cloudsearch/v1:Item": + return new Item(name, undefined, { urn }) case "google-native:cloudsearch/v1:SearchApplication": return new SearchApplication(name, undefined, { urn }) default: diff --git a/sdk/nodejs/cloudsearch/v1/item.ts b/sdk/nodejs/cloudsearch/v1/item.ts new file mode 100644 index 0000000000..ee7aa6e12b --- /dev/null +++ b/sdk/nodejs/cloudsearch/v1/item.ts @@ -0,0 +1,150 
@@ +// *** WARNING: this file was generated by the Pulumi SDK Generator. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +import * as pulumi from "@pulumi/pulumi"; +import * as inputs from "../../types/input"; +import * as outputs from "../../types/output"; +import * as enums from "../../types/enums"; +import * as utilities from "../../utilities"; + +/** + * Creates an upload session for uploading item content. For items smaller than 100 KB, it's easier to embed the content inline within an index request. This API requires an admin or service account to execute. The service account used is the one whitelisted in the corresponding data source. + * Auto-naming is currently not supported for this resource. + */ +export class Item extends pulumi.CustomResource { + /** + * Get an existing Item resource's state with the given name, ID, and optional extra + * properties used to qualify the lookup. + * + * @param name The _unique_ name of the resulting resource. + * @param id The _unique_ provider ID of the resource to lookup. + * @param opts Optional settings to control the behavior of the CustomResource. + */ + public static get(name: string, id: pulumi.Input, opts?: pulumi.CustomResourceOptions): Item { + return new Item(name, undefined as any, { ...opts, id: id }); + } + + /** @internal */ + public static readonly __pulumiType = 'google-native:cloudsearch/v1:Item'; + + /** + * Returns true if the given object is an instance of Item. This is designed to work even + * when multiple copies of the Pulumi SDK have been loaded into the same process. + */ + public static isInstance(obj: any): obj is Item { + if (obj === undefined || obj === null) { + return false; + } + return obj['__pulumiType'] === Item.__pulumiType; + } + + /** + * Access control list for this item. + */ + public /*out*/ readonly acl!: pulumi.Output; + /** + * Item content to be indexed and made text searchable. + */ + public /*out*/ readonly content!: pulumi.Output; + public readonly datasourceId!: pulumi.Output; + public readonly itemId!: pulumi.Output; + /** + * The type for this item. + */ + public /*out*/ readonly itemType!: pulumi.Output; + /** + * The metadata information. + */ + public /*out*/ readonly metadata!: pulumi.Output; + /** + * The name of the Item. Format: datasources/{source_id}/items/{item_id} This is a required field. The maximum length is 1536 characters. + */ + public /*out*/ readonly name!: pulumi.Output; + /** + * Additional state connector can store for this item. The maximum length is 10000 bytes. + */ + public /*out*/ readonly payload!: pulumi.Output; + /** + * Queue this item belongs to. The maximum length is 100 characters. + */ + public /*out*/ readonly queue!: pulumi.Output; + /** + * Status of the item. Output only field. + */ + public /*out*/ readonly status!: pulumi.Output; + /** + * The structured data for the item that should conform to a registered object definition in the schema for the data source. + */ + public /*out*/ readonly structuredData!: pulumi.Output; + /** + * The indexing system stores the version from the datasource as a byte string and compares the Item version in the index to the version of the queued Item using lexical ordering. Cloud Search Indexing won't index or delete any queued item with a version value that is less than or equal to the version of the currently indexed item. The maximum length for this field is 1024 bytes. 
For information on how item version affects the deletion process, refer to [Handle revisions after manual deletes](https://developers.google.com/cloud-search/docs/guides/operations). + */ + public /*out*/ readonly version!: pulumi.Output; + + /** + * Create a Item resource with the given unique name, arguments, and options. + * + * @param name The _unique_ name of the resource. + * @param args The arguments to use to populate this resource's properties. + * @param opts A bag of options that control this resource's behavior. + */ + constructor(name: string, args: ItemArgs, opts?: pulumi.CustomResourceOptions) { + let resourceInputs: pulumi.Inputs = {}; + opts = opts || {}; + if (!opts.id) { + if ((!args || args.datasourceId === undefined) && !opts.urn) { + throw new Error("Missing required property 'datasourceId'"); + } + if ((!args || args.itemId === undefined) && !opts.urn) { + throw new Error("Missing required property 'itemId'"); + } + resourceInputs["connectorName"] = args ? args.connectorName : undefined; + resourceInputs["datasourceId"] = args ? args.datasourceId : undefined; + resourceInputs["debugOptions"] = args ? args.debugOptions : undefined; + resourceInputs["itemId"] = args ? args.itemId : undefined; + resourceInputs["acl"] = undefined /*out*/; + resourceInputs["content"] = undefined /*out*/; + resourceInputs["itemType"] = undefined /*out*/; + resourceInputs["metadata"] = undefined /*out*/; + resourceInputs["name"] = undefined /*out*/; + resourceInputs["payload"] = undefined /*out*/; + resourceInputs["queue"] = undefined /*out*/; + resourceInputs["status"] = undefined /*out*/; + resourceInputs["structuredData"] = undefined /*out*/; + resourceInputs["version"] = undefined /*out*/; + } else { + resourceInputs["acl"] = undefined /*out*/; + resourceInputs["content"] = undefined /*out*/; + resourceInputs["datasourceId"] = undefined /*out*/; + resourceInputs["itemId"] = undefined /*out*/; + resourceInputs["itemType"] = undefined /*out*/; + resourceInputs["metadata"] = undefined /*out*/; + resourceInputs["name"] = undefined /*out*/; + resourceInputs["payload"] = undefined /*out*/; + resourceInputs["queue"] = undefined /*out*/; + resourceInputs["status"] = undefined /*out*/; + resourceInputs["structuredData"] = undefined /*out*/; + resourceInputs["version"] = undefined /*out*/; + } + opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); + const replaceOnChanges = { replaceOnChanges: ["datasourceId", "itemId"] }; + opts = pulumi.mergeOptions(opts, replaceOnChanges); + super(Item.__pulumiType, name, resourceInputs, opts); + } +} + +/** + * The set of arguments for constructing a Item resource. + */ +export interface ItemArgs { + /** + * The name of connector making this call. Format: datasources/{source_id}/connectors/{ID} + */ + connectorName?: pulumi.Input; + datasourceId: pulumi.Input; + /** + * Common debug options. 
+ */ + debugOptions?: pulumi.Input; + itemId: pulumi.Input; +} diff --git a/sdk/nodejs/tsconfig.json b/sdk/nodejs/tsconfig.json index 5116c0d3e5..f4ca2188f3 100644 --- a/sdk/nodejs/tsconfig.json +++ b/sdk/nodejs/tsconfig.json @@ -86,6 +86,7 @@ "aiplatform/v1/getIndexEndpoint.ts", "aiplatform/v1/getMetadataSchema.ts", "aiplatform/v1/getMetadataStore.ts", + "aiplatform/v1/getModel.ts", "aiplatform/v1/getModelDeploymentMonitoringJob.ts", "aiplatform/v1/getNasJob.ts", "aiplatform/v1/getNotebookRuntimeTemplate.ts", @@ -105,6 +106,7 @@ "aiplatform/v1/index_.ts", "aiplatform/v1/metadataSchema.ts", "aiplatform/v1/metadataStore.ts", + "aiplatform/v1/model.ts", "aiplatform/v1/modelDeploymentMonitoringJob.ts", "aiplatform/v1/nasJob.ts", "aiplatform/v1/notebookRuntimeTemplate.ts", @@ -173,6 +175,7 @@ "aiplatform/v1beta1/getIndexEndpoint.ts", "aiplatform/v1beta1/getMetadataSchema.ts", "aiplatform/v1beta1/getMetadataStore.ts", + "aiplatform/v1beta1/getModel.ts", "aiplatform/v1beta1/getModelDeploymentMonitoringJob.ts", "aiplatform/v1beta1/getModelIamPolicy.ts", "aiplatform/v1beta1/getNasJob.ts", @@ -194,6 +197,7 @@ "aiplatform/v1beta1/index_.ts", "aiplatform/v1beta1/metadataSchema.ts", "aiplatform/v1beta1/metadataStore.ts", + "aiplatform/v1beta1/model.ts", "aiplatform/v1beta1/modelDeploymentMonitoringJob.ts", "aiplatform/v1beta1/modelIamBinding.ts", "aiplatform/v1beta1/modelIamMember.ts", @@ -1042,8 +1046,10 @@ "cloudsearch/index.ts", "cloudsearch/v1/dataSource.ts", "cloudsearch/v1/getDataSource.ts", + "cloudsearch/v1/getItem.ts", "cloudsearch/v1/getSearchApplication.ts", "cloudsearch/v1/index.ts", + "cloudsearch/v1/item.ts", "cloudsearch/v1/searchApplication.ts", "cloudsupport/index.ts", "cloudsupport/v2/case.ts", diff --git a/sdk/nodejs/types/input.ts b/sdk/nodejs/types/input.ts index 002e201702..6a8b62949c 100644 --- a/sdk/nodejs/types/input.ts +++ b/sdk/nodejs/types/input.ts @@ -18116,6 +18116,16 @@ export namespace cloudsearch { year?: pulumi.Input; } + /** + * Shared request debug options for all cloudsearch RPC methods. + */ + export interface DebugOptionsArgs { + /** + * If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field. + */ + enableDebugging?: pulumi.Input; + } + /** * Specifies operators to return facet results for. There will be one FacetResult for every source_name/object_type/operator_name combination. */ diff --git a/sdk/nodejs/types/output.ts b/sdk/nodejs/types/output.ts index 09b6e5f447..39264a64cf 100644 --- a/sdk/nodejs/types/output.ts +++ b/sdk/nodejs/types/output.ts @@ -23759,6 +23759,20 @@ export namespace cloudsearch { subFilters: outputs.cloudsearch.v1.FilterResponse[]; } + /** + * A named attribute associated with an item which can be used for influencing the ranking of the item based on the context in the request. + */ + export interface ContextAttributeResponse { + /** + * The name of the attribute. It should not be empty. The maximum length is 32 characters. The name must start with a letter and can only contain letters (A-Z, a-z) or numbers (0-9). The name will be normalized (lower-cased) before being matched. + */ + name: string; + /** + * Text values of the attribute. The maximum number of elements is 10. The maximum length of an element in the array is 32 characters. The value will be normalized (lower-cased) before being matched. + */ + values: string[]; + } + /** * Restriction on Datasource. */ @@ -23791,6 +23805,30 @@ export namespace cloudsearch { year: number; } + /** + * List of date values. 
+ */ + export interface DateValuesResponse { + values: outputs.cloudsearch.v1.DateResponse[]; + } + + /** + * List of double values. + */ + export interface DoubleValuesResponse { + values: number[]; + } + + /** + * List of enum values. + */ + export interface EnumValuesResponse { + /** + * The maximum allowable length for string values is 32 characters. + */ + values: string[]; + } + /** * Specifies operators to return facet results for. There will be one FacetResult for every source_name/object_type/operator_name combination. */ @@ -23817,6 +23855,17 @@ export namespace cloudsearch { sourceName: string; } + export interface FieldViolationResponse { + /** + * The description of the error. + */ + description: string; + /** + * Path of field with violation. + */ + field: string; + } + /** * Filter options to be applied on query. */ @@ -23854,6 +23903,16 @@ export namespace cloudsearch { gsuiteUserEmail: string; } + /** + * List of html values. + */ + export interface HtmlValuesResponse { + /** + * The maximum allowable length for html values is 2048 characters. + */ + values: string[]; + } + /** * Used to specify integer faceting options. */ @@ -23864,6 +23923,222 @@ export namespace cloudsearch { integerBuckets: string[]; } + /** + * List of integer values. + */ + export interface IntegerValuesResponse { + values: string[]; + } + + /** + * Represents an interaction between a user and an item. + */ + export interface InteractionResponse { + /** + * The time when the user acted on the item. If multiple actions of the same type exist for a single user, only the most recent action is recorded. + */ + interactionTime: string; + /** + * The user that acted on the item. + */ + principal: outputs.cloudsearch.v1.PrincipalResponse; + type: string; + } + + /** + * Access control list information for the item. For more information see [Map ACLs](https://developers.google.com/cloud-search/docs/guides/acls). + */ + export interface ItemAclResponse { + /** + * Sets the type of access rules to apply when an item inherits its ACL from a parent. This should always be set in tandem with the inheritAclFrom field. Also, when the inheritAclFrom field is set, this field should be set to a valid AclInheritanceType. + */ + aclInheritanceType: string; + /** + * List of principals who are explicitly denied access to the item in search results. While principals are denied access by default, use denied readers to handle exceptions and override the list allowed readers. The maximum number of elements is 100. + */ + deniedReaders: outputs.cloudsearch.v1.PrincipalResponse[]; + /** + * The name of the item to inherit the Access Permission List (ACL) from. Note: ACL inheritance *only* provides access permissions to child items and does not define structural relationships, nor does it provide convenient ways to delete large groups of items. Deleting an ACL parent from the index only alters the access permissions of child items that reference the parent in the inheritAclFrom field. The item is still in the index, but may not visible in search results. By contrast, deletion of a container item also deletes all items that reference the container via the containerName field. The maximum length for this field is 1536 characters. + */ + inheritAclFrom: string; + /** + * Optional. List of owners for the item. This field has no bearing on document access permissions. It does, however, offer a slight ranking boosts items where the querying user is an owner. The maximum number of elements is 5. 
+ */ + owners: outputs.cloudsearch.v1.PrincipalResponse[]; + /** + * List of principals who are allowed to see the item in search results. Optional if inheriting permissions from another item or if the item is not intended to be visible, such as virtual containers. The maximum number of elements is 1000. + */ + readers: outputs.cloudsearch.v1.PrincipalResponse[]; + } + + /** + * Content of an item to be indexed and surfaced by Cloud Search. Only UTF-8 encoded strings are allowed as inlineContent. If the content is uploaded and not binary, it must be UTF-8 encoded. + */ + export interface ItemContentResponse { + /** + * Upload reference ID of a previously uploaded content via write method. + */ + contentDataRef: outputs.cloudsearch.v1.UploadItemRefResponse; + contentFormat: string; + /** + * Hashing info calculated and provided by the API client for content. Can be used with the items.push method to calculate modified state. The maximum length is 2048 characters. + */ + hash: string; + /** + * Content that is supplied inlined within the update method. The maximum length is 102400 bytes (100 KiB). + */ + inlineContent: string; + } + + /** + * Available metadata fields for the item. + */ + export interface ItemMetadataResponse { + /** + * The name of the container for this item. Deletion of the container item leads to automatic deletion of this item. Note: ACLs are not inherited from a container item. To provide ACL inheritance for an item, use the inheritAclFrom field. The maximum length is 1536 characters. + */ + containerName: string; + /** + * The BCP-47 language code for the item, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. The maximum length is 32 characters. + */ + contentLanguage: string; + /** + * A set of named attributes associated with the item. This can be used for influencing the ranking of the item based on the context in the request. The maximum number of elements is 10. + */ + contextAttributes: outputs.cloudsearch.v1.ContextAttributeResponse[]; + /** + * The time when the item was created in the source repository. + */ + createTime: string; + /** + * Hashing value provided by the API caller. This can be used with the items.push method to calculate modified state. The maximum length is 2048 characters. + */ + hash: string; + /** + * A list of interactions for the item. Interactions are used to improve Search quality, but are not exposed to end users. The maximum number of elements is 1000. + */ + interactions: outputs.cloudsearch.v1.InteractionResponse[]; + /** + * Additional keywords or phrases that should match the item. Used internally for user generated content. The maximum number of elements is 100. The maximum length is 8192 characters. + */ + keywords: string[]; + /** + * The original mime-type of ItemContent.content in the source repository. The maximum length is 256 characters. + */ + mimeType: string; + /** + * The type of the item. This should correspond to the name of an object definition in the schema registered for the data source. For example, if the schema for the data source contains an object definition with name 'document', then item indexing requests for objects of that type should set objectType to 'document'. The maximum length is 256 characters. + */ + objectType: string; + /** + * Additional search quality metadata of the item + */ + searchQualityMetadata: outputs.cloudsearch.v1.SearchQualityMetadataResponse; + /** + * Link to the source repository serving the data. 
Seach results apply this link to the title. Whitespace or special characters may cause Cloud Seach result links to trigger a redirect notice; to avoid this, encode the URL. The maximum length is 2048 characters. + */ + sourceRepositoryUrl: string; + /** + * The title of the item. If given, this will be the displayed title of the Search result. The maximum length is 2048 characters. + */ + title: string; + /** + * The time when the item was last modified in the source repository. + */ + updateTime: string; + } + + /** + * This contains item's status and any errors. + */ + export interface ItemStatusResponse { + /** + * Status code. + */ + code: string; + /** + * Error details in case the item is in ERROR state. + */ + processingErrors: outputs.cloudsearch.v1.ProcessingErrorResponse[]; + /** + * Repository error reported by connector. + */ + repositoryErrors: outputs.cloudsearch.v1.RepositoryErrorResponse[]; + } + + /** + * Available structured data fields for the item. + */ + export interface ItemStructuredDataResponse { + /** + * Hashing value provided by the API caller. This can be used with the items.push method to calculate modified state. The maximum length is 2048 characters. + */ + hash: string; + /** + * The structured data object that should conform to a registered object definition in the schema for the data source. + */ + object: outputs.cloudsearch.v1.StructuredDataObjectResponse; + } + + /** + * A typed name-value pair for structured data. The type of the value should be the same as the registered type for the `name` property in the object definition of `objectType`. + */ + export interface NamedPropertyResponse { + booleanValue: boolean; + dateValues: outputs.cloudsearch.v1.DateValuesResponse; + doubleValues: outputs.cloudsearch.v1.DoubleValuesResponse; + enumValues: outputs.cloudsearch.v1.EnumValuesResponse; + htmlValues: outputs.cloudsearch.v1.HtmlValuesResponse; + integerValues: outputs.cloudsearch.v1.IntegerValuesResponse; + /** + * The name of the property. This name should correspond to the name of the property that was registered for object definition in the schema. The maximum allowable length for this property is 256 characters. + */ + name: string; + objectValues: outputs.cloudsearch.v1.ObjectValuesResponse; + textValues: outputs.cloudsearch.v1.TextValuesResponse; + timestampValues: outputs.cloudsearch.v1.TimestampValuesResponse; + } + + /** + * List of object values. + */ + export interface ObjectValuesResponse { + values: outputs.cloudsearch.v1.StructuredDataObjectResponse[]; + } + + /** + * Reference to a user, group, or domain. + */ + export interface PrincipalResponse { + /** + * This principal is a group identified using an external identity. The name field must specify the group resource name with this format: identitysources/{source_id}/groups/{ID} + */ + groupResourceName: string; + /** + * This principal is a Google Workspace user, group or domain. + */ + gsuitePrincipal: outputs.cloudsearch.v1.GSuitePrincipalResponse; + /** + * This principal is a user identified using an external identity. The name field must specify the user resource name with this format: identitysources/{source_id}/users/{ID} + */ + userResourceName: string; + } + + export interface ProcessingErrorResponse { + /** + * Error code indicating the nature of the error. + */ + code: string; + /** + * The description of the error. + */ + errorMessage: string; + /** + * In case the item fields are invalid, this field contains the details about the validation errors. 
+ */ + fieldViolations: outputs.cloudsearch.v1.FieldViolationResponse[]; + } + /** * Default options to interpret user query. */ @@ -23878,6 +24153,24 @@ export namespace cloudsearch { forceVerbatimMode: boolean; } + /** + * Errors when the connector is communicating to the source repository. + */ + export interface RepositoryErrorResponse { + /** + * Message that describes the error. The maximum allowable length of the message is 8192 characters. + */ + errorMessage: string; + /** + * Error codes. Matches the definition of HTTP status codes. + */ + httpStatusCode: number; + /** + * The type of error. + */ + type: string; + } + /** * Scoring configurations for a source while processing a Search or Suggest request. */ @@ -23892,6 +24185,16 @@ export namespace cloudsearch { disablePersonalization: boolean; } + /** + * Additional search quality metadata of the item. + */ + export interface SearchQualityMetadataResponse { + /** + * An indication of the quality of the item, used to influence search quality. Value should be between 0.0 (lowest quality) and 1.0 (highest quality). The default value is 0.0. + */ + quality: number; + } + export interface SortOptionsResponse { /** * The name of the operator corresponding to the field to sort on. The corresponding property must be marked as sortable. @@ -23959,6 +24262,43 @@ export namespace cloudsearch { sourceImportance: string; } + /** + * A structured data object consisting of named properties. + */ + export interface StructuredDataObjectResponse { + /** + * The properties for the object. The maximum number of elements is 1000. + */ + properties: outputs.cloudsearch.v1.NamedPropertyResponse[]; + } + + /** + * List of text values. + */ + export interface TextValuesResponse { + /** + * The maximum allowable length for text values is 2048 characters. + */ + values: string[]; + } + + /** + * List of timestamp values. + */ + export interface TimestampValuesResponse { + values: string[]; + } + + /** + * Represents an upload session reference. This reference is created via upload method. This reference is valid for 30 days after its creation. Updating of item content may refer to this uploaded content via contentDataRef. + */ + export interface UploadItemRefResponse { + /** + * The name of the content reference. The maximum length is 2048 characters. + */ + name: string; + } + export interface ValueFilterResponse { /** * The `operator_name` applied to the query, such as *price_greater_than*. The filter can work against both types of filters defined in the schema for your data source: 1. `operator_name`, where the query filters results by the property that matches the value. 2. `greater_than_operator_name` or `less_than_operator_name` in your schema. The query filters the results for the property values that are greater than or less than the supplied value in the query. 
diff --git a/sdk/python/pulumi_google_native/__init__.py b/sdk/python/pulumi_google_native/__init__.py index dd9dfd386b..f41bf344a7 100644 --- a/sdk/python/pulumi_google_native/__init__.py +++ b/sdk/python/pulumi_google_native/__init__.py @@ -508,6 +508,7 @@ "google-native:aiplatform/v1:IndexEndpoint": "IndexEndpoint", "google-native:aiplatform/v1:MetadataSchema": "MetadataSchema", "google-native:aiplatform/v1:MetadataStore": "MetadataStore", + "google-native:aiplatform/v1:Model": "Model", "google-native:aiplatform/v1:ModelDeploymentMonitoringJob": "ModelDeploymentMonitoringJob", "google-native:aiplatform/v1:NasJob": "NasJob", "google-native:aiplatform/v1:NotebookRuntimeTemplate": "NotebookRuntimeTemplate", @@ -562,6 +563,7 @@ "google-native:aiplatform/v1beta1:IndexEndpoint": "IndexEndpoint", "google-native:aiplatform/v1beta1:MetadataSchema": "MetadataSchema", "google-native:aiplatform/v1beta1:MetadataStore": "MetadataStore", + "google-native:aiplatform/v1beta1:Model": "Model", "google-native:aiplatform/v1beta1:ModelDeploymentMonitoringJob": "ModelDeploymentMonitoringJob", "google-native:aiplatform/v1beta1:ModelIamBinding": "ModelIamBinding", "google-native:aiplatform/v1beta1:ModelIamMember": "ModelIamMember", @@ -1462,6 +1464,7 @@ "fqn": "pulumi_google_native.cloudsearch.v1", "classes": { "google-native:cloudsearch/v1:DataSource": "DataSource", + "google-native:cloudsearch/v1:Item": "Item", "google-native:cloudsearch/v1:SearchApplication": "SearchApplication" } }, diff --git a/sdk/python/pulumi_google_native/aiplatform/v1/__init__.py b/sdk/python/pulumi_google_native/aiplatform/v1/__init__.py index 39c9a829d0..02f16ad12c 100644 --- a/sdk/python/pulumi_google_native/aiplatform/v1/__init__.py +++ b/sdk/python/pulumi_google_native/aiplatform/v1/__init__.py @@ -55,6 +55,7 @@ from .get_index_endpoint import * from .get_metadata_schema import * from .get_metadata_store import * +from .get_model import * from .get_model_deployment_monitoring_job import * from .get_nas_job import * from .get_notebook_runtime_template import * @@ -73,6 +74,7 @@ from .index_endpoint import * from .metadata_schema import * from .metadata_store import * +from .model import * from .model_deployment_monitoring_job import * from .nas_job import * from .notebook_runtime_template import * diff --git a/sdk/python/pulumi_google_native/aiplatform/v1/get_model.py b/sdk/python/pulumi_google_native/aiplatform/v1/get_model.py new file mode 100644 index 0000000000..9ef7c82ba3 --- /dev/null +++ b/sdk/python/pulumi_google_native/aiplatform/v1/get_model.py @@ -0,0 +1,435 @@ +# coding=utf-8 +# *** WARNING: this file was generated by the Pulumi SDK Generator. *** +# *** Do not edit by hand unless you're certain you know what you are doing! *** + +import copy +import warnings +import pulumi +import pulumi.runtime +from typing import Any, Mapping, Optional, Sequence, Union, overload +from ... import _utilities +from . 
import outputs + +__all__ = [ + 'GetModelResult', + 'AwaitableGetModelResult', + 'get_model', + 'get_model_output', +] + +@pulumi.output_type +class GetModelResult: + def __init__(__self__, artifact_uri=None, container_spec=None, create_time=None, deployed_models=None, description=None, display_name=None, encryption_spec=None, etag=None, explanation_spec=None, labels=None, metadata=None, metadata_artifact=None, metadata_schema_uri=None, model_source_info=None, name=None, original_model_info=None, pipeline_job=None, predict_schemata=None, supported_deployment_resources_types=None, supported_export_formats=None, supported_input_storage_formats=None, supported_output_storage_formats=None, training_pipeline=None, update_time=None, version_aliases=None, version_create_time=None, version_description=None, version_id=None, version_update_time=None): + if artifact_uri and not isinstance(artifact_uri, str): + raise TypeError("Expected argument 'artifact_uri' to be a str") + pulumi.set(__self__, "artifact_uri", artifact_uri) + if container_spec and not isinstance(container_spec, dict): + raise TypeError("Expected argument 'container_spec' to be a dict") + pulumi.set(__self__, "container_spec", container_spec) + if create_time and not isinstance(create_time, str): + raise TypeError("Expected argument 'create_time' to be a str") + pulumi.set(__self__, "create_time", create_time) + if deployed_models and not isinstance(deployed_models, list): + raise TypeError("Expected argument 'deployed_models' to be a list") + pulumi.set(__self__, "deployed_models", deployed_models) + if description and not isinstance(description, str): + raise TypeError("Expected argument 'description' to be a str") + pulumi.set(__self__, "description", description) + if display_name and not isinstance(display_name, str): + raise TypeError("Expected argument 'display_name' to be a str") + pulumi.set(__self__, "display_name", display_name) + if encryption_spec and not isinstance(encryption_spec, dict): + raise TypeError("Expected argument 'encryption_spec' to be a dict") + pulumi.set(__self__, "encryption_spec", encryption_spec) + if etag and not isinstance(etag, str): + raise TypeError("Expected argument 'etag' to be a str") + pulumi.set(__self__, "etag", etag) + if explanation_spec and not isinstance(explanation_spec, dict): + raise TypeError("Expected argument 'explanation_spec' to be a dict") + pulumi.set(__self__, "explanation_spec", explanation_spec) + if labels and not isinstance(labels, dict): + raise TypeError("Expected argument 'labels' to be a dict") + pulumi.set(__self__, "labels", labels) + if metadata and not isinstance(metadata, dict): + raise TypeError("Expected argument 'metadata' to be a dict") + pulumi.set(__self__, "metadata", metadata) + if metadata_artifact and not isinstance(metadata_artifact, str): + raise TypeError("Expected argument 'metadata_artifact' to be a str") + pulumi.set(__self__, "metadata_artifact", metadata_artifact) + if metadata_schema_uri and not isinstance(metadata_schema_uri, str): + raise TypeError("Expected argument 'metadata_schema_uri' to be a str") + pulumi.set(__self__, "metadata_schema_uri", metadata_schema_uri) + if model_source_info and not isinstance(model_source_info, dict): + raise TypeError("Expected argument 'model_source_info' to be a dict") + pulumi.set(__self__, "model_source_info", model_source_info) + if name and not isinstance(name, str): + raise TypeError("Expected argument 'name' to be a str") + pulumi.set(__self__, "name", name) + if original_model_info and not 
isinstance(original_model_info, dict): + raise TypeError("Expected argument 'original_model_info' to be a dict") + pulumi.set(__self__, "original_model_info", original_model_info) + if pipeline_job and not isinstance(pipeline_job, str): + raise TypeError("Expected argument 'pipeline_job' to be a str") + pulumi.set(__self__, "pipeline_job", pipeline_job) + if predict_schemata and not isinstance(predict_schemata, dict): + raise TypeError("Expected argument 'predict_schemata' to be a dict") + pulumi.set(__self__, "predict_schemata", predict_schemata) + if supported_deployment_resources_types and not isinstance(supported_deployment_resources_types, list): + raise TypeError("Expected argument 'supported_deployment_resources_types' to be a list") + pulumi.set(__self__, "supported_deployment_resources_types", supported_deployment_resources_types) + if supported_export_formats and not isinstance(supported_export_formats, list): + raise TypeError("Expected argument 'supported_export_formats' to be a list") + pulumi.set(__self__, "supported_export_formats", supported_export_formats) + if supported_input_storage_formats and not isinstance(supported_input_storage_formats, list): + raise TypeError("Expected argument 'supported_input_storage_formats' to be a list") + pulumi.set(__self__, "supported_input_storage_formats", supported_input_storage_formats) + if supported_output_storage_formats and not isinstance(supported_output_storage_formats, list): + raise TypeError("Expected argument 'supported_output_storage_formats' to be a list") + pulumi.set(__self__, "supported_output_storage_formats", supported_output_storage_formats) + if training_pipeline and not isinstance(training_pipeline, str): + raise TypeError("Expected argument 'training_pipeline' to be a str") + pulumi.set(__self__, "training_pipeline", training_pipeline) + if update_time and not isinstance(update_time, str): + raise TypeError("Expected argument 'update_time' to be a str") + pulumi.set(__self__, "update_time", update_time) + if version_aliases and not isinstance(version_aliases, list): + raise TypeError("Expected argument 'version_aliases' to be a list") + pulumi.set(__self__, "version_aliases", version_aliases) + if version_create_time and not isinstance(version_create_time, str): + raise TypeError("Expected argument 'version_create_time' to be a str") + pulumi.set(__self__, "version_create_time", version_create_time) + if version_description and not isinstance(version_description, str): + raise TypeError("Expected argument 'version_description' to be a str") + pulumi.set(__self__, "version_description", version_description) + if version_id and not isinstance(version_id, str): + raise TypeError("Expected argument 'version_id' to be a str") + pulumi.set(__self__, "version_id", version_id) + if version_update_time and not isinstance(version_update_time, str): + raise TypeError("Expected argument 'version_update_time' to be a str") + pulumi.set(__self__, "version_update_time", version_update_time) + + @property + @pulumi.getter(name="artifactUri") + def artifact_uri(self) -> str: + """ + Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + """ + return pulumi.get(self, "artifact_uri") + + @property + @pulumi.getter(name="containerSpec") + def container_spec(self) -> 'outputs.GoogleCloudAiplatformV1ModelContainerSpecResponse': + """ + Input only. The specification of the container that is to be used when deploying this Model. 
The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + """ + return pulumi.get(self, "container_spec") + + @property + @pulumi.getter(name="createTime") + def create_time(self) -> str: + """ + Timestamp when this Model was uploaded into Vertex AI. + """ + return pulumi.get(self, "create_time") + + @property + @pulumi.getter(name="deployedModels") + def deployed_models(self) -> Sequence['outputs.GoogleCloudAiplatformV1DeployedModelRefResponse']: + """ + The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. + """ + return pulumi.get(self, "deployed_models") + + @property + @pulumi.getter + def description(self) -> str: + """ + The description of the Model. + """ + return pulumi.get(self, "description") + + @property + @pulumi.getter(name="displayName") + def display_name(self) -> str: + """ + The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + """ + return pulumi.get(self, "display_name") + + @property + @pulumi.getter(name="encryptionSpec") + def encryption_spec(self) -> 'outputs.GoogleCloudAiplatformV1EncryptionSpecResponse': + """ + Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + """ + return pulumi.get(self, "encryption_spec") + + @property + @pulumi.getter + def etag(self) -> str: + """ + Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + """ + return pulumi.get(self, "etag") + + @property + @pulumi.getter(name="explanationSpec") + def explanation_spec(self) -> 'outputs.GoogleCloudAiplatformV1ExplanationSpecResponse': + """ + The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + """ + return pulumi.get(self, "explanation_spec") + + @property + @pulumi.getter + def labels(self) -> Mapping[str, str]: + """ + The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + """ + return pulumi.get(self, "labels") + + @property + @pulumi.getter + def metadata(self) -> Any: + """ + Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + """ + return pulumi.get(self, "metadata") + + @property + @pulumi.getter(name="metadataArtifact") + def metadata_artifact(self) -> str: + """ + The resource name of the Artifact that was created in MetadataStore when creating the Model. 
The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. + """ + return pulumi.get(self, "metadata_artifact") + + @property + @pulumi.getter(name="metadataSchemaUri") + def metadata_schema_uri(self) -> str: + """ + Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + """ + return pulumi.get(self, "metadata_schema_uri") + + @property + @pulumi.getter(name="modelSourceInfo") + def model_source_info(self) -> 'outputs.GoogleCloudAiplatformV1ModelSourceInfoResponse': + """ + Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model. + """ + return pulumi.get(self, "model_source_info") + + @property + @pulumi.getter + def name(self) -> str: + """ + The resource name of the Model. + """ + return pulumi.get(self, "name") + + @property + @pulumi.getter(name="originalModelInfo") + def original_model_info(self) -> 'outputs.GoogleCloudAiplatformV1ModelOriginalModelInfoResponse': + """ + If this Model is a copy of another Model, this contains info about the original. + """ + return pulumi.get(self, "original_model_info") + + @property + @pulumi.getter(name="pipelineJob") + def pipeline_job(self) -> str: + """ + Optional. This field is populated if the model is produced by a pipeline job. + """ + return pulumi.get(self, "pipeline_job") + + @property + @pulumi.getter(name="predictSchemata") + def predict_schemata(self) -> 'outputs.GoogleCloudAiplatformV1PredictSchemataResponse': + """ + The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + """ + return pulumi.get(self, "predict_schemata") + + @property + @pulumi.getter(name="supportedDeploymentResourcesTypes") + def supported_deployment_resources_types(self) -> Sequence[str]: + """ + When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. + """ + return pulumi.get(self, "supported_deployment_resources_types") + + @property + @pulumi.getter(name="supportedExportFormats") + def supported_export_formats(self) -> Sequence['outputs.GoogleCloudAiplatformV1ModelExportFormatResponse']: + """ + The formats in which this Model may be exported. If empty, this Model is not available for export. 
+ """ + return pulumi.get(self, "supported_export_formats") + + @property + @pulumi.getter(name="supportedInputStorageFormats") + def supported_input_storage_formats(self) -> Sequence[str]: + """ + The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + """ + return pulumi.get(self, "supported_input_storage_formats") + + @property + @pulumi.getter(name="supportedOutputStorageFormats") + def supported_output_storage_formats(self) -> Sequence[str]: + """ + The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + """ + return pulumi.get(self, "supported_output_storage_formats") + + @property + @pulumi.getter(name="trainingPipeline") + def training_pipeline(self) -> str: + """ + The resource name of the TrainingPipeline that uploaded this Model, if any. + """ + return pulumi.get(self, "training_pipeline") + + @property + @pulumi.getter(name="updateTime") + def update_time(self) -> str: + """ + Timestamp when this Model was most recently updated. + """ + return pulumi.get(self, "update_time") + + @property + @pulumi.getter(name="versionAliases") + def version_aliases(self) -> Sequence[str]: + """ + User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. 
A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + """ + return pulumi.get(self, "version_aliases") + + @property + @pulumi.getter(name="versionCreateTime") + def version_create_time(self) -> str: + """ + Timestamp when this version was created. + """ + return pulumi.get(self, "version_create_time") + + @property + @pulumi.getter(name="versionDescription") + def version_description(self) -> str: + """ + The description of this version. + """ + return pulumi.get(self, "version_description") + + @property + @pulumi.getter(name="versionId") + def version_id(self) -> str: + """ + Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation. + """ + return pulumi.get(self, "version_id") + + @property + @pulumi.getter(name="versionUpdateTime") + def version_update_time(self) -> str: + """ + Timestamp when this version was most recently updated. + """ + return pulumi.get(self, "version_update_time") + + +class AwaitableGetModelResult(GetModelResult): + # pylint: disable=using-constant-test + def __await__(self): + if False: + yield self + return GetModelResult( + artifact_uri=self.artifact_uri, + container_spec=self.container_spec, + create_time=self.create_time, + deployed_models=self.deployed_models, + description=self.description, + display_name=self.display_name, + encryption_spec=self.encryption_spec, + etag=self.etag, + explanation_spec=self.explanation_spec, + labels=self.labels, + metadata=self.metadata, + metadata_artifact=self.metadata_artifact, + metadata_schema_uri=self.metadata_schema_uri, + model_source_info=self.model_source_info, + name=self.name, + original_model_info=self.original_model_info, + pipeline_job=self.pipeline_job, + predict_schemata=self.predict_schemata, + supported_deployment_resources_types=self.supported_deployment_resources_types, + supported_export_formats=self.supported_export_formats, + supported_input_storage_formats=self.supported_input_storage_formats, + supported_output_storage_formats=self.supported_output_storage_formats, + training_pipeline=self.training_pipeline, + update_time=self.update_time, + version_aliases=self.version_aliases, + version_create_time=self.version_create_time, + version_description=self.version_description, + version_id=self.version_id, + version_update_time=self.version_update_time) + + +def get_model(location: Optional[str] = None, + model_id: Optional[str] = None, + project: Optional[str] = None, + opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetModelResult: + """ + Gets a Model. 
+ """ + __args__ = dict() + __args__['location'] = location + __args__['modelId'] = model_id + __args__['project'] = project + opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) + __ret__ = pulumi.runtime.invoke('google-native:aiplatform/v1:getModel', __args__, opts=opts, typ=GetModelResult).value + + return AwaitableGetModelResult( + artifact_uri=pulumi.get(__ret__, 'artifact_uri'), + container_spec=pulumi.get(__ret__, 'container_spec'), + create_time=pulumi.get(__ret__, 'create_time'), + deployed_models=pulumi.get(__ret__, 'deployed_models'), + description=pulumi.get(__ret__, 'description'), + display_name=pulumi.get(__ret__, 'display_name'), + encryption_spec=pulumi.get(__ret__, 'encryption_spec'), + etag=pulumi.get(__ret__, 'etag'), + explanation_spec=pulumi.get(__ret__, 'explanation_spec'), + labels=pulumi.get(__ret__, 'labels'), + metadata=pulumi.get(__ret__, 'metadata'), + metadata_artifact=pulumi.get(__ret__, 'metadata_artifact'), + metadata_schema_uri=pulumi.get(__ret__, 'metadata_schema_uri'), + model_source_info=pulumi.get(__ret__, 'model_source_info'), + name=pulumi.get(__ret__, 'name'), + original_model_info=pulumi.get(__ret__, 'original_model_info'), + pipeline_job=pulumi.get(__ret__, 'pipeline_job'), + predict_schemata=pulumi.get(__ret__, 'predict_schemata'), + supported_deployment_resources_types=pulumi.get(__ret__, 'supported_deployment_resources_types'), + supported_export_formats=pulumi.get(__ret__, 'supported_export_formats'), + supported_input_storage_formats=pulumi.get(__ret__, 'supported_input_storage_formats'), + supported_output_storage_formats=pulumi.get(__ret__, 'supported_output_storage_formats'), + training_pipeline=pulumi.get(__ret__, 'training_pipeline'), + update_time=pulumi.get(__ret__, 'update_time'), + version_aliases=pulumi.get(__ret__, 'version_aliases'), + version_create_time=pulumi.get(__ret__, 'version_create_time'), + version_description=pulumi.get(__ret__, 'version_description'), + version_id=pulumi.get(__ret__, 'version_id'), + version_update_time=pulumi.get(__ret__, 'version_update_time')) + + +@_utilities.lift_output_func(get_model) +def get_model_output(location: Optional[pulumi.Input[str]] = None, + model_id: Optional[pulumi.Input[str]] = None, + project: Optional[pulumi.Input[Optional[str]]] = None, + opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetModelResult]: + """ + Gets a Model. + """ + ... diff --git a/sdk/python/pulumi_google_native/aiplatform/v1/model.py b/sdk/python/pulumi_google_native/aiplatform/v1/model.py new file mode 100644 index 0000000000..b76e8f3e95 --- /dev/null +++ b/sdk/python/pulumi_google_native/aiplatform/v1/model.py @@ -0,0 +1,773 @@ +# coding=utf-8 +# *** WARNING: this file was generated by the Pulumi SDK Generator. *** +# *** Do not edit by hand unless you're certain you know what you are doing! *** + +import copy +import warnings +import pulumi +import pulumi.runtime +from typing import Any, Mapping, Optional, Sequence, Union, overload +from ... import _utilities +from . 
import outputs +from ._enums import * +from ._inputs import * + +__all__ = ['ModelArgs', 'Model'] + +@pulumi.input_type +class ModelArgs: + def __init__(__self__, *, + display_name: pulumi.Input[str], + artifact_uri: Optional[pulumi.Input[str]] = None, + container_spec: Optional[pulumi.Input['GoogleCloudAiplatformV1ModelContainerSpecArgs']] = None, + description: Optional[pulumi.Input[str]] = None, + encryption_spec: Optional[pulumi.Input['GoogleCloudAiplatformV1EncryptionSpecArgs']] = None, + etag: Optional[pulumi.Input[str]] = None, + explanation_spec: Optional[pulumi.Input['GoogleCloudAiplatformV1ExplanationSpecArgs']] = None, + labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + location: Optional[pulumi.Input[str]] = None, + metadata: Optional[Any] = None, + metadata_schema_uri: Optional[pulumi.Input[str]] = None, + model_id: Optional[pulumi.Input[str]] = None, + name: Optional[pulumi.Input[str]] = None, + parent_model: Optional[pulumi.Input[str]] = None, + pipeline_job: Optional[pulumi.Input[str]] = None, + predict_schemata: Optional[pulumi.Input['GoogleCloudAiplatformV1PredictSchemataArgs']] = None, + project: Optional[pulumi.Input[str]] = None, + service_account: Optional[pulumi.Input[str]] = None, + version_aliases: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + version_description: Optional[pulumi.Input[str]] = None): + """ + The set of arguments for constructing a Model resource. + :param pulumi.Input[str] display_name: The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + :param pulumi.Input[str] artifact_uri: Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + :param pulumi.Input['GoogleCloudAiplatformV1ModelContainerSpecArgs'] container_spec: Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + :param pulumi.Input[str] description: The description of the Model. + :param pulumi.Input['GoogleCloudAiplatformV1EncryptionSpecArgs'] encryption_spec: Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + :param pulumi.Input[str] etag: Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + :param pulumi.Input['GoogleCloudAiplatformV1ExplanationSpecArgs'] explanation_spec: The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: The labels with user-defined metadata to organize your Models. 
Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + :param Any metadata: Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + :param pulumi.Input[str] metadata_schema_uri: Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + :param pulumi.Input[str] model_id: Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. + :param pulumi.Input[str] name: The resource name of the Model. + :param pulumi.Input[str] parent_model: Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version. + :param pulumi.Input[str] pipeline_job: Optional. This field is populated if the model is produced by a pipeline job. + :param pulumi.Input['GoogleCloudAiplatformV1PredictSchemataArgs'] predict_schemata: The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + :param pulumi.Input[str] service_account: Optional. The user-provided custom service account to use to do the model upload. If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.). + :param pulumi.Input[Sequence[pulumi.Input[str]]] version_aliases: User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + :param pulumi.Input[str] version_description: The description of this version. 
+ """ + pulumi.set(__self__, "display_name", display_name) + if artifact_uri is not None: + pulumi.set(__self__, "artifact_uri", artifact_uri) + if container_spec is not None: + pulumi.set(__self__, "container_spec", container_spec) + if description is not None: + pulumi.set(__self__, "description", description) + if encryption_spec is not None: + pulumi.set(__self__, "encryption_spec", encryption_spec) + if etag is not None: + pulumi.set(__self__, "etag", etag) + if explanation_spec is not None: + pulumi.set(__self__, "explanation_spec", explanation_spec) + if labels is not None: + pulumi.set(__self__, "labels", labels) + if location is not None: + pulumi.set(__self__, "location", location) + if metadata is not None: + pulumi.set(__self__, "metadata", metadata) + if metadata_schema_uri is not None: + pulumi.set(__self__, "metadata_schema_uri", metadata_schema_uri) + if model_id is not None: + pulumi.set(__self__, "model_id", model_id) + if name is not None: + pulumi.set(__self__, "name", name) + if parent_model is not None: + pulumi.set(__self__, "parent_model", parent_model) + if pipeline_job is not None: + pulumi.set(__self__, "pipeline_job", pipeline_job) + if predict_schemata is not None: + pulumi.set(__self__, "predict_schemata", predict_schemata) + if project is not None: + pulumi.set(__self__, "project", project) + if service_account is not None: + pulumi.set(__self__, "service_account", service_account) + if version_aliases is not None: + pulumi.set(__self__, "version_aliases", version_aliases) + if version_description is not None: + pulumi.set(__self__, "version_description", version_description) + + @property + @pulumi.getter(name="displayName") + def display_name(self) -> pulumi.Input[str]: + """ + The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + """ + return pulumi.get(self, "display_name") + + @display_name.setter + def display_name(self, value: pulumi.Input[str]): + pulumi.set(self, "display_name", value) + + @property + @pulumi.getter(name="artifactUri") + def artifact_uri(self) -> Optional[pulumi.Input[str]]: + """ + Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + """ + return pulumi.get(self, "artifact_uri") + + @artifact_uri.setter + def artifact_uri(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "artifact_uri", value) + + @property + @pulumi.getter(name="containerSpec") + def container_spec(self) -> Optional[pulumi.Input['GoogleCloudAiplatformV1ModelContainerSpecArgs']]: + """ + Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + """ + return pulumi.get(self, "container_spec") + + @container_spec.setter + def container_spec(self, value: Optional[pulumi.Input['GoogleCloudAiplatformV1ModelContainerSpecArgs']]): + pulumi.set(self, "container_spec", value) + + @property + @pulumi.getter + def description(self) -> Optional[pulumi.Input[str]]: + """ + The description of the Model. 
+ """ + return pulumi.get(self, "description") + + @description.setter + def description(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "description", value) + + @property + @pulumi.getter(name="encryptionSpec") + def encryption_spec(self) -> Optional[pulumi.Input['GoogleCloudAiplatformV1EncryptionSpecArgs']]: + """ + Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + """ + return pulumi.get(self, "encryption_spec") + + @encryption_spec.setter + def encryption_spec(self, value: Optional[pulumi.Input['GoogleCloudAiplatformV1EncryptionSpecArgs']]): + pulumi.set(self, "encryption_spec", value) + + @property + @pulumi.getter + def etag(self) -> Optional[pulumi.Input[str]]: + """ + Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + """ + return pulumi.get(self, "etag") + + @etag.setter + def etag(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "etag", value) + + @property + @pulumi.getter(name="explanationSpec") + def explanation_spec(self) -> Optional[pulumi.Input['GoogleCloudAiplatformV1ExplanationSpecArgs']]: + """ + The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + """ + return pulumi.get(self, "explanation_spec") + + @explanation_spec.setter + def explanation_spec(self, value: Optional[pulumi.Input['GoogleCloudAiplatformV1ExplanationSpecArgs']]): + pulumi.set(self, "explanation_spec", value) + + @property + @pulumi.getter + def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: + """ + The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + """ + return pulumi.get(self, "labels") + + @labels.setter + def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): + pulumi.set(self, "labels", value) + + @property + @pulumi.getter + def location(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "location") + + @location.setter + def location(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "location", value) + + @property + @pulumi.getter + def metadata(self) -> Optional[Any]: + """ + Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + """ + return pulumi.get(self, "metadata") + + @metadata.setter + def metadata(self, value: Optional[Any]): + pulumi.set(self, "metadata", value) + + @property + @pulumi.getter(name="metadataSchemaUri") + def metadata_schema_uri(self) -> Optional[pulumi.Input[str]]: + """ + Immutable. 
Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + """ + return pulumi.get(self, "metadata_schema_uri") + + @metadata_schema_uri.setter + def metadata_schema_uri(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "metadata_schema_uri", value) + + @property + @pulumi.getter(name="modelId") + def model_id(self) -> Optional[pulumi.Input[str]]: + """ + Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. + """ + return pulumi.get(self, "model_id") + + @model_id.setter + def model_id(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "model_id", value) + + @property + @pulumi.getter + def name(self) -> Optional[pulumi.Input[str]]: + """ + The resource name of the Model. + """ + return pulumi.get(self, "name") + + @name.setter + def name(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "name", value) + + @property + @pulumi.getter(name="parentModel") + def parent_model(self) -> Optional[pulumi.Input[str]]: + """ + Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version. + """ + return pulumi.get(self, "parent_model") + + @parent_model.setter + def parent_model(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "parent_model", value) + + @property + @pulumi.getter(name="pipelineJob") + def pipeline_job(self) -> Optional[pulumi.Input[str]]: + """ + Optional. This field is populated if the model is produced by a pipeline job. + """ + return pulumi.get(self, "pipeline_job") + + @pipeline_job.setter + def pipeline_job(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "pipeline_job", value) + + @property + @pulumi.getter(name="predictSchemata") + def predict_schemata(self) -> Optional[pulumi.Input['GoogleCloudAiplatformV1PredictSchemataArgs']]: + """ + The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + """ + return pulumi.get(self, "predict_schemata") + + @predict_schemata.setter + def predict_schemata(self, value: Optional[pulumi.Input['GoogleCloudAiplatformV1PredictSchemataArgs']]): + pulumi.set(self, "predict_schemata", value) + + @property + @pulumi.getter + def project(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "project") + + @project.setter + def project(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "project", value) + + @property + @pulumi.getter(name="serviceAccount") + def service_account(self) -> Optional[pulumi.Input[str]]: + """ + Optional. The user-provided custom service account to use to do the model upload. 
If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.). + """ + return pulumi.get(self, "service_account") + + @service_account.setter + def service_account(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "service_account", value) + + @property + @pulumi.getter(name="versionAliases") + def version_aliases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: + """ + User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + """ + return pulumi.get(self, "version_aliases") + + @version_aliases.setter + def version_aliases(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): + pulumi.set(self, "version_aliases", value) + + @property + @pulumi.getter(name="versionDescription") + def version_description(self) -> Optional[pulumi.Input[str]]: + """ + The description of this version. + """ + return pulumi.get(self, "version_description") + + @version_description.setter + def version_description(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "version_description", value) + + +class Model(pulumi.CustomResource): + @overload + def __init__(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + artifact_uri: Optional[pulumi.Input[str]] = None, + container_spec: Optional[pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1ModelContainerSpecArgs']]] = None, + description: Optional[pulumi.Input[str]] = None, + display_name: Optional[pulumi.Input[str]] = None, + encryption_spec: Optional[pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1EncryptionSpecArgs']]] = None, + etag: Optional[pulumi.Input[str]] = None, + explanation_spec: Optional[pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1ExplanationSpecArgs']]] = None, + labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + location: Optional[pulumi.Input[str]] = None, + metadata: Optional[Any] = None, + metadata_schema_uri: Optional[pulumi.Input[str]] = None, + model_id: Optional[pulumi.Input[str]] = None, + name: Optional[pulumi.Input[str]] = None, + parent_model: Optional[pulumi.Input[str]] = None, + pipeline_job: Optional[pulumi.Input[str]] = None, + predict_schemata: Optional[pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1PredictSchemataArgs']]] = None, + project: Optional[pulumi.Input[str]] = None, + service_account: Optional[pulumi.Input[str]] = None, + version_aliases: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + version_description: Optional[pulumi.Input[str]] = None, + __props__=None): + """ + Uploads a Model artifact into Vertex AI. + + :param str resource_name: The name of the resource. + :param pulumi.ResourceOptions opts: Options for the resource. + :param pulumi.Input[str] artifact_uri: Immutable. 
The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + :param pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1ModelContainerSpecArgs']] container_spec: Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + :param pulumi.Input[str] description: The description of the Model. + :param pulumi.Input[str] display_name: The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + :param pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1EncryptionSpecArgs']] encryption_spec: Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + :param pulumi.Input[str] etag: Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + :param pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1ExplanationSpecArgs']] explanation_spec: The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + :param Any metadata: Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + :param pulumi.Input[str] metadata_schema_uri: Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + :param pulumi.Input[str] model_id: Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. + :param pulumi.Input[str] name: The resource name of the Model. 
+ :param pulumi.Input[str] parent_model: Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version. + :param pulumi.Input[str] pipeline_job: Optional. This field is populated if the model is produced by a pipeline job. + :param pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1PredictSchemataArgs']] predict_schemata: The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + :param pulumi.Input[str] service_account: Optional. The user-provided custom service account to use to do the model upload. If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.). + :param pulumi.Input[Sequence[pulumi.Input[str]]] version_aliases: User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + :param pulumi.Input[str] version_description: The description of this version. + """ + ... + @overload + def __init__(__self__, + resource_name: str, + args: ModelArgs, + opts: Optional[pulumi.ResourceOptions] = None): + """ + Uploads a Model artifact into Vertex AI. + + :param str resource_name: The name of the resource. + :param ModelArgs args: The arguments to use to populate this resource's properties. + :param pulumi.ResourceOptions opts: Options for the resource. + """ + ... 
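For illustration only, a minimal sketch of how the new `google-native:aiplatform/v1:Model` resource added by this patch might be used from a Pulumi Python program. The project, location, bucket path, serving image URI, and label values are placeholders, and the sketch assumes the generated `GoogleCloudAiplatformV1ModelContainerSpecArgs` input type is exported from the same module, as it is referenced in the generated code above:

import pulumi
from pulumi_google_native.aiplatform import v1 as aiplatform

# Uploads a Model artifact into Vertex AI via the new Model resource.
# All literal values below are illustrative placeholders, not part of this patch.
model = aiplatform.Model(
    "example-model",
    display_name="example-model",
    project="my-project",          # placeholder project
    location="us-central1",        # placeholder location
    artifact_uri="gs://my-bucket/models/example/",  # placeholder GCS path to model files
    container_spec=aiplatform.GoogleCloudAiplatformV1ModelContainerSpecArgs(
        # placeholder serving container image
        image_uri="us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-0:latest",
    ),
    labels={"team": "ml"},
)

# Output-only properties are populated by the service after the upload completes.
pulumi.export("model_name", model.name)
pulumi.export("model_version_id", model.version_id)

The companion get_model/get_model_output functions added in the same patch can then look up an uploaded Model by model_id, location, and project.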
+ def __init__(__self__, resource_name: str, *args, **kwargs): + resource_args, opts = _utilities.get_resource_args_opts(ModelArgs, pulumi.ResourceOptions, *args, **kwargs) + if resource_args is not None: + __self__._internal_init(resource_name, opts, **resource_args.__dict__) + else: + __self__._internal_init(resource_name, *args, **kwargs) + + def _internal_init(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + artifact_uri: Optional[pulumi.Input[str]] = None, + container_spec: Optional[pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1ModelContainerSpecArgs']]] = None, + description: Optional[pulumi.Input[str]] = None, + display_name: Optional[pulumi.Input[str]] = None, + encryption_spec: Optional[pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1EncryptionSpecArgs']]] = None, + etag: Optional[pulumi.Input[str]] = None, + explanation_spec: Optional[pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1ExplanationSpecArgs']]] = None, + labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + location: Optional[pulumi.Input[str]] = None, + metadata: Optional[Any] = None, + metadata_schema_uri: Optional[pulumi.Input[str]] = None, + model_id: Optional[pulumi.Input[str]] = None, + name: Optional[pulumi.Input[str]] = None, + parent_model: Optional[pulumi.Input[str]] = None, + pipeline_job: Optional[pulumi.Input[str]] = None, + predict_schemata: Optional[pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1PredictSchemataArgs']]] = None, + project: Optional[pulumi.Input[str]] = None, + service_account: Optional[pulumi.Input[str]] = None, + version_aliases: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + version_description: Optional[pulumi.Input[str]] = None, + __props__=None): + opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) + if not isinstance(opts, pulumi.ResourceOptions): + raise TypeError('Expected resource options to be a ResourceOptions instance') + if opts.id is None: + if __props__ is not None: + raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') + __props__ = ModelArgs.__new__(ModelArgs) + + __props__.__dict__["artifact_uri"] = artifact_uri + __props__.__dict__["container_spec"] = container_spec + __props__.__dict__["description"] = description + if display_name is None and not opts.urn: + raise TypeError("Missing required property 'display_name'") + __props__.__dict__["display_name"] = display_name + __props__.__dict__["encryption_spec"] = encryption_spec + __props__.__dict__["etag"] = etag + __props__.__dict__["explanation_spec"] = explanation_spec + __props__.__dict__["labels"] = labels + __props__.__dict__["location"] = location + __props__.__dict__["metadata"] = metadata + __props__.__dict__["metadata_schema_uri"] = metadata_schema_uri + __props__.__dict__["model_id"] = model_id + __props__.__dict__["name"] = name + __props__.__dict__["parent_model"] = parent_model + __props__.__dict__["pipeline_job"] = pipeline_job + __props__.__dict__["predict_schemata"] = predict_schemata + __props__.__dict__["project"] = project + __props__.__dict__["service_account"] = service_account + __props__.__dict__["version_aliases"] = version_aliases + __props__.__dict__["version_description"] = version_description + __props__.__dict__["create_time"] = None + __props__.__dict__["deployed_models"] = None + __props__.__dict__["metadata_artifact"] = None + __props__.__dict__["model_source_info"] = None + 
__props__.__dict__["original_model_info"] = None + __props__.__dict__["supported_deployment_resources_types"] = None + __props__.__dict__["supported_export_formats"] = None + __props__.__dict__["supported_input_storage_formats"] = None + __props__.__dict__["supported_output_storage_formats"] = None + __props__.__dict__["training_pipeline"] = None + __props__.__dict__["update_time"] = None + __props__.__dict__["version_create_time"] = None + __props__.__dict__["version_id"] = None + __props__.__dict__["version_update_time"] = None + replace_on_changes = pulumi.ResourceOptions(replace_on_changes=["location", "project"]) + opts = pulumi.ResourceOptions.merge(opts, replace_on_changes) + super(Model, __self__).__init__( + 'google-native:aiplatform/v1:Model', + resource_name, + __props__, + opts) + + @staticmethod + def get(resource_name: str, + id: pulumi.Input[str], + opts: Optional[pulumi.ResourceOptions] = None) -> 'Model': + """ + Get an existing Model resource's state with the given name, id, and optional extra + properties used to qualify the lookup. + + :param str resource_name: The unique name of the resulting resource. + :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. + :param pulumi.ResourceOptions opts: Options for the resource. + """ + opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) + + __props__ = ModelArgs.__new__(ModelArgs) + + __props__.__dict__["artifact_uri"] = None + __props__.__dict__["container_spec"] = None + __props__.__dict__["create_time"] = None + __props__.__dict__["deployed_models"] = None + __props__.__dict__["description"] = None + __props__.__dict__["display_name"] = None + __props__.__dict__["encryption_spec"] = None + __props__.__dict__["etag"] = None + __props__.__dict__["explanation_spec"] = None + __props__.__dict__["labels"] = None + __props__.__dict__["location"] = None + __props__.__dict__["metadata"] = None + __props__.__dict__["metadata_artifact"] = None + __props__.__dict__["metadata_schema_uri"] = None + __props__.__dict__["model_source_info"] = None + __props__.__dict__["name"] = None + __props__.__dict__["original_model_info"] = None + __props__.__dict__["pipeline_job"] = None + __props__.__dict__["predict_schemata"] = None + __props__.__dict__["project"] = None + __props__.__dict__["supported_deployment_resources_types"] = None + __props__.__dict__["supported_export_formats"] = None + __props__.__dict__["supported_input_storage_formats"] = None + __props__.__dict__["supported_output_storage_formats"] = None + __props__.__dict__["training_pipeline"] = None + __props__.__dict__["update_time"] = None + __props__.__dict__["version_aliases"] = None + __props__.__dict__["version_create_time"] = None + __props__.__dict__["version_description"] = None + __props__.__dict__["version_id"] = None + __props__.__dict__["version_update_time"] = None + return Model(resource_name, opts=opts, __props__=__props__) + + @property + @pulumi.getter(name="artifactUri") + def artifact_uri(self) -> pulumi.Output[str]: + """ + Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + """ + return pulumi.get(self, "artifact_uri") + + @property + @pulumi.getter(name="containerSpec") + def container_spec(self) -> pulumi.Output['outputs.GoogleCloudAiplatformV1ModelContainerSpecResponse']: + """ + Input only. The specification of the container that is to be used when deploying this Model. 
The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + """ + return pulumi.get(self, "container_spec") + + @property + @pulumi.getter(name="createTime") + def create_time(self) -> pulumi.Output[str]: + """ + Timestamp when this Model was uploaded into Vertex AI. + """ + return pulumi.get(self, "create_time") + + @property + @pulumi.getter(name="deployedModels") + def deployed_models(self) -> pulumi.Output[Sequence['outputs.GoogleCloudAiplatformV1DeployedModelRefResponse']]: + """ + The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. + """ + return pulumi.get(self, "deployed_models") + + @property + @pulumi.getter + def description(self) -> pulumi.Output[str]: + """ + The description of the Model. + """ + return pulumi.get(self, "description") + + @property + @pulumi.getter(name="displayName") + def display_name(self) -> pulumi.Output[str]: + """ + The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + """ + return pulumi.get(self, "display_name") + + @property + @pulumi.getter(name="encryptionSpec") + def encryption_spec(self) -> pulumi.Output['outputs.GoogleCloudAiplatformV1EncryptionSpecResponse']: + """ + Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + """ + return pulumi.get(self, "encryption_spec") + + @property + @pulumi.getter + def etag(self) -> pulumi.Output[str]: + """ + Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + """ + return pulumi.get(self, "etag") + + @property + @pulumi.getter(name="explanationSpec") + def explanation_spec(self) -> pulumi.Output['outputs.GoogleCloudAiplatformV1ExplanationSpecResponse']: + """ + The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + """ + return pulumi.get(self, "explanation_spec") + + @property + @pulumi.getter + def labels(self) -> pulumi.Output[Mapping[str, str]]: + """ + The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + """ + return pulumi.get(self, "labels") + + @property + @pulumi.getter + def location(self) -> pulumi.Output[str]: + return pulumi.get(self, "location") + + @property + @pulumi.getter + def metadata(self) -> pulumi.Output[Any]: + """ + Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. 
+ """ + return pulumi.get(self, "metadata") + + @property + @pulumi.getter(name="metadataArtifact") + def metadata_artifact(self) -> pulumi.Output[str]: + """ + The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. + """ + return pulumi.get(self, "metadata_artifact") + + @property + @pulumi.getter(name="metadataSchemaUri") + def metadata_schema_uri(self) -> pulumi.Output[str]: + """ + Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + """ + return pulumi.get(self, "metadata_schema_uri") + + @property + @pulumi.getter(name="modelSourceInfo") + def model_source_info(self) -> pulumi.Output['outputs.GoogleCloudAiplatformV1ModelSourceInfoResponse']: + """ + Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model. + """ + return pulumi.get(self, "model_source_info") + + @property + @pulumi.getter + def name(self) -> pulumi.Output[str]: + """ + The resource name of the Model. + """ + return pulumi.get(self, "name") + + @property + @pulumi.getter(name="originalModelInfo") + def original_model_info(self) -> pulumi.Output['outputs.GoogleCloudAiplatformV1ModelOriginalModelInfoResponse']: + """ + If this Model is a copy of another Model, this contains info about the original. + """ + return pulumi.get(self, "original_model_info") + + @property + @pulumi.getter(name="pipelineJob") + def pipeline_job(self) -> pulumi.Output[str]: + """ + Optional. This field is populated if the model is produced by a pipeline job. + """ + return pulumi.get(self, "pipeline_job") + + @property + @pulumi.getter(name="predictSchemata") + def predict_schemata(self) -> pulumi.Output['outputs.GoogleCloudAiplatformV1PredictSchemataResponse']: + """ + The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + """ + return pulumi.get(self, "predict_schemata") + + @property + @pulumi.getter + def project(self) -> pulumi.Output[str]: + return pulumi.get(self, "project") + + @property + @pulumi.getter(name="supportedDeploymentResourcesTypes") + def supported_deployment_resources_types(self) -> pulumi.Output[Sequence[str]]: + """ + When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). 
Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. + """ + return pulumi.get(self, "supported_deployment_resources_types") + + @property + @pulumi.getter(name="supportedExportFormats") + def supported_export_formats(self) -> pulumi.Output[Sequence['outputs.GoogleCloudAiplatformV1ModelExportFormatResponse']]: + """ + The formats in which this Model may be exported. If empty, this Model is not available for export. + """ + return pulumi.get(self, "supported_export_formats") + + @property + @pulumi.getter(name="supportedInputStorageFormats") + def supported_input_storage_formats(self) -> pulumi.Output[Sequence[str]]: + """ + The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + """ + return pulumi.get(self, "supported_input_storage_formats") + + @property + @pulumi.getter(name="supportedOutputStorageFormats") + def supported_output_storage_formats(self) -> pulumi.Output[Sequence[str]]: + """ + The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + """ + return pulumi.get(self, "supported_output_storage_formats") + + @property + @pulumi.getter(name="trainingPipeline") + def training_pipeline(self) -> pulumi.Output[str]: + """ + The resource name of the TrainingPipeline that uploaded this Model, if any. 
+ """ + return pulumi.get(self, "training_pipeline") + + @property + @pulumi.getter(name="updateTime") + def update_time(self) -> pulumi.Output[str]: + """ + Timestamp when this Model was most recently updated. + """ + return pulumi.get(self, "update_time") + + @property + @pulumi.getter(name="versionAliases") + def version_aliases(self) -> pulumi.Output[Sequence[str]]: + """ + User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + """ + return pulumi.get(self, "version_aliases") + + @property + @pulumi.getter(name="versionCreateTime") + def version_create_time(self) -> pulumi.Output[str]: + """ + Timestamp when this version was created. + """ + return pulumi.get(self, "version_create_time") + + @property + @pulumi.getter(name="versionDescription") + def version_description(self) -> pulumi.Output[str]: + """ + The description of this version. + """ + return pulumi.get(self, "version_description") + + @property + @pulumi.getter(name="versionId") + def version_id(self) -> pulumi.Output[str]: + """ + Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation. + """ + return pulumi.get(self, "version_id") + + @property + @pulumi.getter(name="versionUpdateTime") + def version_update_time(self) -> pulumi.Output[str]: + """ + Timestamp when this version was most recently updated. + """ + return pulumi.get(self, "version_update_time") + diff --git a/sdk/python/pulumi_google_native/aiplatform/v1beta1/__init__.py b/sdk/python/pulumi_google_native/aiplatform/v1beta1/__init__.py index eda8c0faa3..f4df995366 100644 --- a/sdk/python/pulumi_google_native/aiplatform/v1beta1/__init__.py +++ b/sdk/python/pulumi_google_native/aiplatform/v1beta1/__init__.py @@ -59,6 +59,7 @@ from .get_index_endpoint import * from .get_metadata_schema import * from .get_metadata_store import * +from .get_model import * from .get_model_deployment_monitoring_job import * from .get_model_iam_policy import * from .get_nas_job import * @@ -79,6 +80,7 @@ from .index_endpoint import * from .metadata_schema import * from .metadata_store import * +from .model import * from .model_deployment_monitoring_job import * from .model_iam_binding import * from .model_iam_member import * diff --git a/sdk/python/pulumi_google_native/aiplatform/v1beta1/get_model.py b/sdk/python/pulumi_google_native/aiplatform/v1beta1/get_model.py new file mode 100644 index 0000000000..4d3f67aa21 --- /dev/null +++ b/sdk/python/pulumi_google_native/aiplatform/v1beta1/get_model.py @@ -0,0 +1,422 @@ +# coding=utf-8 +# *** WARNING: this file was generated by the Pulumi SDK Generator. *** +# *** Do not edit by hand unless you're certain you know what you are doing! *** + +import copy +import warnings +import pulumi +import pulumi.runtime +from typing import Any, Mapping, Optional, Sequence, Union, overload +from ... import _utilities +from . 
import outputs + +__all__ = [ + 'GetModelResult', + 'AwaitableGetModelResult', + 'get_model', + 'get_model_output', +] + +@pulumi.output_type +class GetModelResult: + def __init__(__self__, artifact_uri=None, container_spec=None, create_time=None, deployed_models=None, description=None, display_name=None, encryption_spec=None, etag=None, explanation_spec=None, labels=None, metadata=None, metadata_artifact=None, metadata_schema_uri=None, model_source_info=None, name=None, original_model_info=None, predict_schemata=None, supported_deployment_resources_types=None, supported_export_formats=None, supported_input_storage_formats=None, supported_output_storage_formats=None, training_pipeline=None, update_time=None, version_aliases=None, version_create_time=None, version_description=None, version_id=None, version_update_time=None): + if artifact_uri and not isinstance(artifact_uri, str): + raise TypeError("Expected argument 'artifact_uri' to be a str") + pulumi.set(__self__, "artifact_uri", artifact_uri) + if container_spec and not isinstance(container_spec, dict): + raise TypeError("Expected argument 'container_spec' to be a dict") + pulumi.set(__self__, "container_spec", container_spec) + if create_time and not isinstance(create_time, str): + raise TypeError("Expected argument 'create_time' to be a str") + pulumi.set(__self__, "create_time", create_time) + if deployed_models and not isinstance(deployed_models, list): + raise TypeError("Expected argument 'deployed_models' to be a list") + pulumi.set(__self__, "deployed_models", deployed_models) + if description and not isinstance(description, str): + raise TypeError("Expected argument 'description' to be a str") + pulumi.set(__self__, "description", description) + if display_name and not isinstance(display_name, str): + raise TypeError("Expected argument 'display_name' to be a str") + pulumi.set(__self__, "display_name", display_name) + if encryption_spec and not isinstance(encryption_spec, dict): + raise TypeError("Expected argument 'encryption_spec' to be a dict") + pulumi.set(__self__, "encryption_spec", encryption_spec) + if etag and not isinstance(etag, str): + raise TypeError("Expected argument 'etag' to be a str") + pulumi.set(__self__, "etag", etag) + if explanation_spec and not isinstance(explanation_spec, dict): + raise TypeError("Expected argument 'explanation_spec' to be a dict") + pulumi.set(__self__, "explanation_spec", explanation_spec) + if labels and not isinstance(labels, dict): + raise TypeError("Expected argument 'labels' to be a dict") + pulumi.set(__self__, "labels", labels) + if metadata and not isinstance(metadata, dict): + raise TypeError("Expected argument 'metadata' to be a dict") + pulumi.set(__self__, "metadata", metadata) + if metadata_artifact and not isinstance(metadata_artifact, str): + raise TypeError("Expected argument 'metadata_artifact' to be a str") + pulumi.set(__self__, "metadata_artifact", metadata_artifact) + if metadata_schema_uri and not isinstance(metadata_schema_uri, str): + raise TypeError("Expected argument 'metadata_schema_uri' to be a str") + pulumi.set(__self__, "metadata_schema_uri", metadata_schema_uri) + if model_source_info and not isinstance(model_source_info, dict): + raise TypeError("Expected argument 'model_source_info' to be a dict") + pulumi.set(__self__, "model_source_info", model_source_info) + if name and not isinstance(name, str): + raise TypeError("Expected argument 'name' to be a str") + pulumi.set(__self__, "name", name) + if original_model_info and not 
isinstance(original_model_info, dict): + raise TypeError("Expected argument 'original_model_info' to be a dict") + pulumi.set(__self__, "original_model_info", original_model_info) + if predict_schemata and not isinstance(predict_schemata, dict): + raise TypeError("Expected argument 'predict_schemata' to be a dict") + pulumi.set(__self__, "predict_schemata", predict_schemata) + if supported_deployment_resources_types and not isinstance(supported_deployment_resources_types, list): + raise TypeError("Expected argument 'supported_deployment_resources_types' to be a list") + pulumi.set(__self__, "supported_deployment_resources_types", supported_deployment_resources_types) + if supported_export_formats and not isinstance(supported_export_formats, list): + raise TypeError("Expected argument 'supported_export_formats' to be a list") + pulumi.set(__self__, "supported_export_formats", supported_export_formats) + if supported_input_storage_formats and not isinstance(supported_input_storage_formats, list): + raise TypeError("Expected argument 'supported_input_storage_formats' to be a list") + pulumi.set(__self__, "supported_input_storage_formats", supported_input_storage_formats) + if supported_output_storage_formats and not isinstance(supported_output_storage_formats, list): + raise TypeError("Expected argument 'supported_output_storage_formats' to be a list") + pulumi.set(__self__, "supported_output_storage_formats", supported_output_storage_formats) + if training_pipeline and not isinstance(training_pipeline, str): + raise TypeError("Expected argument 'training_pipeline' to be a str") + pulumi.set(__self__, "training_pipeline", training_pipeline) + if update_time and not isinstance(update_time, str): + raise TypeError("Expected argument 'update_time' to be a str") + pulumi.set(__self__, "update_time", update_time) + if version_aliases and not isinstance(version_aliases, list): + raise TypeError("Expected argument 'version_aliases' to be a list") + pulumi.set(__self__, "version_aliases", version_aliases) + if version_create_time and not isinstance(version_create_time, str): + raise TypeError("Expected argument 'version_create_time' to be a str") + pulumi.set(__self__, "version_create_time", version_create_time) + if version_description and not isinstance(version_description, str): + raise TypeError("Expected argument 'version_description' to be a str") + pulumi.set(__self__, "version_description", version_description) + if version_id and not isinstance(version_id, str): + raise TypeError("Expected argument 'version_id' to be a str") + pulumi.set(__self__, "version_id", version_id) + if version_update_time and not isinstance(version_update_time, str): + raise TypeError("Expected argument 'version_update_time' to be a str") + pulumi.set(__self__, "version_update_time", version_update_time) + + @property + @pulumi.getter(name="artifactUri") + def artifact_uri(self) -> str: + """ + Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + """ + return pulumi.get(self, "artifact_uri") + + @property + @pulumi.getter(name="containerSpec") + def container_spec(self) -> 'outputs.GoogleCloudAiplatformV1beta1ModelContainerSpecResponse': + """ + Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. 
+ """ + return pulumi.get(self, "container_spec") + + @property + @pulumi.getter(name="createTime") + def create_time(self) -> str: + """ + Timestamp when this Model was uploaded into Vertex AI. + """ + return pulumi.get(self, "create_time") + + @property + @pulumi.getter(name="deployedModels") + def deployed_models(self) -> Sequence['outputs.GoogleCloudAiplatformV1beta1DeployedModelRefResponse']: + """ + The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. + """ + return pulumi.get(self, "deployed_models") + + @property + @pulumi.getter + def description(self) -> str: + """ + The description of the Model. + """ + return pulumi.get(self, "description") + + @property + @pulumi.getter(name="displayName") + def display_name(self) -> str: + """ + The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + """ + return pulumi.get(self, "display_name") + + @property + @pulumi.getter(name="encryptionSpec") + def encryption_spec(self) -> 'outputs.GoogleCloudAiplatformV1beta1EncryptionSpecResponse': + """ + Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + """ + return pulumi.get(self, "encryption_spec") + + @property + @pulumi.getter + def etag(self) -> str: + """ + Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + """ + return pulumi.get(self, "etag") + + @property + @pulumi.getter(name="explanationSpec") + def explanation_spec(self) -> 'outputs.GoogleCloudAiplatformV1beta1ExplanationSpecResponse': + """ + The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + """ + return pulumi.get(self, "explanation_spec") + + @property + @pulumi.getter + def labels(self) -> Mapping[str, str]: + """ + The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + """ + return pulumi.get(self, "labels") + + @property + @pulumi.getter + def metadata(self) -> Any: + """ + Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + """ + return pulumi.get(self, "metadata") + + @property + @pulumi.getter(name="metadataArtifact") + def metadata_artifact(self) -> str: + """ + The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. 
+ """ + return pulumi.get(self, "metadata_artifact") + + @property + @pulumi.getter(name="metadataSchemaUri") + def metadata_schema_uri(self) -> str: + """ + Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + """ + return pulumi.get(self, "metadata_schema_uri") + + @property + @pulumi.getter(name="modelSourceInfo") + def model_source_info(self) -> 'outputs.GoogleCloudAiplatformV1beta1ModelSourceInfoResponse': + """ + Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model. + """ + return pulumi.get(self, "model_source_info") + + @property + @pulumi.getter + def name(self) -> str: + """ + The resource name of the Model. + """ + return pulumi.get(self, "name") + + @property + @pulumi.getter(name="originalModelInfo") + def original_model_info(self) -> 'outputs.GoogleCloudAiplatformV1beta1ModelOriginalModelInfoResponse': + """ + If this Model is a copy of another Model, this contains info about the original. + """ + return pulumi.get(self, "original_model_info") + + @property + @pulumi.getter(name="predictSchemata") + def predict_schemata(self) -> 'outputs.GoogleCloudAiplatformV1beta1PredictSchemataResponse': + """ + The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + """ + return pulumi.get(self, "predict_schemata") + + @property + @pulumi.getter(name="supportedDeploymentResourcesTypes") + def supported_deployment_resources_types(self) -> Sequence[str]: + """ + When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. + """ + return pulumi.get(self, "supported_deployment_resources_types") + + @property + @pulumi.getter(name="supportedExportFormats") + def supported_export_formats(self) -> Sequence['outputs.GoogleCloudAiplatformV1beta1ModelExportFormatResponse']: + """ + The formats in which this Model may be exported. If empty, this Model is not available for export. + """ + return pulumi.get(self, "supported_export_formats") + + @property + @pulumi.getter(name="supportedInputStorageFormats") + def supported_input_storage_formats(self) -> Sequence[str]: + """ + The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. 
The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + """ + return pulumi.get(self, "supported_input_storage_formats") + + @property + @pulumi.getter(name="supportedOutputStorageFormats") + def supported_output_storage_formats(self) -> Sequence[str]: + """ + The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + """ + return pulumi.get(self, "supported_output_storage_formats") + + @property + @pulumi.getter(name="trainingPipeline") + def training_pipeline(self) -> str: + """ + The resource name of the TrainingPipeline that uploaded this Model, if any. + """ + return pulumi.get(self, "training_pipeline") + + @property + @pulumi.getter(name="updateTime") + def update_time(self) -> str: + """ + Timestamp when this Model was most recently updated. + """ + return pulumi.get(self, "update_time") + + @property + @pulumi.getter(name="versionAliases") + def version_aliases(self) -> Sequence[str]: + """ + User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + """ + return pulumi.get(self, "version_aliases") + + @property + @pulumi.getter(name="versionCreateTime") + def version_create_time(self) -> str: + """ + Timestamp when this version was created. 
+ """ + return pulumi.get(self, "version_create_time") + + @property + @pulumi.getter(name="versionDescription") + def version_description(self) -> str: + """ + The description of this version. + """ + return pulumi.get(self, "version_description") + + @property + @pulumi.getter(name="versionId") + def version_id(self) -> str: + """ + Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. It is an auto-incrementing decimal number in string representation. + """ + return pulumi.get(self, "version_id") + + @property + @pulumi.getter(name="versionUpdateTime") + def version_update_time(self) -> str: + """ + Timestamp when this version was most recently updated. + """ + return pulumi.get(self, "version_update_time") + + +class AwaitableGetModelResult(GetModelResult): + # pylint: disable=using-constant-test + def __await__(self): + if False: + yield self + return GetModelResult( + artifact_uri=self.artifact_uri, + container_spec=self.container_spec, + create_time=self.create_time, + deployed_models=self.deployed_models, + description=self.description, + display_name=self.display_name, + encryption_spec=self.encryption_spec, + etag=self.etag, + explanation_spec=self.explanation_spec, + labels=self.labels, + metadata=self.metadata, + metadata_artifact=self.metadata_artifact, + metadata_schema_uri=self.metadata_schema_uri, + model_source_info=self.model_source_info, + name=self.name, + original_model_info=self.original_model_info, + predict_schemata=self.predict_schemata, + supported_deployment_resources_types=self.supported_deployment_resources_types, + supported_export_formats=self.supported_export_formats, + supported_input_storage_formats=self.supported_input_storage_formats, + supported_output_storage_formats=self.supported_output_storage_formats, + training_pipeline=self.training_pipeline, + update_time=self.update_time, + version_aliases=self.version_aliases, + version_create_time=self.version_create_time, + version_description=self.version_description, + version_id=self.version_id, + version_update_time=self.version_update_time) + + +def get_model(location: Optional[str] = None, + model_id: Optional[str] = None, + project: Optional[str] = None, + opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetModelResult: + """ + Gets a Model. 
+ """ + __args__ = dict() + __args__['location'] = location + __args__['modelId'] = model_id + __args__['project'] = project + opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) + __ret__ = pulumi.runtime.invoke('google-native:aiplatform/v1beta1:getModel', __args__, opts=opts, typ=GetModelResult).value + + return AwaitableGetModelResult( + artifact_uri=pulumi.get(__ret__, 'artifact_uri'), + container_spec=pulumi.get(__ret__, 'container_spec'), + create_time=pulumi.get(__ret__, 'create_time'), + deployed_models=pulumi.get(__ret__, 'deployed_models'), + description=pulumi.get(__ret__, 'description'), + display_name=pulumi.get(__ret__, 'display_name'), + encryption_spec=pulumi.get(__ret__, 'encryption_spec'), + etag=pulumi.get(__ret__, 'etag'), + explanation_spec=pulumi.get(__ret__, 'explanation_spec'), + labels=pulumi.get(__ret__, 'labels'), + metadata=pulumi.get(__ret__, 'metadata'), + metadata_artifact=pulumi.get(__ret__, 'metadata_artifact'), + metadata_schema_uri=pulumi.get(__ret__, 'metadata_schema_uri'), + model_source_info=pulumi.get(__ret__, 'model_source_info'), + name=pulumi.get(__ret__, 'name'), + original_model_info=pulumi.get(__ret__, 'original_model_info'), + predict_schemata=pulumi.get(__ret__, 'predict_schemata'), + supported_deployment_resources_types=pulumi.get(__ret__, 'supported_deployment_resources_types'), + supported_export_formats=pulumi.get(__ret__, 'supported_export_formats'), + supported_input_storage_formats=pulumi.get(__ret__, 'supported_input_storage_formats'), + supported_output_storage_formats=pulumi.get(__ret__, 'supported_output_storage_formats'), + training_pipeline=pulumi.get(__ret__, 'training_pipeline'), + update_time=pulumi.get(__ret__, 'update_time'), + version_aliases=pulumi.get(__ret__, 'version_aliases'), + version_create_time=pulumi.get(__ret__, 'version_create_time'), + version_description=pulumi.get(__ret__, 'version_description'), + version_id=pulumi.get(__ret__, 'version_id'), + version_update_time=pulumi.get(__ret__, 'version_update_time')) + + +@_utilities.lift_output_func(get_model) +def get_model_output(location: Optional[pulumi.Input[str]] = None, + model_id: Optional[pulumi.Input[str]] = None, + project: Optional[pulumi.Input[Optional[str]]] = None, + opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetModelResult]: + """ + Gets a Model. + """ + ... diff --git a/sdk/python/pulumi_google_native/aiplatform/v1beta1/model.py b/sdk/python/pulumi_google_native/aiplatform/v1beta1/model.py new file mode 100644 index 0000000000..daad27ebf7 --- /dev/null +++ b/sdk/python/pulumi_google_native/aiplatform/v1beta1/model.py @@ -0,0 +1,744 @@ +# coding=utf-8 +# *** WARNING: this file was generated by the Pulumi SDK Generator. *** +# *** Do not edit by hand unless you're certain you know what you are doing! *** + +import copy +import warnings +import pulumi +import pulumi.runtime +from typing import Any, Mapping, Optional, Sequence, Union, overload +from ... import _utilities +from . 
import outputs +from ._enums import * +from ._inputs import * + +__all__ = ['ModelArgs', 'Model'] + +@pulumi.input_type +class ModelArgs: + def __init__(__self__, *, + display_name: pulumi.Input[str], + artifact_uri: Optional[pulumi.Input[str]] = None, + container_spec: Optional[pulumi.Input['GoogleCloudAiplatformV1beta1ModelContainerSpecArgs']] = None, + description: Optional[pulumi.Input[str]] = None, + encryption_spec: Optional[pulumi.Input['GoogleCloudAiplatformV1beta1EncryptionSpecArgs']] = None, + etag: Optional[pulumi.Input[str]] = None, + explanation_spec: Optional[pulumi.Input['GoogleCloudAiplatformV1beta1ExplanationSpecArgs']] = None, + labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + location: Optional[pulumi.Input[str]] = None, + metadata: Optional[Any] = None, + metadata_schema_uri: Optional[pulumi.Input[str]] = None, + model_id: Optional[pulumi.Input[str]] = None, + name: Optional[pulumi.Input[str]] = None, + parent_model: Optional[pulumi.Input[str]] = None, + predict_schemata: Optional[pulumi.Input['GoogleCloudAiplatformV1beta1PredictSchemataArgs']] = None, + project: Optional[pulumi.Input[str]] = None, + service_account: Optional[pulumi.Input[str]] = None, + version_aliases: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + version_description: Optional[pulumi.Input[str]] = None): + """ + The set of arguments for constructing a Model resource. + :param pulumi.Input[str] display_name: The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + :param pulumi.Input[str] artifact_uri: Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + :param pulumi.Input['GoogleCloudAiplatformV1beta1ModelContainerSpecArgs'] container_spec: Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + :param pulumi.Input[str] description: The description of the Model. + :param pulumi.Input['GoogleCloudAiplatformV1beta1EncryptionSpecArgs'] encryption_spec: Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + :param pulumi.Input[str] etag: Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + :param pulumi.Input['GoogleCloudAiplatformV1beta1ExplanationSpecArgs'] explanation_spec: The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: The labels with user-defined metadata to organize your Models. 
Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + :param Any metadata: Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + :param pulumi.Input[str] metadata_schema_uri: Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + :param pulumi.Input[str] model_id: Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. + :param pulumi.Input[str] name: The resource name of the Model. + :param pulumi.Input[str] parent_model: Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version. + :param pulumi.Input['GoogleCloudAiplatformV1beta1PredictSchemataArgs'] predict_schemata: The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + :param pulumi.Input[str] service_account: Optional. The user-provided custom service account to use to do the model upload. If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.). + :param pulumi.Input[Sequence[pulumi.Input[str]]] version_aliases: User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + :param pulumi.Input[str] version_description: The description of this version. 
+ """ + pulumi.set(__self__, "display_name", display_name) + if artifact_uri is not None: + pulumi.set(__self__, "artifact_uri", artifact_uri) + if container_spec is not None: + pulumi.set(__self__, "container_spec", container_spec) + if description is not None: + pulumi.set(__self__, "description", description) + if encryption_spec is not None: + pulumi.set(__self__, "encryption_spec", encryption_spec) + if etag is not None: + pulumi.set(__self__, "etag", etag) + if explanation_spec is not None: + pulumi.set(__self__, "explanation_spec", explanation_spec) + if labels is not None: + pulumi.set(__self__, "labels", labels) + if location is not None: + pulumi.set(__self__, "location", location) + if metadata is not None: + pulumi.set(__self__, "metadata", metadata) + if metadata_schema_uri is not None: + pulumi.set(__self__, "metadata_schema_uri", metadata_schema_uri) + if model_id is not None: + pulumi.set(__self__, "model_id", model_id) + if name is not None: + pulumi.set(__self__, "name", name) + if parent_model is not None: + pulumi.set(__self__, "parent_model", parent_model) + if predict_schemata is not None: + pulumi.set(__self__, "predict_schemata", predict_schemata) + if project is not None: + pulumi.set(__self__, "project", project) + if service_account is not None: + pulumi.set(__self__, "service_account", service_account) + if version_aliases is not None: + pulumi.set(__self__, "version_aliases", version_aliases) + if version_description is not None: + pulumi.set(__self__, "version_description", version_description) + + @property + @pulumi.getter(name="displayName") + def display_name(self) -> pulumi.Input[str]: + """ + The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + """ + return pulumi.get(self, "display_name") + + @display_name.setter + def display_name(self, value: pulumi.Input[str]): + pulumi.set(self, "display_name", value) + + @property + @pulumi.getter(name="artifactUri") + def artifact_uri(self) -> Optional[pulumi.Input[str]]: + """ + Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + """ + return pulumi.get(self, "artifact_uri") + + @artifact_uri.setter + def artifact_uri(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "artifact_uri", value) + + @property + @pulumi.getter(name="containerSpec") + def container_spec(self) -> Optional[pulumi.Input['GoogleCloudAiplatformV1beta1ModelContainerSpecArgs']]: + """ + Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + """ + return pulumi.get(self, "container_spec") + + @container_spec.setter + def container_spec(self, value: Optional[pulumi.Input['GoogleCloudAiplatformV1beta1ModelContainerSpecArgs']]): + pulumi.set(self, "container_spec", value) + + @property + @pulumi.getter + def description(self) -> Optional[pulumi.Input[str]]: + """ + The description of the Model. 
+ """ + return pulumi.get(self, "description") + + @description.setter + def description(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "description", value) + + @property + @pulumi.getter(name="encryptionSpec") + def encryption_spec(self) -> Optional[pulumi.Input['GoogleCloudAiplatformV1beta1EncryptionSpecArgs']]: + """ + Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + """ + return pulumi.get(self, "encryption_spec") + + @encryption_spec.setter + def encryption_spec(self, value: Optional[pulumi.Input['GoogleCloudAiplatformV1beta1EncryptionSpecArgs']]): + pulumi.set(self, "encryption_spec", value) + + @property + @pulumi.getter + def etag(self) -> Optional[pulumi.Input[str]]: + """ + Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + """ + return pulumi.get(self, "etag") + + @etag.setter + def etag(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "etag", value) + + @property + @pulumi.getter(name="explanationSpec") + def explanation_spec(self) -> Optional[pulumi.Input['GoogleCloudAiplatformV1beta1ExplanationSpecArgs']]: + """ + The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + """ + return pulumi.get(self, "explanation_spec") + + @explanation_spec.setter + def explanation_spec(self, value: Optional[pulumi.Input['GoogleCloudAiplatformV1beta1ExplanationSpecArgs']]): + pulumi.set(self, "explanation_spec", value) + + @property + @pulumi.getter + def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: + """ + The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + """ + return pulumi.get(self, "labels") + + @labels.setter + def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): + pulumi.set(self, "labels", value) + + @property + @pulumi.getter + def location(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "location") + + @location.setter + def location(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "location", value) + + @property + @pulumi.getter + def metadata(self) -> Optional[Any]: + """ + Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + """ + return pulumi.get(self, "metadata") + + @metadata.setter + def metadata(self, value: Optional[Any]): + pulumi.set(self, "metadata", value) + + @property + @pulumi.getter(name="metadataSchemaUri") + def metadata_schema_uri(self) -> Optional[pulumi.Input[str]]: + """ + Immutable. 
Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + """ + return pulumi.get(self, "metadata_schema_uri") + + @metadata_schema_uri.setter + def metadata_schema_uri(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "metadata_schema_uri", value) + + @property + @pulumi.getter(name="modelId") + def model_id(self) -> Optional[pulumi.Input[str]]: + """ + Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. + """ + return pulumi.get(self, "model_id") + + @model_id.setter + def model_id(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "model_id", value) + + @property + @pulumi.getter + def name(self) -> Optional[pulumi.Input[str]]: + """ + The resource name of the Model. + """ + return pulumi.get(self, "name") + + @name.setter + def name(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "name", value) + + @property + @pulumi.getter(name="parentModel") + def parent_model(self) -> Optional[pulumi.Input[str]]: + """ + Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version. + """ + return pulumi.get(self, "parent_model") + + @parent_model.setter + def parent_model(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "parent_model", value) + + @property + @pulumi.getter(name="predictSchemata") + def predict_schemata(self) -> Optional[pulumi.Input['GoogleCloudAiplatformV1beta1PredictSchemataArgs']]: + """ + The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + """ + return pulumi.get(self, "predict_schemata") + + @predict_schemata.setter + def predict_schemata(self, value: Optional[pulumi.Input['GoogleCloudAiplatformV1beta1PredictSchemataArgs']]): + pulumi.set(self, "predict_schemata", value) + + @property + @pulumi.getter + def project(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "project") + + @project.setter + def project(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "project", value) + + @property + @pulumi.getter(name="serviceAccount") + def service_account(self) -> Optional[pulumi.Input[str]]: + """ + Optional. The user-provided custom service account to use to do the model upload. If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.). 
+ """ + return pulumi.get(self, "service_account") + + @service_account.setter + def service_account(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "service_account", value) + + @property + @pulumi.getter(name="versionAliases") + def version_aliases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: + """ + User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + """ + return pulumi.get(self, "version_aliases") + + @version_aliases.setter + def version_aliases(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): + pulumi.set(self, "version_aliases", value) + + @property + @pulumi.getter(name="versionDescription") + def version_description(self) -> Optional[pulumi.Input[str]]: + """ + The description of this version. + """ + return pulumi.get(self, "version_description") + + @version_description.setter + def version_description(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "version_description", value) + + +class Model(pulumi.CustomResource): + @overload + def __init__(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + artifact_uri: Optional[pulumi.Input[str]] = None, + container_spec: Optional[pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1beta1ModelContainerSpecArgs']]] = None, + description: Optional[pulumi.Input[str]] = None, + display_name: Optional[pulumi.Input[str]] = None, + encryption_spec: Optional[pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1beta1EncryptionSpecArgs']]] = None, + etag: Optional[pulumi.Input[str]] = None, + explanation_spec: Optional[pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1beta1ExplanationSpecArgs']]] = None, + labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + location: Optional[pulumi.Input[str]] = None, + metadata: Optional[Any] = None, + metadata_schema_uri: Optional[pulumi.Input[str]] = None, + model_id: Optional[pulumi.Input[str]] = None, + name: Optional[pulumi.Input[str]] = None, + parent_model: Optional[pulumi.Input[str]] = None, + predict_schemata: Optional[pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1beta1PredictSchemataArgs']]] = None, + project: Optional[pulumi.Input[str]] = None, + service_account: Optional[pulumi.Input[str]] = None, + version_aliases: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + version_description: Optional[pulumi.Input[str]] = None, + __props__=None): + """ + Uploads a Model artifact into Vertex AI. + + :param str resource_name: The name of the resource. + :param pulumi.ResourceOptions opts: Options for the resource. + :param pulumi.Input[str] artifact_uri: Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + :param pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1beta1ModelContainerSpecArgs']] container_spec: Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. 
Not present for AutoML Models or Large Models. + :param pulumi.Input[str] description: The description of the Model. + :param pulumi.Input[str] display_name: The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. + :param pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1beta1EncryptionSpecArgs']] encryption_spec: Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + :param pulumi.Input[str] etag: Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + :param pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1beta1ExplanationSpecArgs']] explanation_spec: The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + :param Any metadata: Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + :param pulumi.Input[str] metadata_schema_uri: Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. The output URI will point to a location where the user only has a read access. + :param pulumi.Input[str] model_id: Optional. The ID to use for the uploaded Model, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen. + :param pulumi.Input[str] name: The resource name of the Model. + :param pulumi.Input[str] parent_model: Optional. The resource name of the model into which to upload the version. Only specify this field when uploading a new version. + :param pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1beta1PredictSchemataArgs']] predict_schemata: The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + :param pulumi.Input[str] service_account: Optional. 
The user-provided custom service account to use to do the model upload. If empty, [Vertex AI Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) will be used to access resources needed to upload the model. This account must belong to the target project where the model is uploaded to, i.e., the project specified in the `parent` field of this request and have necessary read permissions (to Google Cloud Storage, Artifact Registry, etc.). + :param pulumi.Input[Sequence[pulumi.Input[str]]] version_aliases: User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + :param pulumi.Input[str] version_description: The description of this version. + """ + ... + @overload + def __init__(__self__, + resource_name: str, + args: ModelArgs, + opts: Optional[pulumi.ResourceOptions] = None): + """ + Uploads a Model artifact into Vertex AI. + + :param str resource_name: The name of the resource. + :param ModelArgs args: The arguments to use to populate this resource's properties. + :param pulumi.ResourceOptions opts: Options for the resource. + """ + ... + def __init__(__self__, resource_name: str, *args, **kwargs): + resource_args, opts = _utilities.get_resource_args_opts(ModelArgs, pulumi.ResourceOptions, *args, **kwargs) + if resource_args is not None: + __self__._internal_init(resource_name, opts, **resource_args.__dict__) + else: + __self__._internal_init(resource_name, *args, **kwargs) + + def _internal_init(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + artifact_uri: Optional[pulumi.Input[str]] = None, + container_spec: Optional[pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1beta1ModelContainerSpecArgs']]] = None, + description: Optional[pulumi.Input[str]] = None, + display_name: Optional[pulumi.Input[str]] = None, + encryption_spec: Optional[pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1beta1EncryptionSpecArgs']]] = None, + etag: Optional[pulumi.Input[str]] = None, + explanation_spec: Optional[pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1beta1ExplanationSpecArgs']]] = None, + labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + location: Optional[pulumi.Input[str]] = None, + metadata: Optional[Any] = None, + metadata_schema_uri: Optional[pulumi.Input[str]] = None, + model_id: Optional[pulumi.Input[str]] = None, + name: Optional[pulumi.Input[str]] = None, + parent_model: Optional[pulumi.Input[str]] = None, + predict_schemata: Optional[pulumi.Input[pulumi.InputType['GoogleCloudAiplatformV1beta1PredictSchemataArgs']]] = None, + project: Optional[pulumi.Input[str]] = None, + service_account: Optional[pulumi.Input[str]] = None, + version_aliases: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + version_description: Optional[pulumi.Input[str]] = None, + __props__=None): + opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) + if not isinstance(opts, pulumi.ResourceOptions): + raise TypeError('Expected resource options to be a ResourceOptions instance') + if opts.id is None: + if __props__ is not None: + raise 
TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') + __props__ = ModelArgs.__new__(ModelArgs) + + __props__.__dict__["artifact_uri"] = artifact_uri + __props__.__dict__["container_spec"] = container_spec + __props__.__dict__["description"] = description + if display_name is None and not opts.urn: + raise TypeError("Missing required property 'display_name'") + __props__.__dict__["display_name"] = display_name + __props__.__dict__["encryption_spec"] = encryption_spec + __props__.__dict__["etag"] = etag + __props__.__dict__["explanation_spec"] = explanation_spec + __props__.__dict__["labels"] = labels + __props__.__dict__["location"] = location + __props__.__dict__["metadata"] = metadata + __props__.__dict__["metadata_schema_uri"] = metadata_schema_uri + __props__.__dict__["model_id"] = model_id + __props__.__dict__["name"] = name + __props__.__dict__["parent_model"] = parent_model + __props__.__dict__["predict_schemata"] = predict_schemata + __props__.__dict__["project"] = project + __props__.__dict__["service_account"] = service_account + __props__.__dict__["version_aliases"] = version_aliases + __props__.__dict__["version_description"] = version_description + __props__.__dict__["create_time"] = None + __props__.__dict__["deployed_models"] = None + __props__.__dict__["metadata_artifact"] = None + __props__.__dict__["model_source_info"] = None + __props__.__dict__["original_model_info"] = None + __props__.__dict__["supported_deployment_resources_types"] = None + __props__.__dict__["supported_export_formats"] = None + __props__.__dict__["supported_input_storage_formats"] = None + __props__.__dict__["supported_output_storage_formats"] = None + __props__.__dict__["training_pipeline"] = None + __props__.__dict__["update_time"] = None + __props__.__dict__["version_create_time"] = None + __props__.__dict__["version_id"] = None + __props__.__dict__["version_update_time"] = None + replace_on_changes = pulumi.ResourceOptions(replace_on_changes=["location", "project"]) + opts = pulumi.ResourceOptions.merge(opts, replace_on_changes) + super(Model, __self__).__init__( + 'google-native:aiplatform/v1beta1:Model', + resource_name, + __props__, + opts) + + @staticmethod + def get(resource_name: str, + id: pulumi.Input[str], + opts: Optional[pulumi.ResourceOptions] = None) -> 'Model': + """ + Get an existing Model resource's state with the given name, id, and optional extra + properties used to qualify the lookup. + + :param str resource_name: The unique name of the resulting resource. + :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. + :param pulumi.ResourceOptions opts: Options for the resource. 
+ """ + opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) + + __props__ = ModelArgs.__new__(ModelArgs) + + __props__.__dict__["artifact_uri"] = None + __props__.__dict__["container_spec"] = None + __props__.__dict__["create_time"] = None + __props__.__dict__["deployed_models"] = None + __props__.__dict__["description"] = None + __props__.__dict__["display_name"] = None + __props__.__dict__["encryption_spec"] = None + __props__.__dict__["etag"] = None + __props__.__dict__["explanation_spec"] = None + __props__.__dict__["labels"] = None + __props__.__dict__["location"] = None + __props__.__dict__["metadata"] = None + __props__.__dict__["metadata_artifact"] = None + __props__.__dict__["metadata_schema_uri"] = None + __props__.__dict__["model_source_info"] = None + __props__.__dict__["name"] = None + __props__.__dict__["original_model_info"] = None + __props__.__dict__["predict_schemata"] = None + __props__.__dict__["project"] = None + __props__.__dict__["supported_deployment_resources_types"] = None + __props__.__dict__["supported_export_formats"] = None + __props__.__dict__["supported_input_storage_formats"] = None + __props__.__dict__["supported_output_storage_formats"] = None + __props__.__dict__["training_pipeline"] = None + __props__.__dict__["update_time"] = None + __props__.__dict__["version_aliases"] = None + __props__.__dict__["version_create_time"] = None + __props__.__dict__["version_description"] = None + __props__.__dict__["version_id"] = None + __props__.__dict__["version_update_time"] = None + return Model(resource_name, opts=opts, __props__=__props__) + + @property + @pulumi.getter(name="artifactUri") + def artifact_uri(self) -> pulumi.Output[str]: + """ + Immutable. The path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models or Large Models. + """ + return pulumi.get(self, "artifact_uri") + + @property + @pulumi.getter(name="containerSpec") + def container_spec(self) -> pulumi.Output['outputs.GoogleCloudAiplatformV1beta1ModelContainerSpecResponse']: + """ + Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon ModelService.UploadModel, and all binaries it contains are copied and stored internally by Vertex AI. Not present for AutoML Models or Large Models. + """ + return pulumi.get(self, "container_spec") + + @property + @pulumi.getter(name="createTime") + def create_time(self) -> pulumi.Output[str]: + """ + Timestamp when this Model was uploaded into Vertex AI. + """ + return pulumi.get(self, "create_time") + + @property + @pulumi.getter(name="deployedModels") + def deployed_models(self) -> pulumi.Output[Sequence['outputs.GoogleCloudAiplatformV1beta1DeployedModelRefResponse']]: + """ + The pointers to DeployedModels created from this Model. Note that Model could have been deployed to Endpoints in different Locations. + """ + return pulumi.get(self, "deployed_models") + + @property + @pulumi.getter + def description(self) -> pulumi.Output[str]: + """ + The description of the Model. + """ + return pulumi.get(self, "description") + + @property + @pulumi.getter(name="displayName") + def display_name(self) -> pulumi.Output[str]: + """ + The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. 
+ """ + return pulumi.get(self, "display_name") + + @property + @pulumi.getter(name="encryptionSpec") + def encryption_spec(self) -> pulumi.Output['outputs.GoogleCloudAiplatformV1beta1EncryptionSpecResponse']: + """ + Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. + """ + return pulumi.get(self, "encryption_spec") + + @property + @pulumi.getter + def etag(self) -> pulumi.Output[str]: + """ + Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + """ + return pulumi.get(self, "etag") + + @property + @pulumi.getter(name="explanationSpec") + def explanation_spec(self) -> pulumi.Output['outputs.GoogleCloudAiplatformV1beta1ExplanationSpecResponse']: + """ + The default explanation specification for this Model. The Model can be used for requesting explanation after being deployed if it is populated. The Model can be used for batch explanation if it is populated. All fields of the explanation_spec can be overridden by explanation_spec of DeployModelRequest.deployed_model, or explanation_spec of BatchPredictionJob. If the default explanation specification is not set for this Model, this Model can still be used for requesting explanation by setting explanation_spec of DeployModelRequest.deployed_model and for batch explanation by setting explanation_spec of BatchPredictionJob. + """ + return pulumi.get(self, "explanation_spec") + + @property + @pulumi.getter + def labels(self) -> pulumi.Output[Mapping[str, str]]: + """ + The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + """ + return pulumi.get(self, "labels") + + @property + @pulumi.getter + def location(self) -> pulumi.Output[str]: + return pulumi.get(self, "location") + + @property + @pulumi.getter + def metadata(self) -> pulumi.Output[Any]: + """ + Immutable. An additional information about the Model; the schema of the metadata can be found in metadata_schema. Unset if the Model does not have any additional information. + """ + return pulumi.get(self, "metadata") + + @property + @pulumi.getter(name="metadataArtifact") + def metadata_artifact(self) -> pulumi.Output[str]: + """ + The resource name of the Artifact that was created in MetadataStore when creating the Model. The Artifact resource name pattern is `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. + """ + return pulumi.get(self, "metadata_artifact") + + @property + @pulumi.getter(name="metadataSchemaUri") + def metadata_schema_uri(self) -> pulumi.Output[str]: + """ + Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, that is specific to it. Unset if the Model does not have any additional information. The schema is defined as an OpenAPI 3.0.2 [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). AutoML Models always have this field populated by Vertex AI, if no additional metadata is needed, this field is set to an empty string. Note: The URI given on output will be immutable and probably different, including the URI scheme, than the one given on input. 
The output URI will point to a location where the user only has a read access. + """ + return pulumi.get(self, "metadata_schema_uri") + + @property + @pulumi.getter(name="modelSourceInfo") + def model_source_info(self) -> pulumi.Output['outputs.GoogleCloudAiplatformV1beta1ModelSourceInfoResponse']: + """ + Source of a model. It can either be automl training pipeline, custom training pipeline, BigQuery ML, or existing Vertex AI Model. + """ + return pulumi.get(self, "model_source_info") + + @property + @pulumi.getter + def name(self) -> pulumi.Output[str]: + """ + The resource name of the Model. + """ + return pulumi.get(self, "name") + + @property + @pulumi.getter(name="originalModelInfo") + def original_model_info(self) -> pulumi.Output['outputs.GoogleCloudAiplatformV1beta1ModelOriginalModelInfoResponse']: + """ + If this Model is a copy of another Model, this contains info about the original. + """ + return pulumi.get(self, "original_model_info") + + @property + @pulumi.getter(name="predictSchemata") + def predict_schemata(self) -> pulumi.Output['outputs.GoogleCloudAiplatformV1beta1PredictSchemataResponse']: + """ + The schemata that describe formats of the Model's predictions and explanations as given and returned via PredictionService.Predict and PredictionService.Explain. + """ + return pulumi.get(self, "predict_schemata") + + @property + @pulumi.getter + def project(self) -> pulumi.Output[str]: + return pulumi.get(self, "project") + + @property + @pulumi.getter(name="supportedDeploymentResourcesTypes") + def supported_deployment_resources_types(self) -> pulumi.Output[Sequence[str]]: + """ + When this Model is deployed, its prediction resources are described by the `prediction_resources` field of the Endpoint.deployed_models object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. If no configuration types are listed, the Model cannot be deployed to an Endpoint and does not support online predictions (PredictionService.Predict or PredictionService.Explain). Such a Model can serve predictions by using a BatchPredictionJob, if it has at least one entry each in supported_input_storage_formats and supported_output_storage_formats. + """ + return pulumi.get(self, "supported_deployment_resources_types") + + @property + @pulumi.getter(name="supportedExportFormats") + def supported_export_formats(self) -> pulumi.Output[Sequence['outputs.GoogleCloudAiplatformV1beta1ModelExportFormatResponse']]: + """ + The formats in which this Model may be exported. If empty, this Model is not available for export. + """ + return pulumi.get(self, "supported_export_formats") + + @property + @pulumi.getter(name="supportedInputStorageFormats") + def supported_input_storage_formats(self) -> pulumi.Output[Sequence[str]]: + """ + The formats this Model supports in BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri exists, the instances should be given as per that schema. The possible formats are: * `jsonl` The JSON Lines format, where each instance is a single line. Uses GcsSource. * `csv` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsSource. * `tf-record` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in BigQuery. 
Uses BigQuerySource. * `file-list` Each line of the file is the location of an instance to process, uses `gcs_source` field of the InputConfig object. If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + """ + return pulumi.get(self, "supported_input_storage_formats") + + @property + @pulumi.getter(name="supportedOutputStorageFormats") + def supported_output_storage_formats(self) -> pulumi.Output[Sequence[str]]: + """ + The formats this Model supports in BatchPredictionJob.output_config. If both PredictSchemata.instance_schema_uri and PredictSchemata.prediction_schema_uri exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction content (as per the schema). The possible formats are: * `jsonl` The JSON Lines format, where each prediction is a single line. Uses GcsDestination. * `csv` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses GcsDestination. * `bigquery` Each prediction is a single row in a BigQuery table, uses BigQueryDestination . If this Model doesn't support any of these formats it means it cannot be used with a BatchPredictionJob. However, if it has supported_deployment_resources_types, it could serve online predictions by using PredictionService.Predict or PredictionService.Explain. + """ + return pulumi.get(self, "supported_output_storage_formats") + + @property + @pulumi.getter(name="trainingPipeline") + def training_pipeline(self) -> pulumi.Output[str]: + """ + The resource name of the TrainingPipeline that uploaded this Model, if any. + """ + return pulumi.get(self, "training_pipeline") + + @property + @pulumi.getter(name="updateTime") + def update_time(self) -> pulumi.Output[str]: + """ + Timestamp when this Model was most recently updated. + """ + return pulumi.get(self, "update_time") + + @property + @pulumi.getter(name="versionAliases") + def version_aliases(self) -> pulumi.Output[Sequence[str]]: + """ + User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model. + """ + return pulumi.get(self, "version_aliases") + + @property + @pulumi.getter(name="versionCreateTime") + def version_create_time(self) -> pulumi.Output[str]: + """ + Timestamp when this version was created. + """ + return pulumi.get(self, "version_create_time") + + @property + @pulumi.getter(name="versionDescription") + def version_description(self) -> pulumi.Output[str]: + """ + The description of this version. + """ + return pulumi.get(self, "version_description") + + @property + @pulumi.getter(name="versionId") + def version_id(self) -> pulumi.Output[str]: + """ + Immutable. The version ID of the model. A new version is committed when a new model version is uploaded or trained under an existing model id. 
It is an auto-incrementing decimal number in string representation. + """ + return pulumi.get(self, "version_id") + + @property + @pulumi.getter(name="versionUpdateTime") + def version_update_time(self) -> pulumi.Output[str]: + """ + Timestamp when this version was most recently updated. + """ + return pulumi.get(self, "version_update_time") + diff --git a/sdk/python/pulumi_google_native/cloudsearch/v1/__init__.py b/sdk/python/pulumi_google_native/cloudsearch/v1/__init__.py index e873710267..fa60955771 100644 --- a/sdk/python/pulumi_google_native/cloudsearch/v1/__init__.py +++ b/sdk/python/pulumi_google_native/cloudsearch/v1/__init__.py @@ -8,7 +8,9 @@ from ._enums import * from .data_source import * from .get_data_source import * +from .get_item import * from .get_search_application import * +from .item import * from .search_application import * from ._inputs import * from . import outputs diff --git a/sdk/python/pulumi_google_native/cloudsearch/v1/_inputs.py b/sdk/python/pulumi_google_native/cloudsearch/v1/_inputs.py index f96ad4060e..8269da858b 100644 --- a/sdk/python/pulumi_google_native/cloudsearch/v1/_inputs.py +++ b/sdk/python/pulumi_google_native/cloudsearch/v1/_inputs.py @@ -14,6 +14,7 @@ 'CompositeFilterArgs', 'DataSourceRestrictionArgs', 'DateArgs', + 'DebugOptionsArgs', 'FacetOptionsArgs', 'FilterOptionsArgs', 'FilterArgs', @@ -165,6 +166,30 @@ def year(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "year", value) +@pulumi.input_type +class DebugOptionsArgs: + def __init__(__self__, *, + enable_debugging: Optional[pulumi.Input[bool]] = None): + """ + Shared request debug options for all cloudsearch RPC methods. + :param pulumi.Input[bool] enable_debugging: If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field. + """ + if enable_debugging is not None: + pulumi.set(__self__, "enable_debugging", enable_debugging) + + @property + @pulumi.getter(name="enableDebugging") + def enable_debugging(self) -> Optional[pulumi.Input[bool]]: + """ + If you are asked by Google to help with debugging, set this field. Otherwise, ignore this field. + """ + return pulumi.get(self, "enable_debugging") + + @enable_debugging.setter + def enable_debugging(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "enable_debugging", value) + + @pulumi.input_type class FacetOptionsArgs: def __init__(__self__, *, diff --git a/sdk/python/pulumi_google_native/cloudsearch/v1/get_item.py b/sdk/python/pulumi_google_native/cloudsearch/v1/get_item.py new file mode 100644 index 0000000000..1f17b504e7 --- /dev/null +++ b/sdk/python/pulumi_google_native/cloudsearch/v1/get_item.py @@ -0,0 +1,191 @@ +# coding=utf-8 +# *** WARNING: this file was generated by the Pulumi SDK Generator. *** +# *** Do not edit by hand unless you're certain you know what you are doing! *** + +import copy +import warnings +import pulumi +import pulumi.runtime +from typing import Any, Mapping, Optional, Sequence, Union, overload +from ... import _utilities +from . 
import outputs + +__all__ = [ + 'GetItemResult', + 'AwaitableGetItemResult', + 'get_item', + 'get_item_output', +] + +@pulumi.output_type +class GetItemResult: + def __init__(__self__, acl=None, content=None, item_type=None, metadata=None, name=None, payload=None, queue=None, status=None, structured_data=None, version=None): + if acl and not isinstance(acl, dict): + raise TypeError("Expected argument 'acl' to be a dict") + pulumi.set(__self__, "acl", acl) + if content and not isinstance(content, dict): + raise TypeError("Expected argument 'content' to be a dict") + pulumi.set(__self__, "content", content) + if item_type and not isinstance(item_type, str): + raise TypeError("Expected argument 'item_type' to be a str") + pulumi.set(__self__, "item_type", item_type) + if metadata and not isinstance(metadata, dict): + raise TypeError("Expected argument 'metadata' to be a dict") + pulumi.set(__self__, "metadata", metadata) + if name and not isinstance(name, str): + raise TypeError("Expected argument 'name' to be a str") + pulumi.set(__self__, "name", name) + if payload and not isinstance(payload, str): + raise TypeError("Expected argument 'payload' to be a str") + pulumi.set(__self__, "payload", payload) + if queue and not isinstance(queue, str): + raise TypeError("Expected argument 'queue' to be a str") + pulumi.set(__self__, "queue", queue) + if status and not isinstance(status, dict): + raise TypeError("Expected argument 'status' to be a dict") + pulumi.set(__self__, "status", status) + if structured_data and not isinstance(structured_data, dict): + raise TypeError("Expected argument 'structured_data' to be a dict") + pulumi.set(__self__, "structured_data", structured_data) + if version and not isinstance(version, str): + raise TypeError("Expected argument 'version' to be a str") + pulumi.set(__self__, "version", version) + + @property + @pulumi.getter + def acl(self) -> 'outputs.ItemAclResponse': + """ + Access control list for this item. + """ + return pulumi.get(self, "acl") + + @property + @pulumi.getter + def content(self) -> 'outputs.ItemContentResponse': + """ + Item content to be indexed and made text searchable. + """ + return pulumi.get(self, "content") + + @property + @pulumi.getter(name="itemType") + def item_type(self) -> str: + """ + The type for this item. + """ + return pulumi.get(self, "item_type") + + @property + @pulumi.getter + def metadata(self) -> 'outputs.ItemMetadataResponse': + """ + The metadata information. + """ + return pulumi.get(self, "metadata") + + @property + @pulumi.getter + def name(self) -> str: + """ + The name of the Item. Format: datasources/{source_id}/items/{item_id} This is a required field. The maximum length is 1536 characters. + """ + return pulumi.get(self, "name") + + @property + @pulumi.getter + def payload(self) -> str: + """ + Additional state connector can store for this item. The maximum length is 10000 bytes. + """ + return pulumi.get(self, "payload") + + @property + @pulumi.getter + def queue(self) -> str: + """ + Queue this item belongs to. The maximum length is 100 characters. + """ + return pulumi.get(self, "queue") + + @property + @pulumi.getter + def status(self) -> 'outputs.ItemStatusResponse': + """ + Status of the item. Output only field. + """ + return pulumi.get(self, "status") + + @property + @pulumi.getter(name="structuredData") + def structured_data(self) -> 'outputs.ItemStructuredDataResponse': + """ + The structured data for the item that should conform to a registered object definition in the schema for the data source. 
+ """ + return pulumi.get(self, "structured_data") + + @property + @pulumi.getter + def version(self) -> str: + """ + The indexing system stores the version from the datasource as a byte string and compares the Item version in the index to the version of the queued Item using lexical ordering. Cloud Search Indexing won't index or delete any queued item with a version value that is less than or equal to the version of the currently indexed item. The maximum length for this field is 1024 bytes. For information on how item version affects the deletion process, refer to [Handle revisions after manual deletes](https://developers.google.com/cloud-search/docs/guides/operations). + """ + return pulumi.get(self, "version") + + +class AwaitableGetItemResult(GetItemResult): + # pylint: disable=using-constant-test + def __await__(self): + if False: + yield self + return GetItemResult( + acl=self.acl, + content=self.content, + item_type=self.item_type, + metadata=self.metadata, + name=self.name, + payload=self.payload, + queue=self.queue, + status=self.status, + structured_data=self.structured_data, + version=self.version) + + +def get_item(connector_name: Optional[str] = None, + datasource_id: Optional[str] = None, + debug_options_enable_debugging: Optional[bool] = None, + item_id: Optional[str] = None, + opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetItemResult: + """ + Gets Item resource by item name. This API requires an admin or service account to execute. The service account used is the one whitelisted in the corresponding data source. + """ + __args__ = dict() + __args__['connectorName'] = connector_name + __args__['datasourceId'] = datasource_id + __args__['debugOptionsEnableDebugging'] = debug_options_enable_debugging + __args__['itemId'] = item_id + opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) + __ret__ = pulumi.runtime.invoke('google-native:cloudsearch/v1:getItem', __args__, opts=opts, typ=GetItemResult).value + + return AwaitableGetItemResult( + acl=pulumi.get(__ret__, 'acl'), + content=pulumi.get(__ret__, 'content'), + item_type=pulumi.get(__ret__, 'item_type'), + metadata=pulumi.get(__ret__, 'metadata'), + name=pulumi.get(__ret__, 'name'), + payload=pulumi.get(__ret__, 'payload'), + queue=pulumi.get(__ret__, 'queue'), + status=pulumi.get(__ret__, 'status'), + structured_data=pulumi.get(__ret__, 'structured_data'), + version=pulumi.get(__ret__, 'version')) + + +@_utilities.lift_output_func(get_item) +def get_item_output(connector_name: Optional[pulumi.Input[Optional[str]]] = None, + datasource_id: Optional[pulumi.Input[str]] = None, + debug_options_enable_debugging: Optional[pulumi.Input[Optional[bool]]] = None, + item_id: Optional[pulumi.Input[str]] = None, + opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetItemResult]: + """ + Gets Item resource by item name. This API requires an admin or service account to execute. The service account used is the one whitelisted in the corresponding data source. + """ + ... diff --git a/sdk/python/pulumi_google_native/cloudsearch/v1/item.py b/sdk/python/pulumi_google_native/cloudsearch/v1/item.py new file mode 100644 index 0000000000..d5ffc9d05d --- /dev/null +++ b/sdk/python/pulumi_google_native/cloudsearch/v1/item.py @@ -0,0 +1,280 @@ +# coding=utf-8 +# *** WARNING: this file was generated by the Pulumi SDK Generator. *** +# *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +import copy +import warnings +import pulumi +import pulumi.runtime +from typing import Any, Mapping, Optional, Sequence, Union, overload +from ... import _utilities +from . import outputs +from ._inputs import * + +__all__ = ['ItemArgs', 'Item'] + +@pulumi.input_type +class ItemArgs: + def __init__(__self__, *, + datasource_id: pulumi.Input[str], + item_id: pulumi.Input[str], + connector_name: Optional[pulumi.Input[str]] = None, + debug_options: Optional[pulumi.Input['DebugOptionsArgs']] = None): + """ + The set of arguments for constructing a Item resource. + :param pulumi.Input[str] connector_name: The name of connector making this call. Format: datasources/{source_id}/connectors/{ID} + :param pulumi.Input['DebugOptionsArgs'] debug_options: Common debug options. + """ + pulumi.set(__self__, "datasource_id", datasource_id) + pulumi.set(__self__, "item_id", item_id) + if connector_name is not None: + pulumi.set(__self__, "connector_name", connector_name) + if debug_options is not None: + pulumi.set(__self__, "debug_options", debug_options) + + @property + @pulumi.getter(name="datasourceId") + def datasource_id(self) -> pulumi.Input[str]: + return pulumi.get(self, "datasource_id") + + @datasource_id.setter + def datasource_id(self, value: pulumi.Input[str]): + pulumi.set(self, "datasource_id", value) + + @property + @pulumi.getter(name="itemId") + def item_id(self) -> pulumi.Input[str]: + return pulumi.get(self, "item_id") + + @item_id.setter + def item_id(self, value: pulumi.Input[str]): + pulumi.set(self, "item_id", value) + + @property + @pulumi.getter(name="connectorName") + def connector_name(self) -> Optional[pulumi.Input[str]]: + """ + The name of connector making this call. Format: datasources/{source_id}/connectors/{ID} + """ + return pulumi.get(self, "connector_name") + + @connector_name.setter + def connector_name(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "connector_name", value) + + @property + @pulumi.getter(name="debugOptions") + def debug_options(self) -> Optional[pulumi.Input['DebugOptionsArgs']]: + """ + Common debug options. + """ + return pulumi.get(self, "debug_options") + + @debug_options.setter + def debug_options(self, value: Optional[pulumi.Input['DebugOptionsArgs']]): + pulumi.set(self, "debug_options", value) + + +class Item(pulumi.CustomResource): + @overload + def __init__(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + connector_name: Optional[pulumi.Input[str]] = None, + datasource_id: Optional[pulumi.Input[str]] = None, + debug_options: Optional[pulumi.Input[pulumi.InputType['DebugOptionsArgs']]] = None, + item_id: Optional[pulumi.Input[str]] = None, + __props__=None): + """ + Creates an upload session for uploading item content. For items smaller than 100 KB, it's easier to embed the content inline within an index request. This API requires an admin or service account to execute. The service account used is the one whitelisted in the corresponding data source. + Auto-naming is currently not supported for this resource. + + :param str resource_name: The name of the resource. + :param pulumi.ResourceOptions opts: Options for the resource. + :param pulumi.Input[str] connector_name: The name of connector making this call. Format: datasources/{source_id}/connectors/{ID} + :param pulumi.Input[pulumi.InputType['DebugOptionsArgs']] debug_options: Common debug options. + """ + ... 
+ @overload + def __init__(__self__, + resource_name: str, + args: ItemArgs, + opts: Optional[pulumi.ResourceOptions] = None): + """ + Creates an upload session for uploading item content. For items smaller than 100 KB, it's easier to embed the content inline within an index request. This API requires an admin or service account to execute. The service account used is the one whitelisted in the corresponding data source. + Auto-naming is currently not supported for this resource. + + :param str resource_name: The name of the resource. + :param ItemArgs args: The arguments to use to populate this resource's properties. + :param pulumi.ResourceOptions opts: Options for the resource. + """ + ... + def __init__(__self__, resource_name: str, *args, **kwargs): + resource_args, opts = _utilities.get_resource_args_opts(ItemArgs, pulumi.ResourceOptions, *args, **kwargs) + if resource_args is not None: + __self__._internal_init(resource_name, opts, **resource_args.__dict__) + else: + __self__._internal_init(resource_name, *args, **kwargs) + + def _internal_init(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + connector_name: Optional[pulumi.Input[str]] = None, + datasource_id: Optional[pulumi.Input[str]] = None, + debug_options: Optional[pulumi.Input[pulumi.InputType['DebugOptionsArgs']]] = None, + item_id: Optional[pulumi.Input[str]] = None, + __props__=None): + opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) + if not isinstance(opts, pulumi.ResourceOptions): + raise TypeError('Expected resource options to be a ResourceOptions instance') + if opts.id is None: + if __props__ is not None: + raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') + __props__ = ItemArgs.__new__(ItemArgs) + + __props__.__dict__["connector_name"] = connector_name + if datasource_id is None and not opts.urn: + raise TypeError("Missing required property 'datasource_id'") + __props__.__dict__["datasource_id"] = datasource_id + __props__.__dict__["debug_options"] = debug_options + if item_id is None and not opts.urn: + raise TypeError("Missing required property 'item_id'") + __props__.__dict__["item_id"] = item_id + __props__.__dict__["acl"] = None + __props__.__dict__["content"] = None + __props__.__dict__["item_type"] = None + __props__.__dict__["metadata"] = None + __props__.__dict__["name"] = None + __props__.__dict__["payload"] = None + __props__.__dict__["queue"] = None + __props__.__dict__["status"] = None + __props__.__dict__["structured_data"] = None + __props__.__dict__["version"] = None + replace_on_changes = pulumi.ResourceOptions(replace_on_changes=["datasource_id", "item_id"]) + opts = pulumi.ResourceOptions.merge(opts, replace_on_changes) + super(Item, __self__).__init__( + 'google-native:cloudsearch/v1:Item', + resource_name, + __props__, + opts) + + @staticmethod + def get(resource_name: str, + id: pulumi.Input[str], + opts: Optional[pulumi.ResourceOptions] = None) -> 'Item': + """ + Get an existing Item resource's state with the given name, id, and optional extra + properties used to qualify the lookup. + + :param str resource_name: The unique name of the resulting resource. + :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. + :param pulumi.ResourceOptions opts: Options for the resource. 
+ """ + opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) + + __props__ = ItemArgs.__new__(ItemArgs) + + __props__.__dict__["acl"] = None + __props__.__dict__["content"] = None + __props__.__dict__["datasource_id"] = None + __props__.__dict__["item_id"] = None + __props__.__dict__["item_type"] = None + __props__.__dict__["metadata"] = None + __props__.__dict__["name"] = None + __props__.__dict__["payload"] = None + __props__.__dict__["queue"] = None + __props__.__dict__["status"] = None + __props__.__dict__["structured_data"] = None + __props__.__dict__["version"] = None + return Item(resource_name, opts=opts, __props__=__props__) + + @property + @pulumi.getter + def acl(self) -> pulumi.Output['outputs.ItemAclResponse']: + """ + Access control list for this item. + """ + return pulumi.get(self, "acl") + + @property + @pulumi.getter + def content(self) -> pulumi.Output['outputs.ItemContentResponse']: + """ + Item content to be indexed and made text searchable. + """ + return pulumi.get(self, "content") + + @property + @pulumi.getter(name="datasourceId") + def datasource_id(self) -> pulumi.Output[str]: + return pulumi.get(self, "datasource_id") + + @property + @pulumi.getter(name="itemId") + def item_id(self) -> pulumi.Output[str]: + return pulumi.get(self, "item_id") + + @property + @pulumi.getter(name="itemType") + def item_type(self) -> pulumi.Output[str]: + """ + The type for this item. + """ + return pulumi.get(self, "item_type") + + @property + @pulumi.getter + def metadata(self) -> pulumi.Output['outputs.ItemMetadataResponse']: + """ + The metadata information. + """ + return pulumi.get(self, "metadata") + + @property + @pulumi.getter + def name(self) -> pulumi.Output[str]: + """ + The name of the Item. Format: datasources/{source_id}/items/{item_id} This is a required field. The maximum length is 1536 characters. + """ + return pulumi.get(self, "name") + + @property + @pulumi.getter + def payload(self) -> pulumi.Output[str]: + """ + Additional state connector can store for this item. The maximum length is 10000 bytes. + """ + return pulumi.get(self, "payload") + + @property + @pulumi.getter + def queue(self) -> pulumi.Output[str]: + """ + Queue this item belongs to. The maximum length is 100 characters. + """ + return pulumi.get(self, "queue") + + @property + @pulumi.getter + def status(self) -> pulumi.Output['outputs.ItemStatusResponse']: + """ + Status of the item. Output only field. + """ + return pulumi.get(self, "status") + + @property + @pulumi.getter(name="structuredData") + def structured_data(self) -> pulumi.Output['outputs.ItemStructuredDataResponse']: + """ + The structured data for the item that should conform to a registered object definition in the schema for the data source. + """ + return pulumi.get(self, "structured_data") + + @property + @pulumi.getter + def version(self) -> pulumi.Output[str]: + """ + The indexing system stores the version from the datasource as a byte string and compares the Item version in the index to the version of the queued Item using lexical ordering. Cloud Search Indexing won't index or delete any queued item with a version value that is less than or equal to the version of the currently indexed item. The maximum length for this field is 1024 bytes. For information on how item version affects the deletion process, refer to [Handle revisions after manual deletes](https://developers.google.com/cloud-search/docs/guides/operations). 
+ """ + return pulumi.get(self, "version") + diff --git a/sdk/python/pulumi_google_native/cloudsearch/v1/outputs.py b/sdk/python/pulumi_google_native/cloudsearch/v1/outputs.py index a55b611a15..b87882bcb6 100644 --- a/sdk/python/pulumi_google_native/cloudsearch/v1/outputs.py +++ b/sdk/python/pulumi_google_native/cloudsearch/v1/outputs.py @@ -13,20 +13,43 @@ __all__ = [ 'CompositeFilterResponse', + 'ContextAttributeResponse', 'DataSourceRestrictionResponse', 'DateResponse', + 'DateValuesResponse', + 'DoubleValuesResponse', + 'EnumValuesResponse', 'FacetOptionsResponse', + 'FieldViolationResponse', 'FilterOptionsResponse', 'FilterResponse', 'GSuitePrincipalResponse', + 'HtmlValuesResponse', 'IntegerFacetingOptionsResponse', + 'IntegerValuesResponse', + 'InteractionResponse', + 'ItemAclResponse', + 'ItemContentResponse', + 'ItemMetadataResponse', + 'ItemStatusResponse', + 'ItemStructuredDataResponse', + 'NamedPropertyResponse', + 'ObjectValuesResponse', + 'PrincipalResponse', + 'ProcessingErrorResponse', 'QueryInterpretationConfigResponse', + 'RepositoryErrorResponse', 'ScoringConfigResponse', + 'SearchQualityMetadataResponse', 'SortOptionsResponse', 'SourceConfigResponse', 'SourceCrowdingConfigResponse', 'SourceResponse', 'SourceScoringConfigResponse', + 'StructuredDataObjectResponse', + 'TextValuesResponse', + 'TimestampValuesResponse', + 'UploadItemRefResponse', 'ValueFilterResponse', 'ValueResponse', ] @@ -79,6 +102,39 @@ def sub_filters(self) -> Sequence['outputs.FilterResponse']: return pulumi.get(self, "sub_filters") +@pulumi.output_type +class ContextAttributeResponse(dict): + """ + A named attribute associated with an item which can be used for influencing the ranking of the item based on the context in the request. + """ + def __init__(__self__, *, + name: str, + values: Sequence[str]): + """ + A named attribute associated with an item which can be used for influencing the ranking of the item based on the context in the request. + :param str name: The name of the attribute. It should not be empty. The maximum length is 32 characters. The name must start with a letter and can only contain letters (A-Z, a-z) or numbers (0-9). The name will be normalized (lower-cased) before being matched. + :param Sequence[str] values: Text values of the attribute. The maximum number of elements is 10. The maximum length of an element in the array is 32 characters. The value will be normalized (lower-cased) before being matched. + """ + pulumi.set(__self__, "name", name) + pulumi.set(__self__, "values", values) + + @property + @pulumi.getter + def name(self) -> str: + """ + The name of the attribute. It should not be empty. The maximum length is 32 characters. The name must start with a letter and can only contain letters (A-Z, a-z) or numbers (0-9). The name will be normalized (lower-cased) before being matched. + """ + return pulumi.get(self, "name") + + @property + @pulumi.getter + def values(self) -> Sequence[str]: + """ + Text values of the attribute. The maximum number of elements is 10. The maximum length of an element in the array is 32 characters. The value will be normalized (lower-cased) before being matched. + """ + return pulumi.get(self, "values") + + @pulumi.output_type class DataSourceRestrictionResponse(dict): """ @@ -173,6 +229,64 @@ def year(self) -> int: return pulumi.get(self, "year") +@pulumi.output_type +class DateValuesResponse(dict): + """ + List of date values. + """ + def __init__(__self__, *, + values: Sequence['outputs.DateResponse']): + """ + List of date values. 
+ """ + pulumi.set(__self__, "values", values) + + @property + @pulumi.getter + def values(self) -> Sequence['outputs.DateResponse']: + return pulumi.get(self, "values") + + +@pulumi.output_type +class DoubleValuesResponse(dict): + """ + List of double values. + """ + def __init__(__self__, *, + values: Sequence[float]): + """ + List of double values. + """ + pulumi.set(__self__, "values", values) + + @property + @pulumi.getter + def values(self) -> Sequence[float]: + return pulumi.get(self, "values") + + +@pulumi.output_type +class EnumValuesResponse(dict): + """ + List of enum values. + """ + def __init__(__self__, *, + values: Sequence[str]): + """ + List of enum values. + :param Sequence[str] values: The maximum allowable length for string values is 32 characters. + """ + pulumi.set(__self__, "values", values) + + @property + @pulumi.getter + def values(self) -> Sequence[str]: + """ + The maximum allowable length for string values is 32 characters. + """ + return pulumi.get(self, "values") + + @pulumi.output_type class FacetOptionsResponse(dict): """ @@ -264,6 +378,35 @@ def source_name(self) -> str: return pulumi.get(self, "source_name") +@pulumi.output_type +class FieldViolationResponse(dict): + def __init__(__self__, *, + description: str, + field: str): + """ + :param str description: The description of the error. + :param str field: Path of field with violation. + """ + pulumi.set(__self__, "description", description) + pulumi.set(__self__, "field", field) + + @property + @pulumi.getter + def description(self) -> str: + """ + The description of the error. + """ + return pulumi.get(self, "description") + + @property + @pulumi.getter + def field(self) -> str: + """ + Path of field with violation. + """ + return pulumi.get(self, "field") + + @pulumi.output_type class FilterOptionsResponse(dict): """ @@ -419,6 +562,28 @@ def gsuite_user_email(self) -> str: return pulumi.get(self, "gsuite_user_email") +@pulumi.output_type +class HtmlValuesResponse(dict): + """ + List of html values. + """ + def __init__(__self__, *, + values: Sequence[str]): + """ + List of html values. + :param Sequence[str] values: The maximum allowable length for html values is 2048 characters. + """ + pulumi.set(__self__, "values", values) + + @property + @pulumi.getter + def values(self) -> Sequence[str]: + """ + The maximum allowable length for html values is 2048 characters. + """ + return pulumi.get(self, "values") + + @pulumi.output_type class IntegerFacetingOptionsResponse(dict): """ @@ -459,271 +624,1133 @@ def integer_buckets(self) -> Sequence[str]: @pulumi.output_type -class QueryInterpretationConfigResponse(dict): +class IntegerValuesResponse(dict): """ - Default options to interpret user query. + List of integer values. + """ + def __init__(__self__, *, + values: Sequence[str]): + """ + List of integer values. + """ + pulumi.set(__self__, "values", values) + + @property + @pulumi.getter + def values(self) -> Sequence[str]: + return pulumi.get(self, "values") + + +@pulumi.output_type +class InteractionResponse(dict): + """ + Represents an interaction between a user and an item. """ @staticmethod def __key_warning(key: str): suggest = None - if key == "forceDisableSupplementalResults": - suggest = "force_disable_supplemental_results" - elif key == "forceVerbatimMode": - suggest = "force_verbatim_mode" + if key == "interactionTime": + suggest = "interaction_time" if suggest: - pulumi.log.warn(f"Key '{key}' not found in QueryInterpretationConfigResponse. 
Access the value via the '{suggest}' property getter instead.") + pulumi.log.warn(f"Key '{key}' not found in InteractionResponse. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: - QueryInterpretationConfigResponse.__key_warning(key) + InteractionResponse.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: - QueryInterpretationConfigResponse.__key_warning(key) + InteractionResponse.__key_warning(key) return super().get(key, default) def __init__(__self__, *, - force_disable_supplemental_results: bool, - force_verbatim_mode: bool): + interaction_time: str, + principal: 'outputs.PrincipalResponse', + type: str): """ - Default options to interpret user query. - :param bool force_disable_supplemental_results: Set this flag to disable supplemental results retrieval, setting a flag here will not retrieve supplemental results for queries associated with a given search application. If this flag is set to True, it will take precedence over the option set at Query level. For the default value of False, query level flag will set the correct interpretation for supplemental results. - :param bool force_verbatim_mode: Enable this flag to turn off all internal optimizations like natural language (NL) interpretation of queries, supplemental results retrieval, and usage of synonyms including custom ones. If this flag is set to True, it will take precedence over the option set at Query level. For the default value of False, query level flag will set the correct interpretation for verbatim mode. + Represents an interaction between a user and an item. + :param str interaction_time: The time when the user acted on the item. If multiple actions of the same type exist for a single user, only the most recent action is recorded. + :param 'PrincipalResponse' principal: The user that acted on the item. """ - pulumi.set(__self__, "force_disable_supplemental_results", force_disable_supplemental_results) - pulumi.set(__self__, "force_verbatim_mode", force_verbatim_mode) + pulumi.set(__self__, "interaction_time", interaction_time) + pulumi.set(__self__, "principal", principal) + pulumi.set(__self__, "type", type) @property - @pulumi.getter(name="forceDisableSupplementalResults") - def force_disable_supplemental_results(self) -> bool: + @pulumi.getter(name="interactionTime") + def interaction_time(self) -> str: """ - Set this flag to disable supplemental results retrieval, setting a flag here will not retrieve supplemental results for queries associated with a given search application. If this flag is set to True, it will take precedence over the option set at Query level. For the default value of False, query level flag will set the correct interpretation for supplemental results. + The time when the user acted on the item. If multiple actions of the same type exist for a single user, only the most recent action is recorded. """ - return pulumi.get(self, "force_disable_supplemental_results") + return pulumi.get(self, "interaction_time") @property - @pulumi.getter(name="forceVerbatimMode") - def force_verbatim_mode(self) -> bool: + @pulumi.getter + def principal(self) -> 'outputs.PrincipalResponse': """ - Enable this flag to turn off all internal optimizations like natural language (NL) interpretation of queries, supplemental results retrieval, and usage of synonyms including custom ones. If this flag is set to True, it will take precedence over the option set at Query level. 
For the default value of False, query level flag will set the correct interpretation for verbatim mode. + The user that acted on the item. """ - return pulumi.get(self, "force_verbatim_mode") + return pulumi.get(self, "principal") + + @property + @pulumi.getter + def type(self) -> str: + return pulumi.get(self, "type") @pulumi.output_type -class ScoringConfigResponse(dict): +class ItemAclResponse(dict): """ - Scoring configurations for a source while processing a Search or Suggest request. + Access control list information for the item. For more information see [Map ACLs](https://developers.google.com/cloud-search/docs/guides/acls). """ @staticmethod def __key_warning(key: str): suggest = None - if key == "disableFreshness": - suggest = "disable_freshness" - elif key == "disablePersonalization": - suggest = "disable_personalization" + if key == "aclInheritanceType": + suggest = "acl_inheritance_type" + elif key == "deniedReaders": + suggest = "denied_readers" + elif key == "inheritAclFrom": + suggest = "inherit_acl_from" if suggest: - pulumi.log.warn(f"Key '{key}' not found in ScoringConfigResponse. Access the value via the '{suggest}' property getter instead.") + pulumi.log.warn(f"Key '{key}' not found in ItemAclResponse. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: - ScoringConfigResponse.__key_warning(key) + ItemAclResponse.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: - ScoringConfigResponse.__key_warning(key) + ItemAclResponse.__key_warning(key) return super().get(key, default) def __init__(__self__, *, - disable_freshness: bool, - disable_personalization: bool): - """ - Scoring configurations for a source while processing a Search or Suggest request. - :param bool disable_freshness: Whether to use freshness as a ranking signal. By default, freshness is used as a ranking signal. Note that this setting is not available in the Admin UI. - :param bool disable_personalization: Whether to personalize the results. By default, personal signals will be used to boost results. - """ - pulumi.set(__self__, "disable_freshness", disable_freshness) - pulumi.set(__self__, "disable_personalization", disable_personalization) + acl_inheritance_type: str, + denied_readers: Sequence['outputs.PrincipalResponse'], + inherit_acl_from: str, + owners: Sequence['outputs.PrincipalResponse'], + readers: Sequence['outputs.PrincipalResponse']): + """ + Access control list information for the item. For more information see [Map ACLs](https://developers.google.com/cloud-search/docs/guides/acls). + :param str acl_inheritance_type: Sets the type of access rules to apply when an item inherits its ACL from a parent. This should always be set in tandem with the inheritAclFrom field. Also, when the inheritAclFrom field is set, this field should be set to a valid AclInheritanceType. + :param Sequence['PrincipalResponse'] denied_readers: List of principals who are explicitly denied access to the item in search results. While principals are denied access by default, use denied readers to handle exceptions and override the list allowed readers. The maximum number of elements is 100. + :param str inherit_acl_from: The name of the item to inherit the Access Permission List (ACL) from. Note: ACL inheritance *only* provides access permissions to child items and does not define structural relationships, nor does it provide convenient ways to delete large groups of items. 
Deleting an ACL parent from the index only alters the access permissions of child items that reference the parent in the inheritAclFrom field. The item is still in the index, but may not visible in search results. By contrast, deletion of a container item also deletes all items that reference the container via the containerName field. The maximum length for this field is 1536 characters. + :param Sequence['PrincipalResponse'] owners: Optional. List of owners for the item. This field has no bearing on document access permissions. It does, however, offer a slight ranking boosts items where the querying user is an owner. The maximum number of elements is 5. + :param Sequence['PrincipalResponse'] readers: List of principals who are allowed to see the item in search results. Optional if inheriting permissions from another item or if the item is not intended to be visible, such as virtual containers. The maximum number of elements is 1000. + """ + pulumi.set(__self__, "acl_inheritance_type", acl_inheritance_type) + pulumi.set(__self__, "denied_readers", denied_readers) + pulumi.set(__self__, "inherit_acl_from", inherit_acl_from) + pulumi.set(__self__, "owners", owners) + pulumi.set(__self__, "readers", readers) @property - @pulumi.getter(name="disableFreshness") - def disable_freshness(self) -> bool: + @pulumi.getter(name="aclInheritanceType") + def acl_inheritance_type(self) -> str: """ - Whether to use freshness as a ranking signal. By default, freshness is used as a ranking signal. Note that this setting is not available in the Admin UI. + Sets the type of access rules to apply when an item inherits its ACL from a parent. This should always be set in tandem with the inheritAclFrom field. Also, when the inheritAclFrom field is set, this field should be set to a valid AclInheritanceType. """ - return pulumi.get(self, "disable_freshness") + return pulumi.get(self, "acl_inheritance_type") @property - @pulumi.getter(name="disablePersonalization") - def disable_personalization(self) -> bool: + @pulumi.getter(name="deniedReaders") + def denied_readers(self) -> Sequence['outputs.PrincipalResponse']: """ - Whether to personalize the results. By default, personal signals will be used to boost results. + List of principals who are explicitly denied access to the item in search results. While principals are denied access by default, use denied readers to handle exceptions and override the list allowed readers. The maximum number of elements is 100. """ - return pulumi.get(self, "disable_personalization") - - -@pulumi.output_type -class SortOptionsResponse(dict): - @staticmethod - def __key_warning(key: str): - suggest = None - if key == "operatorName": - suggest = "operator_name" - elif key == "sortOrder": - suggest = "sort_order" - - if suggest: - pulumi.log.warn(f"Key '{key}' not found in SortOptionsResponse. Access the value via the '{suggest}' property getter instead.") - - def __getitem__(self, key: str) -> Any: - SortOptionsResponse.__key_warning(key) - return super().__getitem__(key) - - def get(self, key: str, default = None) -> Any: - SortOptionsResponse.__key_warning(key) - return super().get(key, default) + return pulumi.get(self, "denied_readers") - def __init__(__self__, *, - operator_name: str, - sort_order: str): + @property + @pulumi.getter(name="inheritAclFrom") + def inherit_acl_from(self) -> str: """ - :param str operator_name: The name of the operator corresponding to the field to sort on. The corresponding property must be marked as sortable. 
- :param str sort_order: Ascending is the default sort order + The name of the item to inherit the Access Permission List (ACL) from. Note: ACL inheritance *only* provides access permissions to child items and does not define structural relationships, nor does it provide convenient ways to delete large groups of items. Deleting an ACL parent from the index only alters the access permissions of child items that reference the parent in the inheritAclFrom field. The item is still in the index, but may not visible in search results. By contrast, deletion of a container item also deletes all items that reference the container via the containerName field. The maximum length for this field is 1536 characters. """ - pulumi.set(__self__, "operator_name", operator_name) - pulumi.set(__self__, "sort_order", sort_order) + return pulumi.get(self, "inherit_acl_from") @property - @pulumi.getter(name="operatorName") - def operator_name(self) -> str: + @pulumi.getter + def owners(self) -> Sequence['outputs.PrincipalResponse']: """ - The name of the operator corresponding to the field to sort on. The corresponding property must be marked as sortable. + Optional. List of owners for the item. This field has no bearing on document access permissions. It does, however, offer a slight ranking boosts items where the querying user is an owner. The maximum number of elements is 5. """ - return pulumi.get(self, "operator_name") + return pulumi.get(self, "owners") @property - @pulumi.getter(name="sortOrder") - def sort_order(self) -> str: + @pulumi.getter + def readers(self) -> Sequence['outputs.PrincipalResponse']: """ - Ascending is the default sort order + List of principals who are allowed to see the item in search results. Optional if inheriting permissions from another item or if the item is not intended to be visible, such as virtual containers. The maximum number of elements is 1000. """ - return pulumi.get(self, "sort_order") + return pulumi.get(self, "readers") @pulumi.output_type -class SourceConfigResponse(dict): +class ItemContentResponse(dict): """ - Configurations for a source while processing a Search or Suggest request. + Content of an item to be indexed and surfaced by Cloud Search. Only UTF-8 encoded strings are allowed as inlineContent. If the content is uploaded and not binary, it must be UTF-8 encoded. """ @staticmethod def __key_warning(key: str): suggest = None - if key == "crowdingConfig": - suggest = "crowding_config" - elif key == "scoringConfig": - suggest = "scoring_config" + if key == "contentDataRef": + suggest = "content_data_ref" + elif key == "contentFormat": + suggest = "content_format" + elif key == "inlineContent": + suggest = "inline_content" if suggest: - pulumi.log.warn(f"Key '{key}' not found in SourceConfigResponse. Access the value via the '{suggest}' property getter instead.") + pulumi.log.warn(f"Key '{key}' not found in ItemContentResponse. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: - SourceConfigResponse.__key_warning(key) + ItemContentResponse.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: - SourceConfigResponse.__key_warning(key) + ItemContentResponse.__key_warning(key) return super().get(key, default) def __init__(__self__, *, - crowding_config: 'outputs.SourceCrowdingConfigResponse', - scoring_config: 'outputs.SourceScoringConfigResponse', - source: 'outputs.SourceResponse'): + content_data_ref: 'outputs.UploadItemRefResponse', + content_format: str, + hash: str, + inline_content: str): """ - Configurations for a source while processing a Search or Suggest request. - :param 'SourceCrowdingConfigResponse' crowding_config: The crowding configuration for the source. - :param 'SourceScoringConfigResponse' scoring_config: The scoring configuration for the source. - :param 'SourceResponse' source: The source for which this configuration is to be used. + Content of an item to be indexed and surfaced by Cloud Search. Only UTF-8 encoded strings are allowed as inlineContent. If the content is uploaded and not binary, it must be UTF-8 encoded. + :param 'UploadItemRefResponse' content_data_ref: Upload reference ID of a previously uploaded content via write method. + :param str hash: Hashing info calculated and provided by the API client for content. Can be used with the items.push method to calculate modified state. The maximum length is 2048 characters. + :param str inline_content: Content that is supplied inlined within the update method. The maximum length is 102400 bytes (100 KiB). """ - pulumi.set(__self__, "crowding_config", crowding_config) - pulumi.set(__self__, "scoring_config", scoring_config) - pulumi.set(__self__, "source", source) + pulumi.set(__self__, "content_data_ref", content_data_ref) + pulumi.set(__self__, "content_format", content_format) + pulumi.set(__self__, "hash", hash) + pulumi.set(__self__, "inline_content", inline_content) @property - @pulumi.getter(name="crowdingConfig") - def crowding_config(self) -> 'outputs.SourceCrowdingConfigResponse': + @pulumi.getter(name="contentDataRef") + def content_data_ref(self) -> 'outputs.UploadItemRefResponse': """ - The crowding configuration for the source. + Upload reference ID of a previously uploaded content via write method. """ - return pulumi.get(self, "crowding_config") + return pulumi.get(self, "content_data_ref") @property - @pulumi.getter(name="scoringConfig") - def scoring_config(self) -> 'outputs.SourceScoringConfigResponse': + @pulumi.getter(name="contentFormat") + def content_format(self) -> str: + return pulumi.get(self, "content_format") + + @property + @pulumi.getter + def hash(self) -> str: """ - The scoring configuration for the source. + Hashing info calculated and provided by the API client for content. Can be used with the items.push method to calculate modified state. The maximum length is 2048 characters. """ - return pulumi.get(self, "scoring_config") + return pulumi.get(self, "hash") @property - @pulumi.getter - def source(self) -> 'outputs.SourceResponse': + @pulumi.getter(name="inlineContent") + def inline_content(self) -> str: """ - The source for which this configuration is to be used. + Content that is supplied inlined within the update method. The maximum length is 102400 bytes (100 KiB). 
""" - return pulumi.get(self, "source") + return pulumi.get(self, "inline_content") @pulumi.output_type -class SourceCrowdingConfigResponse(dict): +class ItemMetadataResponse(dict): """ - Set search results crowding limits. Crowding is a situation in which multiple results from the same source or host "crowd out" other results, diminishing the quality of search for users. To foster better search quality and source diversity in search results, you can set a condition to reduce repetitive results by source. + Available metadata fields for the item. """ @staticmethod def __key_warning(key: str): suggest = None - if key == "numResults": - suggest = "num_results" - elif key == "numSuggestions": - suggest = "num_suggestions" + if key == "containerName": + suggest = "container_name" + elif key == "contentLanguage": + suggest = "content_language" + elif key == "contextAttributes": + suggest = "context_attributes" + elif key == "createTime": + suggest = "create_time" + elif key == "mimeType": + suggest = "mime_type" + elif key == "objectType": + suggest = "object_type" + elif key == "searchQualityMetadata": + suggest = "search_quality_metadata" + elif key == "sourceRepositoryUrl": + suggest = "source_repository_url" + elif key == "updateTime": + suggest = "update_time" if suggest: - pulumi.log.warn(f"Key '{key}' not found in SourceCrowdingConfigResponse. Access the value via the '{suggest}' property getter instead.") + pulumi.log.warn(f"Key '{key}' not found in ItemMetadataResponse. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: - SourceCrowdingConfigResponse.__key_warning(key) + ItemMetadataResponse.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: - SourceCrowdingConfigResponse.__key_warning(key) + ItemMetadataResponse.__key_warning(key) return super().get(key, default) def __init__(__self__, *, - num_results: int, - num_suggestions: int): + container_name: str, + content_language: str, + context_attributes: Sequence['outputs.ContextAttributeResponse'], + create_time: str, + hash: str, + interactions: Sequence['outputs.InteractionResponse'], + keywords: Sequence[str], + mime_type: str, + object_type: str, + search_quality_metadata: 'outputs.SearchQualityMetadataResponse', + source_repository_url: str, + title: str, + update_time: str): + """ + Available metadata fields for the item. + :param str container_name: The name of the container for this item. Deletion of the container item leads to automatic deletion of this item. Note: ACLs are not inherited from a container item. To provide ACL inheritance for an item, use the inheritAclFrom field. The maximum length is 1536 characters. + :param str content_language: The BCP-47 language code for the item, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. The maximum length is 32 characters. + :param Sequence['ContextAttributeResponse'] context_attributes: A set of named attributes associated with the item. This can be used for influencing the ranking of the item based on the context in the request. The maximum number of elements is 10. + :param str create_time: The time when the item was created in the source repository. + :param str hash: Hashing value provided by the API caller. This can be used with the items.push method to calculate modified state. The maximum length is 2048 characters. 
+ :param Sequence['InteractionResponse'] interactions: A list of interactions for the item. Interactions are used to improve Search quality, but are not exposed to end users. The maximum number of elements is 1000.
+ :param Sequence[str] keywords: Additional keywords or phrases that should match the item. Used internally for user generated content. The maximum number of elements is 100. The maximum length is 8192 characters.
+ :param str mime_type: The original mime-type of ItemContent.content in the source repository. The maximum length is 256 characters.
+ :param str object_type: The type of the item. This should correspond to the name of an object definition in the schema registered for the data source. For example, if the schema for the data source contains an object definition with name 'document', then item indexing requests for objects of that type should set objectType to 'document'. The maximum length is 256 characters.
+ :param 'SearchQualityMetadataResponse' search_quality_metadata: Additional search quality metadata of the item
+ :param str source_repository_url: Link to the source repository serving the data. Search results apply this link to the title. Whitespace or special characters may cause Cloud Search result links to trigger a redirect notice; to avoid this, encode the URL. The maximum length is 2048 characters.
+ :param str title: The title of the item. If given, this will be the displayed title of the Search result. The maximum length is 2048 characters.
+ :param str update_time: The time when the item was last modified in the source repository.
+ """
+ pulumi.set(__self__, "container_name", container_name)
+ pulumi.set(__self__, "content_language", content_language)
+ pulumi.set(__self__, "context_attributes", context_attributes)
+ pulumi.set(__self__, "create_time", create_time)
+ pulumi.set(__self__, "hash", hash)
+ pulumi.set(__self__, "interactions", interactions)
+ pulumi.set(__self__, "keywords", keywords)
+ pulumi.set(__self__, "mime_type", mime_type)
+ pulumi.set(__self__, "object_type", object_type)
+ pulumi.set(__self__, "search_quality_metadata", search_quality_metadata)
+ pulumi.set(__self__, "source_repository_url", source_repository_url)
+ pulumi.set(__self__, "title", title)
+ pulumi.set(__self__, "update_time", update_time)
+
+ @property
+ @pulumi.getter(name="containerName")
+ def container_name(self) -> str:
+ """
+ The name of the container for this item. Deletion of the container item leads to automatic deletion of this item. Note: ACLs are not inherited from a container item. To provide ACL inheritance for an item, use the inheritAclFrom field. The maximum length is 1536 characters.
""" - pulumi.set(__self__, "num_results", num_results) - pulumi.set(__self__, "num_suggestions", num_suggestions) + return pulumi.get(self, "container_name") @property - @pulumi.getter(name="numResults") - def num_results(self) -> int: + @pulumi.getter(name="contentLanguage") + def content_language(self) -> str: """ - Maximum number of results allowed from a datasource in a result page as long as results from other sources are not exhausted. Value specified must not be negative. A default value is used if this value is equal to 0. To disable crowding, set the value greater than 100. + The BCP-47 language code for the item, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. The maximum length is 32 characters. """ - return pulumi.get(self, "num_results") + return pulumi.get(self, "content_language") @property - @pulumi.getter(name="numSuggestions") - def num_suggestions(self) -> int: + @pulumi.getter(name="contextAttributes") + def context_attributes(self) -> Sequence['outputs.ContextAttributeResponse']: """ - Maximum number of suggestions allowed from a source. No limits will be set on results if this value is less than or equal to 0. + A set of named attributes associated with the item. This can be used for influencing the ranking of the item based on the context in the request. The maximum number of elements is 10. """ - return pulumi.get(self, "num_suggestions") - + return pulumi.get(self, "context_attributes") + + @property + @pulumi.getter(name="createTime") + def create_time(self) -> str: + """ + The time when the item was created in the source repository. + """ + return pulumi.get(self, "create_time") + + @property + @pulumi.getter + def hash(self) -> str: + """ + Hashing value provided by the API caller. This can be used with the items.push method to calculate modified state. The maximum length is 2048 characters. + """ + return pulumi.get(self, "hash") + + @property + @pulumi.getter + def interactions(self) -> Sequence['outputs.InteractionResponse']: + """ + A list of interactions for the item. Interactions are used to improve Search quality, but are not exposed to end users. The maximum number of elements is 1000. + """ + return pulumi.get(self, "interactions") + + @property + @pulumi.getter + def keywords(self) -> Sequence[str]: + """ + Additional keywords or phrases that should match the item. Used internally for user generated content. The maximum number of elements is 100. The maximum length is 8192 characters. + """ + return pulumi.get(self, "keywords") + + @property + @pulumi.getter(name="mimeType") + def mime_type(self) -> str: + """ + The original mime-type of ItemContent.content in the source repository. The maximum length is 256 characters. + """ + return pulumi.get(self, "mime_type") + + @property + @pulumi.getter(name="objectType") + def object_type(self) -> str: + """ + The type of the item. This should correspond to the name of an object definition in the schema registered for the data source. For example, if the schema for the data source contains an object definition with name 'document', then item indexing requests for objects of that type should set objectType to 'document'. The maximum length is 256 characters. 
+ """
+ return pulumi.get(self, "object_type")
+
+ @property
+ @pulumi.getter(name="searchQualityMetadata")
+ def search_quality_metadata(self) -> 'outputs.SearchQualityMetadataResponse':
+ """
+ Additional search quality metadata of the item
+ """
+ return pulumi.get(self, "search_quality_metadata")
+
+ @property
+ @pulumi.getter(name="sourceRepositoryUrl")
+ def source_repository_url(self) -> str:
+ """
+ Link to the source repository serving the data. Search results apply this link to the title. Whitespace or special characters may cause Cloud Search result links to trigger a redirect notice; to avoid this, encode the URL. The maximum length is 2048 characters.
+ """
+ return pulumi.get(self, "source_repository_url")
+
+ @property
+ @pulumi.getter
+ def title(self) -> str:
+ """
+ The title of the item. If given, this will be the displayed title of the Search result. The maximum length is 2048 characters.
+ """
+ return pulumi.get(self, "title")
+
+ @property
+ @pulumi.getter(name="updateTime")
+ def update_time(self) -> str:
+ """
+ The time when the item was last modified in the source repository.
+ """
+ return pulumi.get(self, "update_time")
+
+
+@pulumi.output_type
+class ItemStatusResponse(dict):
+ """
+ This contains the item's status and any errors.
+ """
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "processingErrors":
+ suggest = "processing_errors"
+ elif key == "repositoryErrors":
+ suggest = "repository_errors"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in ItemStatusResponse. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ ItemStatusResponse.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ ItemStatusResponse.__key_warning(key)
+ return super().get(key, default)
+
+ def __init__(__self__, *,
+ code: str,
+ processing_errors: Sequence['outputs.ProcessingErrorResponse'],
+ repository_errors: Sequence['outputs.RepositoryErrorResponse']):
+ """
+ This contains the item's status and any errors.
+ :param str code: Status code.
+ :param Sequence['ProcessingErrorResponse'] processing_errors: Error details in case the item is in ERROR state.
+ :param Sequence['RepositoryErrorResponse'] repository_errors: Repository error reported by connector.
+ """
+ pulumi.set(__self__, "code", code)
+ pulumi.set(__self__, "processing_errors", processing_errors)
+ pulumi.set(__self__, "repository_errors", repository_errors)
+
+ @property
+ @pulumi.getter
+ def code(self) -> str:
+ """
+ Status code.
+ """
+ return pulumi.get(self, "code")
+
+ @property
+ @pulumi.getter(name="processingErrors")
+ def processing_errors(self) -> Sequence['outputs.ProcessingErrorResponse']:
+ """
+ Error details in case the item is in ERROR state.
+ """
+ return pulumi.get(self, "processing_errors")
+
+ @property
+ @pulumi.getter(name="repositoryErrors")
+ def repository_errors(self) -> Sequence['outputs.RepositoryErrorResponse']:
+ """
+ Repository error reported by connector.
+ """
+ return pulumi.get(self, "repository_errors")
+
+
+@pulumi.output_type
+class ItemStructuredDataResponse(dict):
+ """
+ Available structured data fields for the item.
+ """
+ def __init__(__self__, *,
+ hash: str,
+ object: 'outputs.StructuredDataObjectResponse'):
+ """
+ Available structured data fields for the item.
+ :param str hash: Hashing value provided by the API caller. This can be used with the items.push method to calculate modified state.
The maximum length is 2048 characters. + :param 'StructuredDataObjectResponse' object: The structured data object that should conform to a registered object definition in the schema for the data source. + """ + pulumi.set(__self__, "hash", hash) + pulumi.set(__self__, "object", object) + + @property + @pulumi.getter + def hash(self) -> str: + """ + Hashing value provided by the API caller. This can be used with the items.push method to calculate modified state. The maximum length is 2048 characters. + """ + return pulumi.get(self, "hash") + + @property + @pulumi.getter + def object(self) -> 'outputs.StructuredDataObjectResponse': + """ + The structured data object that should conform to a registered object definition in the schema for the data source. + """ + return pulumi.get(self, "object") + + +@pulumi.output_type +class NamedPropertyResponse(dict): + """ + A typed name-value pair for structured data. The type of the value should be the same as the registered type for the `name` property in the object definition of `objectType`. + """ + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "booleanValue": + suggest = "boolean_value" + elif key == "dateValues": + suggest = "date_values" + elif key == "doubleValues": + suggest = "double_values" + elif key == "enumValues": + suggest = "enum_values" + elif key == "htmlValues": + suggest = "html_values" + elif key == "integerValues": + suggest = "integer_values" + elif key == "objectValues": + suggest = "object_values" + elif key == "textValues": + suggest = "text_values" + elif key == "timestampValues": + suggest = "timestamp_values" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in NamedPropertyResponse. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + NamedPropertyResponse.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + NamedPropertyResponse.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + boolean_value: bool, + date_values: 'outputs.DateValuesResponse', + double_values: 'outputs.DoubleValuesResponse', + enum_values: 'outputs.EnumValuesResponse', + html_values: 'outputs.HtmlValuesResponse', + integer_values: 'outputs.IntegerValuesResponse', + name: str, + object_values: 'outputs.ObjectValuesResponse', + text_values: 'outputs.TextValuesResponse', + timestamp_values: 'outputs.TimestampValuesResponse'): + """ + A typed name-value pair for structured data. The type of the value should be the same as the registered type for the `name` property in the object definition of `objectType`. + :param str name: The name of the property. This name should correspond to the name of the property that was registered for object definition in the schema. The maximum allowable length for this property is 256 characters. 
+ """ + pulumi.set(__self__, "boolean_value", boolean_value) + pulumi.set(__self__, "date_values", date_values) + pulumi.set(__self__, "double_values", double_values) + pulumi.set(__self__, "enum_values", enum_values) + pulumi.set(__self__, "html_values", html_values) + pulumi.set(__self__, "integer_values", integer_values) + pulumi.set(__self__, "name", name) + pulumi.set(__self__, "object_values", object_values) + pulumi.set(__self__, "text_values", text_values) + pulumi.set(__self__, "timestamp_values", timestamp_values) + + @property + @pulumi.getter(name="booleanValue") + def boolean_value(self) -> bool: + return pulumi.get(self, "boolean_value") + + @property + @pulumi.getter(name="dateValues") + def date_values(self) -> 'outputs.DateValuesResponse': + return pulumi.get(self, "date_values") + + @property + @pulumi.getter(name="doubleValues") + def double_values(self) -> 'outputs.DoubleValuesResponse': + return pulumi.get(self, "double_values") + + @property + @pulumi.getter(name="enumValues") + def enum_values(self) -> 'outputs.EnumValuesResponse': + return pulumi.get(self, "enum_values") + + @property + @pulumi.getter(name="htmlValues") + def html_values(self) -> 'outputs.HtmlValuesResponse': + return pulumi.get(self, "html_values") + + @property + @pulumi.getter(name="integerValues") + def integer_values(self) -> 'outputs.IntegerValuesResponse': + return pulumi.get(self, "integer_values") + + @property + @pulumi.getter + def name(self) -> str: + """ + The name of the property. This name should correspond to the name of the property that was registered for object definition in the schema. The maximum allowable length for this property is 256 characters. + """ + return pulumi.get(self, "name") + + @property + @pulumi.getter(name="objectValues") + def object_values(self) -> 'outputs.ObjectValuesResponse': + return pulumi.get(self, "object_values") + + @property + @pulumi.getter(name="textValues") + def text_values(self) -> 'outputs.TextValuesResponse': + return pulumi.get(self, "text_values") + + @property + @pulumi.getter(name="timestampValues") + def timestamp_values(self) -> 'outputs.TimestampValuesResponse': + return pulumi.get(self, "timestamp_values") + + +@pulumi.output_type +class ObjectValuesResponse(dict): + """ + List of object values. + """ + def __init__(__self__, *, + values: Sequence['outputs.StructuredDataObjectResponse']): + """ + List of object values. + """ + pulumi.set(__self__, "values", values) + + @property + @pulumi.getter + def values(self) -> Sequence['outputs.StructuredDataObjectResponse']: + return pulumi.get(self, "values") + + +@pulumi.output_type +class PrincipalResponse(dict): + """ + Reference to a user, group, or domain. + """ + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "groupResourceName": + suggest = "group_resource_name" + elif key == "gsuitePrincipal": + suggest = "gsuite_principal" + elif key == "userResourceName": + suggest = "user_resource_name" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in PrincipalResponse. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + PrincipalResponse.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + PrincipalResponse.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + group_resource_name: str, + gsuite_principal: 'outputs.GSuitePrincipalResponse', + user_resource_name: str): + """ + Reference to a user, group, or domain. 
+ :param str group_resource_name: This principal is a group identified using an external identity. The name field must specify the group resource name with this format: identitysources/{source_id}/groups/{ID} + :param 'GSuitePrincipalResponse' gsuite_principal: This principal is a Google Workspace user, group or domain. + :param str user_resource_name: This principal is a user identified using an external identity. The name field must specify the user resource name with this format: identitysources/{source_id}/users/{ID} + """ + pulumi.set(__self__, "group_resource_name", group_resource_name) + pulumi.set(__self__, "gsuite_principal", gsuite_principal) + pulumi.set(__self__, "user_resource_name", user_resource_name) + + @property + @pulumi.getter(name="groupResourceName") + def group_resource_name(self) -> str: + """ + This principal is a group identified using an external identity. The name field must specify the group resource name with this format: identitysources/{source_id}/groups/{ID} + """ + return pulumi.get(self, "group_resource_name") + + @property + @pulumi.getter(name="gsuitePrincipal") + def gsuite_principal(self) -> 'outputs.GSuitePrincipalResponse': + """ + This principal is a Google Workspace user, group or domain. + """ + return pulumi.get(self, "gsuite_principal") + + @property + @pulumi.getter(name="userResourceName") + def user_resource_name(self) -> str: + """ + This principal is a user identified using an external identity. The name field must specify the user resource name with this format: identitysources/{source_id}/users/{ID} + """ + return pulumi.get(self, "user_resource_name") + + +@pulumi.output_type +class ProcessingErrorResponse(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "errorMessage": + suggest = "error_message" + elif key == "fieldViolations": + suggest = "field_violations" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in ProcessingErrorResponse. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + ProcessingErrorResponse.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + ProcessingErrorResponse.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + code: str, + error_message: str, + field_violations: Sequence['outputs.FieldViolationResponse']): + """ + :param str code: Error code indicating the nature of the error. + :param str error_message: The description of the error. + :param Sequence['FieldViolationResponse'] field_violations: In case the item fields are invalid, this field contains the details about the validation errors. + """ + pulumi.set(__self__, "code", code) + pulumi.set(__self__, "error_message", error_message) + pulumi.set(__self__, "field_violations", field_violations) + + @property + @pulumi.getter + def code(self) -> str: + """ + Error code indicating the nature of the error. + """ + return pulumi.get(self, "code") + + @property + @pulumi.getter(name="errorMessage") + def error_message(self) -> str: + """ + The description of the error. + """ + return pulumi.get(self, "error_message") + + @property + @pulumi.getter(name="fieldViolations") + def field_violations(self) -> Sequence['outputs.FieldViolationResponse']: + """ + In case the item fields are invalid, this field contains the details about the validation errors. 
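# --- Illustrative sketch, not part of the generated diff above ---
# The principal reference formats described by PrincipalResponse, written out as plain
# dictionaries. SOURCE_ID / GROUP_ID / USER_ID are placeholders, and normally exactly
# one of the three fields is set. The gsuite_domain field name is an assumption about
# the GSuitePrincipal type, which is defined elsewhere in this file.
group_principal = {"group_resource_name": "identitysources/SOURCE_ID/groups/GROUP_ID"}
user_principal = {"user_resource_name": "identitysources/SOURCE_ID/users/USER_ID"}
workspace_principal = {"gsuite_principal": {"gsuite_domain": True}}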
+ """ + return pulumi.get(self, "field_violations") + + +@pulumi.output_type +class QueryInterpretationConfigResponse(dict): + """ + Default options to interpret user query. + """ + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "forceDisableSupplementalResults": + suggest = "force_disable_supplemental_results" + elif key == "forceVerbatimMode": + suggest = "force_verbatim_mode" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in QueryInterpretationConfigResponse. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + QueryInterpretationConfigResponse.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + QueryInterpretationConfigResponse.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + force_disable_supplemental_results: bool, + force_verbatim_mode: bool): + """ + Default options to interpret user query. + :param bool force_disable_supplemental_results: Set this flag to disable supplemental results retrieval, setting a flag here will not retrieve supplemental results for queries associated with a given search application. If this flag is set to True, it will take precedence over the option set at Query level. For the default value of False, query level flag will set the correct interpretation for supplemental results. + :param bool force_verbatim_mode: Enable this flag to turn off all internal optimizations like natural language (NL) interpretation of queries, supplemental results retrieval, and usage of synonyms including custom ones. If this flag is set to True, it will take precedence over the option set at Query level. For the default value of False, query level flag will set the correct interpretation for verbatim mode. + """ + pulumi.set(__self__, "force_disable_supplemental_results", force_disable_supplemental_results) + pulumi.set(__self__, "force_verbatim_mode", force_verbatim_mode) + + @property + @pulumi.getter(name="forceDisableSupplementalResults") + def force_disable_supplemental_results(self) -> bool: + """ + Set this flag to disable supplemental results retrieval, setting a flag here will not retrieve supplemental results for queries associated with a given search application. If this flag is set to True, it will take precedence over the option set at Query level. For the default value of False, query level flag will set the correct interpretation for supplemental results. + """ + return pulumi.get(self, "force_disable_supplemental_results") + + @property + @pulumi.getter(name="forceVerbatimMode") + def force_verbatim_mode(self) -> bool: + """ + Enable this flag to turn off all internal optimizations like natural language (NL) interpretation of queries, supplemental results retrieval, and usage of synonyms including custom ones. If this flag is set to True, it will take precedence over the option set at Query level. For the default value of False, query level flag will set the correct interpretation for verbatim mode. + """ + return pulumi.get(self, "force_verbatim_mode") + + +@pulumi.output_type +class RepositoryErrorResponse(dict): + """ + Errors when the connector is communicating to the source repository. + """ + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "errorMessage": + suggest = "error_message" + elif key == "httpStatusCode": + suggest = "http_status_code" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in RepositoryErrorResponse. 
Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + RepositoryErrorResponse.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + RepositoryErrorResponse.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + error_message: str, + http_status_code: int, + type: str): + """ + Errors when the connector is communicating to the source repository. + :param str error_message: Message that describes the error. The maximum allowable length of the message is 8192 characters. + :param int http_status_code: Error codes. Matches the definition of HTTP status codes. + :param str type: The type of error. + """ + pulumi.set(__self__, "error_message", error_message) + pulumi.set(__self__, "http_status_code", http_status_code) + pulumi.set(__self__, "type", type) + + @property + @pulumi.getter(name="errorMessage") + def error_message(self) -> str: + """ + Message that describes the error. The maximum allowable length of the message is 8192 characters. + """ + return pulumi.get(self, "error_message") + + @property + @pulumi.getter(name="httpStatusCode") + def http_status_code(self) -> int: + """ + Error codes. Matches the definition of HTTP status codes. + """ + return pulumi.get(self, "http_status_code") + + @property + @pulumi.getter + def type(self) -> str: + """ + The type of error. + """ + return pulumi.get(self, "type") + + +@pulumi.output_type +class ScoringConfigResponse(dict): + """ + Scoring configurations for a source while processing a Search or Suggest request. + """ + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "disableFreshness": + suggest = "disable_freshness" + elif key == "disablePersonalization": + suggest = "disable_personalization" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in ScoringConfigResponse. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + ScoringConfigResponse.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + ScoringConfigResponse.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + disable_freshness: bool, + disable_personalization: bool): + """ + Scoring configurations for a source while processing a Search or Suggest request. + :param bool disable_freshness: Whether to use freshness as a ranking signal. By default, freshness is used as a ranking signal. Note that this setting is not available in the Admin UI. + :param bool disable_personalization: Whether to personalize the results. By default, personal signals will be used to boost results. + """ + pulumi.set(__self__, "disable_freshness", disable_freshness) + pulumi.set(__self__, "disable_personalization", disable_personalization) + + @property + @pulumi.getter(name="disableFreshness") + def disable_freshness(self) -> bool: + """ + Whether to use freshness as a ranking signal. By default, freshness is used as a ranking signal. Note that this setting is not available in the Admin UI. + """ + return pulumi.get(self, "disable_freshness") + + @property + @pulumi.getter(name="disablePersonalization") + def disable_personalization(self) -> bool: + """ + Whether to personalize the results. By default, personal signals will be used to boost results. 
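# --- Illustrative sketch, not part of the generated diff above ---
# A minimal reading of the precedence rule documented on
# QueryInterpretationConfigResponse: a True value set at the search-application level
# overrides the per-query setting; a False value defers to the query-level flag.
def effective_flag(application_level: bool, query_level: bool) -> bool:
    # application_level=True takes precedence; otherwise the query-level flag applies.
    return True if application_level else query_level

assert effective_flag(application_level=True, query_level=False) is True
assert effective_flag(application_level=False, query_level=True) is True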
+ """ + return pulumi.get(self, "disable_personalization") + + +@pulumi.output_type +class SearchQualityMetadataResponse(dict): + """ + Additional search quality metadata of the item. + """ + def __init__(__self__, *, + quality: float): + """ + Additional search quality metadata of the item. + :param float quality: An indication of the quality of the item, used to influence search quality. Value should be between 0.0 (lowest quality) and 1.0 (highest quality). The default value is 0.0. + """ + pulumi.set(__self__, "quality", quality) + + @property + @pulumi.getter + def quality(self) -> float: + """ + An indication of the quality of the item, used to influence search quality. Value should be between 0.0 (lowest quality) and 1.0 (highest quality). The default value is 0.0. + """ + return pulumi.get(self, "quality") + + +@pulumi.output_type +class SortOptionsResponse(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "operatorName": + suggest = "operator_name" + elif key == "sortOrder": + suggest = "sort_order" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in SortOptionsResponse. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + SortOptionsResponse.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + SortOptionsResponse.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + operator_name: str, + sort_order: str): + """ + :param str operator_name: The name of the operator corresponding to the field to sort on. The corresponding property must be marked as sortable. + :param str sort_order: Ascending is the default sort order + """ + pulumi.set(__self__, "operator_name", operator_name) + pulumi.set(__self__, "sort_order", sort_order) + + @property + @pulumi.getter(name="operatorName") + def operator_name(self) -> str: + """ + The name of the operator corresponding to the field to sort on. The corresponding property must be marked as sortable. + """ + return pulumi.get(self, "operator_name") + + @property + @pulumi.getter(name="sortOrder") + def sort_order(self) -> str: + """ + Ascending is the default sort order + """ + return pulumi.get(self, "sort_order") + + +@pulumi.output_type +class SourceConfigResponse(dict): + """ + Configurations for a source while processing a Search or Suggest request. + """ + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "crowdingConfig": + suggest = "crowding_config" + elif key == "scoringConfig": + suggest = "scoring_config" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in SourceConfigResponse. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + SourceConfigResponse.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + SourceConfigResponse.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + crowding_config: 'outputs.SourceCrowdingConfigResponse', + scoring_config: 'outputs.SourceScoringConfigResponse', + source: 'outputs.SourceResponse'): + """ + Configurations for a source while processing a Search or Suggest request. + :param 'SourceCrowdingConfigResponse' crowding_config: The crowding configuration for the source. + :param 'SourceScoringConfigResponse' scoring_config: The scoring configuration for the source. 
+ :param 'SourceResponse' source: The source for which this configuration is to be used. + """ + pulumi.set(__self__, "crowding_config", crowding_config) + pulumi.set(__self__, "scoring_config", scoring_config) + pulumi.set(__self__, "source", source) + + @property + @pulumi.getter(name="crowdingConfig") + def crowding_config(self) -> 'outputs.SourceCrowdingConfigResponse': + """ + The crowding configuration for the source. + """ + return pulumi.get(self, "crowding_config") + + @property + @pulumi.getter(name="scoringConfig") + def scoring_config(self) -> 'outputs.SourceScoringConfigResponse': + """ + The scoring configuration for the source. + """ + return pulumi.get(self, "scoring_config") + + @property + @pulumi.getter + def source(self) -> 'outputs.SourceResponse': + """ + The source for which this configuration is to be used. + """ + return pulumi.get(self, "source") + + +@pulumi.output_type +class SourceCrowdingConfigResponse(dict): + """ + Set search results crowding limits. Crowding is a situation in which multiple results from the same source or host "crowd out" other results, diminishing the quality of search for users. To foster better search quality and source diversity in search results, you can set a condition to reduce repetitive results by source. + """ + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "numResults": + suggest = "num_results" + elif key == "numSuggestions": + suggest = "num_suggestions" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in SourceCrowdingConfigResponse. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + SourceCrowdingConfigResponse.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + SourceCrowdingConfigResponse.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + num_results: int, + num_suggestions: int): + """ + Set search results crowding limits. Crowding is a situation in which multiple results from the same source or host "crowd out" other results, diminishing the quality of search for users. To foster better search quality and source diversity in search results, you can set a condition to reduce repetitive results by source. + :param int num_results: Maximum number of results allowed from a datasource in a result page as long as results from other sources are not exhausted. Value specified must not be negative. A default value is used if this value is equal to 0. To disable crowding, set the value greater than 100. + :param int num_suggestions: Maximum number of suggestions allowed from a source. No limits will be set on results if this value is less than or equal to 0. + """ + pulumi.set(__self__, "num_results", num_results) + pulumi.set(__self__, "num_suggestions", num_suggestions) + + @property + @pulumi.getter(name="numResults") + def num_results(self) -> int: + """ + Maximum number of results allowed from a datasource in a result page as long as results from other sources are not exhausted. Value specified must not be negative. A default value is used if this value is equal to 0. To disable crowding, set the value greater than 100. + """ + return pulumi.get(self, "num_results") + + @property + @pulumi.getter(name="numSuggestions") + def num_suggestions(self) -> int: + """ + Maximum number of suggestions allowed from a source. No limits will be set on results if this value is less than or equal to 0. 
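# --- Illustrative sketch, not part of the generated diff above ---
# Example values for SourceCrowdingConfigResponse showing how the documented special
# cases read in practice; the numbers are invented for illustration only.
crowding_default = {"num_results": 0, "num_suggestions": 0}     # 0 -> service default; <= 0 means no suggestion limit
crowding_limited = {"num_results": 3, "num_suggestions": 2}     # at most 3 results and 2 suggestions per source
crowding_disabled = {"num_results": 101, "num_suggestions": 0}  # > 100 effectively disables result crowding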
+ """ + return pulumi.get(self, "num_suggestions") + @pulumi.output_type class SourceResponse(dict): @@ -814,6 +1841,90 @@ def source_importance(self) -> str: return pulumi.get(self, "source_importance") +@pulumi.output_type +class StructuredDataObjectResponse(dict): + """ + A structured data object consisting of named properties. + """ + def __init__(__self__, *, + properties: Sequence['outputs.NamedPropertyResponse']): + """ + A structured data object consisting of named properties. + :param Sequence['NamedPropertyResponse'] properties: The properties for the object. The maximum number of elements is 1000. + """ + pulumi.set(__self__, "properties", properties) + + @property + @pulumi.getter + def properties(self) -> Sequence['outputs.NamedPropertyResponse']: + """ + The properties for the object. The maximum number of elements is 1000. + """ + return pulumi.get(self, "properties") + + +@pulumi.output_type +class TextValuesResponse(dict): + """ + List of text values. + """ + def __init__(__self__, *, + values: Sequence[str]): + """ + List of text values. + :param Sequence[str] values: The maximum allowable length for text values is 2048 characters. + """ + pulumi.set(__self__, "values", values) + + @property + @pulumi.getter + def values(self) -> Sequence[str]: + """ + The maximum allowable length for text values is 2048 characters. + """ + return pulumi.get(self, "values") + + +@pulumi.output_type +class TimestampValuesResponse(dict): + """ + List of timestamp values. + """ + def __init__(__self__, *, + values: Sequence[str]): + """ + List of timestamp values. + """ + pulumi.set(__self__, "values", values) + + @property + @pulumi.getter + def values(self) -> Sequence[str]: + return pulumi.get(self, "values") + + +@pulumi.output_type +class UploadItemRefResponse(dict): + """ + Represents an upload session reference. This reference is created via upload method. This reference is valid for 30 days after its creation. Updating of item content may refer to this uploaded content via contentDataRef. + """ + def __init__(__self__, *, + name: str): + """ + Represents an upload session reference. This reference is created via upload method. This reference is valid for 30 days after its creation. Updating of item content may refer to this uploaded content via contentDataRef. + :param str name: The name of the content reference. The maximum length is 2048 characters. + """ + pulumi.set(__self__, "name", name) + + @property + @pulumi.getter + def name(self) -> str: + """ + The name of the content reference. The maximum length is 2048 characters. + """ + return pulumi.get(self, "name") + + @pulumi.output_type class ValueFilterResponse(dict): @staticmethod