diff --git a/pkg/component/ai/embedding.go b/pkg/component/ai/embedding.go
new file mode 100644
index 000000000..012705cb8
--- /dev/null
+++ b/pkg/component/ai/embedding.go
@@ -0,0 +1,63 @@
+package ai
+
+// EmbeddingInput is the standardized input for the embedding model.
+type EmbeddingInput struct {
+ // Data is the standardized input data for the embedding model.
+ Data EmbeddingInputData `json:"data"`
+ // Parameter is the standardized parameter for the embedding model.
+ Parameter EmbeddingParameter `json:"parameter"`
+}
+
+// EmbeddingInputData is the standardized input data for the embedding model.
+type EmbeddingInputData struct {
+ // Model is the model name.
+ Model string `json:"model"`
+ // Embeddings is the list of data to be embedded.
+ Embeddings []InputEmbedding `json:"embeddings"`
+}
+
+// InputEmbedding is the standardized input data to be embedded.
+type InputEmbedding struct {
+ // Type is the type of the input data. It can be either "text", "image-url", or "image-base64".
+ Type string `json:"type"`
+ // Text is the text to be embedded.
+ Text string `json:"text"`
+ // ImageURL is the URL of the image to be embedded.
+ ImageURL string `json:"image-url"`
+ // ImageBase64 is the base64 encoded image to be embedded.
+ ImageBase64 string `json:"image-base64"`
+}
+
+// EmbeddingParameter is the standardized parameter for the embedding model.
+type EmbeddingParameter struct {
+ // Format is the format of the output embeddings. Default is "float", can be "float" or "base64".
+ Format string `json:"format"`
+ // Dimensions is the number of dimensions of the output embeddings.
+ Dimensions int `json:"dimensions"`
+ // InputType is the type of the input data. It can be "query" or "data".
+ InputType string `json:"input-type"`
+ // Truncate is how to handle inputs longer than the max token length. Defaults to 'End'. Can be 'End', 'Start', or 'None'.
+ Truncate string `json:"truncate"`
+}
+
+// EmbeddingOutput is the standardized output for the embedding model.
+type EmbeddingOutput struct {
+ // Data is the standardized output data for the embedding model.
+ Data EmbeddingOutputData `json:"data"`
+}
+
+// EmbeddingOutputData is the standardized output data for the embedding model.
+type EmbeddingOutputData struct {
+ // Embeddings is the list of output embeddings.
+ Embeddings []OutputEmbedding `json:"embeddings"`
+}
+
+// OutputEmbedding is the standardized output embedding.
+type OutputEmbedding struct {
+ // Index is the index of the output embedding.
+ Index int `json:"index"`
+ // Vector is the output embedding.
+ Vector []any `json:"vector"`
+ // Created is the Unix timestamp (in seconds) of when the embedding was created.
+ Created int `json:"created"`
+}
diff --git a/pkg/component/ai/instill/v0/README.mdx b/pkg/component/ai/instill/v0/README.mdx
index 5369cc3ad..bce0965d2 100644
--- a/pkg/component/ai/instill/v0/README.mdx
+++ b/pkg/component/ai/instill/v0/README.mdx
@@ -18,6 +18,7 @@ It can carry out the following tasks:
- [Text to Image](#text-to-image)
- [Visual Question Answering](#visual-question-answering)
- [Chat](#chat)
+- [Embedding](#embedding)
@@ -582,4 +583,116 @@ The image URL
| Text | `text` | string | Text |
+### Embedding
+
+This task refers to the process of generating vector embeddings from input data, which can be text, images, or other formats. This transformation converts the data into a dense, fixed-length numerical representation that captures the essential features of the original input. These embeddings are typically used in machine learning tasks to represent complex data in a more structured, simplified form.
+
+
+
+| Input | ID | Type | Description |
+| :--- | :--- | :--- | :--- |
+| Task ID (required) | `task` | string | `TASK_EMBEDDING` |
+| [Data](#embedding-data) (required) | `data` | object | Input data. |
+| [Parameter](#embedding-parameter) | `parameter` | object | Input parameter. |
+
+
+
+
+ Input Objects in Embedding
+
+Data
+
+Input data.
+
+
+
+| Field | Field ID | Type | Note |
+| :--- | :--- | :--- | :--- |
+| [Embeddings](#embedding-embeddings) | `embeddings` | array | List of input data to be embedded. |
+| Model | `model` | string | The model to be used for generating embeddings. It should be `namespace/model-name/version`. i.e. `abrc/yolov7-stomata/v0.1.0`. You can see the version from the Versions tab of Model page. |
+
+Parameter
+
+Input parameter.
+
+
+
+| Field | Field ID | Type | Note |
+| :--- | :--- | :--- | :--- |
+| Dimensions | `dimensions` | integer | Number of dimensions in the output embedding vectors. |
+| Data Format | `format` | string | The data format of the embeddings. Defaults to float.
Enum values
|
+| Input Type | `input-type` | string | The type of input data to be embedded (e.g., query, document). |
+| Truncate | `truncate` | string | How to handle inputs longer than the max token length. Defaults to 'End'.
Enum values
|
+
+
+
+
+The embeddings
Object
+
+Embeddings
+
+`embeddings` must fulfill one of the following schemas:
+
+Text
+
+
+
+| Field | Field ID | Type | Note |
+| :--- | :--- | :--- | :--- |
+| Text Content | `text` | string | When the input is text, the raw text is tokenized and processed into a dense, fixed-length vector that captures semantic information such as word meanings and relationships. These text embeddings enable tasks like sentiment analysis, search, or classification. |
+| Text | `type` | string | Must be `"text"` |
+
+
+Image URL
+
+
+
+| Field | Field ID | Type | Note |
+| :--- | :--- | :--- | :--- |
+| Image URL | `image-url` | string | When the input is an image from a URL, the image is first fetched from the URL and then decoded into its original format. It is then processed into a fixed-length vector representing essential visual features like shapes and colors. These image embeddings are useful for tasks like image classification or similarity search, providing structured numerical data for complex visual inputs. |
+| Image URL | `type` | string | Must be `"image-url"` |
+
+
+Image Base64
+
+
+
+| Field | Field ID | Type | Note |
+| :--- | :--- | :--- | :--- |
+| Image File | `image-base64` | string | When the input is an image in base64 format, the base64-encoded data is first decoded into its original image form. The image is then processed and transformed into a dense, fixed-length numerical vector, capturing key visual features like shapes, colors, or textures. |
+| Image File | `type` | string | Must be `"image-base64"` |
+
+
+
+
+
+| Output | ID | Type | Description |
+| :--- | :--- | :--- | :--- |
+| [Data](#embedding-data) | `data` | object | Output data. |
+
+
+
+ Output Objects in Embedding
+
+Data
+
+
+
+| Field | Field ID | Type | Note |
+| :--- | :--- | :--- | :--- |
+| [Embeddings](#embedding-embeddings) | `embeddings` | array | List of generated embeddings. |
+
+
+Embeddings
+
+
+
+| Field | Field ID | Type | Note |
+| :--- | :--- | :--- | :--- |
+| Created | `created` | integer | The Unix timestamp (in seconds) of when the embedding was created. |
+| Index | `index` | integer | The index of the embedding vector in the array. |
+| Embedding Vector | `vector` | array | The embedding vector. |
+
+
+
diff --git a/pkg/component/ai/instill/v0/config/definition.json b/pkg/component/ai/instill/v0/config/definition.json
index 22511a5a7..f94ef68b3 100644
--- a/pkg/component/ai/instill/v0/config/definition.json
+++ b/pkg/component/ai/instill/v0/config/definition.json
@@ -10,7 +10,8 @@
"TASK_TEXT_GENERATION_CHAT",
"TASK_TEXT_TO_IMAGE",
"TASK_VISUAL_QUESTION_ANSWERING",
- "TASK_CHAT"
+ "TASK_CHAT",
+ "TASK_EMBEDDING"
],
"custom": false,
"documentationUrl": "https://www.instill.tech/docs/component/ai/instill-model",
diff --git a/pkg/component/ai/instill/v0/config/tasks.json b/pkg/component/ai/instill/v0/config/tasks.json
index 18cc0c2e7..1ed30ed16 100644
--- a/pkg/component/ai/instill/v0/config/tasks.json
+++ b/pkg/component/ai/instill/v0/config/tasks.json
@@ -218,7 +218,7 @@
},
"system-message": {
"default": "You are a helpful assistant.",
- "description": "The system message helps set the behavior of the assistant. For example, you can modify the personality of the assistant or provide specific instructions about how it should behave throughout the conversation. By default, the model\u2019s behavior is using a generic message as \"You are a helpful assistant.\"",
+ "description": "The system message helps set the behavior of the assistant. For example, you can modify the personality of the assistant or provide specific instructions about how it should behave throughout the conversation. By default, the model’s behavior is using a generic message as \"You are a helpful assistant.\"",
"instillAcceptFormats": [
"string"
],
@@ -377,7 +377,7 @@
},
"system-message": {
"default": "You are a helpful assistant.",
- "description": "The system message helps set the behavior of the assistant. For example, you can modify the personality of the assistant or provide specific instructions about how it should behave throughout the conversation. By default, the model\u2019s behavior is using a generic message as \"You are a helpful assistant.\"",
+ "description": "The system message helps set the behavior of the assistant. For example, you can modify the personality of the assistant or provide specific instructions about how it should behave throughout the conversation. By default, the model’s behavior is using a generic message as \"You are a helpful assistant.\"",
"instillAcceptFormats": [
"string"
],
@@ -576,5 +576,286 @@
"TASK_CHAT": {
"instillShortDescription": "Generate texts from input text prompts and chat history.",
"$ref": "#/TASK_TEXT_GENERATION_CHAT"
+ },
+ "TASK_EMBEDDING": {
+ "title": "Embedding",
+ "instillShortDescription": "This task refers to the process of generating vector embeddings from input data, which can be text, images, or other formats. This transformation converts the data into a dense, fixed-length numerical representation that captures the essential features of the original input. These embeddings are typically used in machine learning tasks to represent complex data in a more structured, simplified form.",
+ "input": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Embedding Input",
+ "description": "Input schema of the embedding task.",
+ "instillShortDescription": "Input schema of the embedding task.",
+ "type": "object",
+ "properties": {
+ "data": {
+ "description": "Input data.",
+ "instillShortDescription": "Input data.",
+ "type": "object",
+ "properties": {
+ "model": {
+ "description": "The model to be used for generating embeddings. It should be `namespace/model-name/version`. i.e. `abrc/yolov7-stomata/v0.1.0`. You can see the version from the Versions tab of Model page.",
+ "instillShortDescription": "The model to be used.",
+ "instillAcceptFormats": [
+ "string"
+ ],
+ "instillUIOrder": 0,
+ "title": "Model",
+ "type": "string"
+ },
+ "embeddings": {
+ "title": "Embeddings",
+ "type": "array",
+ "items": {
+ "type": "object",
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "text": {
+ "title": "Text Content",
+ "description": "When the input is text, the raw text is tokenized and processed into a dense, fixed-length vector that captures semantic information such as word meanings and relationships. These text embeddings enable tasks like sentiment analysis, search, or classification.",
+ "instillShortDescription": "Text content.",
+ "instillAcceptFormats": [
+ "string"
+ ],
+ "type": "string",
+ "instillUIOrder": 1
+ },
+ "type": {
+ "title": "Text",
+ "description": "Text input content type.",
+ "instillShortDescription": "Text input content type.",
+ "instillAcceptFormats": [
+ "string"
+ ],
+ "type": "string",
+ "const": "text",
+ "instillUIOrder": 0
+ }
+ },
+ "title": "Text",
+ "required": [
+ "text",
+ "type"
+ ]
+ },
+ {
+ "type": "object",
+ "properties": {
+ "image-url": {
+ "title": "Image URL",
+ "description": "When the input is an image from a URL, the image is first fetched from the URL and then decoded into its original format. It is then processed into a fixed-length vector representing essential visual features like shapes and colors. These image embeddings are useful for tasks like image classification or similarity search, providing structured numerical data for complex visual inputs.",
+ "instillShortDescription": "Image content URL.",
+ "instillAcceptFormats": [
+ "string"
+ ],
+ "type": "string",
+ "instillUIOrder": 1
+ },
+ "type": {
+ "title": "Image URL",
+ "description": "Image URL input content type",
+ "instillShortDescription": "Image URL input content type",
+ "instillAcceptFormats": [
+ "string"
+ ],
+ "type": "string",
+ "const": "image-url",
+ "instillUIOrder": 0
+ }
+ },
+ "title": "Image URL",
+ "required": [
+ "image-url",
+ "type"
+ ]
+ },
+ {
+ "type": "object",
+ "properties": {
+ "image-base64": {
+ "title": "Image File",
+ "description": "When the input is an image in base64 format, the base64-encoded data is first decoded into its original image form. The image is then processed and transformed into a dense, fixed-length numerical vector, capturing key visual features like shapes, colors, or textures.",
+ "instillShortDescription": "Image file input.",
+ "instillAcceptFormats": [
+ "image/*"
+ ],
+ "type": "string",
+ "instillUIOrder": 1
+ },
+ "type": {
+ "title": "Image File",
+ "description": "Image file input content type.",
+ "instillShortDescription": "Image file input content type.",
+ "instillAcceptFormats": [
+ "string"
+ ],
+ "type": "string",
+ "const": "image-base64",
+ "instillUIOrder": 0
+ }
+ },
+ "title": "Image Base64",
+ "required": [
+ "image-base64",
+ "type"
+ ]
+ }
+ ],
+ "title": "Embedding",
+ "description": "Input data to be embedded.",
+ "instillUIOrder": 0,
+ "required": [
+ "type"
+ ]
+
+ },
+ "description": "List of input data to be embedded.",
+ "instillUIOrder": 1
+ }
+ },
+ "required": [
+ "model",
+ "embeddings"
+ ],
+ "instillUIOrder": 0,
+ "title": "Data"
+ },
+ "parameter": {
+ "description": "Input parameter.",
+ "instillShortDescription": "Input parameter.",
+ "type": "object",
+ "properties": {
+ "format": {
+ "title": "Data Format",
+ "type": "string",
+ "description": "The data format of the embeddings. Defaults to float.",
+ "instillShortDescription": "Data format",
+ "instillAcceptFormats": [
+ "string"
+ ],
+ "enum": [
+ "float",
+ "base64"
+ ],
+ "default": "float",
+ "instillUIOrder": 0
+ },
+ "dimensions": {
+ "title": "Dimensions",
+ "type": "integer",
+ "description": "Number of dimensions in the output embedding vectors.",
+ "instillShortDescription": "Number of dimensions",
+ "instillAcceptFormats": [
+ "integer"
+ ],
+ "default": 512,
+ "instillUIOrder": 1
+ },
+ "input-type": {
+ "title": "Input Type",
+ "type": "string",
+ "description": "The type of input data to be embedded (e.g., query, document).",
+ "instillShortDescription": "Type of input data",
+ "instillAcceptFormats": [
+ "string"
+ ],
+ "instillUIOrder": 2
+ },
+ "truncate": {
+ "title": "Truncate",
+ "type": "string",
+ "description": "How to handle inputs longer than the max token length. Defaults to 'End'.",
+ "instillShortDescription": "Truncation handling",
+ "instillAcceptFormats": [
+ "string"
+ ],
+ "enum": [
+ "None",
+ "End",
+ "Start"
+ ],
+ "default": "End",
+ "instillUIOrder": 3
+ }
+ },
+ "title": "Parameter",
+ "instillUIOrder": 1,
+ "required": []
+ }
+ },
+ "required": [
+ "data"
+ ]
+ },
+ "output": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Embedding Output",
+ "description": "Output schema of the embedding task.",
+ "instillShortDescription": "Output schema of the embedding task.",
+ "type": "object",
+ "properties": {
+ "data": {
+ "description": "Output data.",
+ "instillShortDescription": "Output data.",
+ "type": "object",
+ "properties": {
+ "embeddings": {
+ "title": "Embeddings",
+ "type": "array",
+ "description": "List of generated embeddings.",
+ "instillShortDescription": "List of embeddings.",
+ "instillFormat": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "index": {
+ "title": "Index",
+ "type": "integer",
+ "description": "The index of the embedding vector in the array.",
+ "instillShortDescription": "Index in the array",
+ "instillFormat": "integer",
+ "instillUIOrder": 0
+ },
+ "vector": {
+ "title": "Embedding Vector",
+ "type": "array",
+ "description": "The embedding vector.",
+ "instillShortDescription": "Embedding vector.",
+ "instillFormat": "array",
+ "items": {
+ "type": "number"
+ },
+ "instillUIOrder": 1
+ },
+ "created": {
+ "title": "Created",
+ "type": "integer",
+ "description": "The Unix timestamp (in seconds) of when the embedding was created.",
+ "instillShortDescription": "Timestamp of creation",
+ "instillFormat": "integer",
+ "instillUIOrder": 2
+ }
+ },
+ "required": [
+ "index",
+ "vector",
+ "created"
+ ]
+ },
+ "instillUIOrder": 0
+ }
+ },
+ "required": [
+ "embeddings"
+ ],
+ "instillUIOrder": 0,
+ "title": "Data"
+ }
+ },
+ "required": [
+ "data"
+ ]
+ }
}
}
diff --git a/pkg/component/ai/instill/v0/embedding.go b/pkg/component/ai/instill/v0/embedding.go
new file mode 100644
index 000000000..0af84276d
--- /dev/null
+++ b/pkg/component/ai/instill/v0/embedding.go
@@ -0,0 +1,37 @@
+package instill
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/protobuf/types/known/structpb"
+
+ modelpb "github.com/instill-ai/protogen-go/model/model/v1alpha"
+)
+
+func (e *execution) executeEmbedding(grpcClient modelpb.ModelPublicServiceClient, nsID string, modelID string, version string, inputs []*structpb.Struct) ([]*structpb.Struct, error) {
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+ defer cancel()
+
+ ctx = metadata.NewOutgoingContext(ctx, getRequestMetadata(e.SystemVariables))
+
+ res, err := grpcClient.TriggerNamespaceModel(ctx, &modelpb.TriggerNamespaceModelRequest{
+ NamespaceId: nsID,
+ ModelId: modelID,
+ Version: version,
+ TaskInputs: inputs,
+ })
+
+ if err != nil {
+ return nil, fmt.Errorf("triggering model: %w", err)
+ }
+
+ if res == nil || len(res.TaskOutputs) == 0 {
+ return nil, fmt.Errorf("no output from model")
+ }
+
+ return res.TaskOutputs, nil
+}
diff --git a/pkg/component/ai/instill/v0/main.go b/pkg/component/ai/instill/v0/main.go
index 306a46658..0479f8f66 100644
--- a/pkg/component/ai/instill/v0/main.go
+++ b/pkg/component/ai/instill/v0/main.go
@@ -14,6 +14,7 @@ import (
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/structpb"
+ "github.com/instill-ai/pipeline-backend/pkg/component/ai"
"github.com/instill-ai/pipeline-backend/pkg/component/base"
"github.com/instill-ai/pipeline-backend/pkg/component/internal/util"
@@ -101,12 +102,46 @@ func (e *execution) Execute(ctx context.Context, jobs []*base.Job) error {
defer gRPCCientConn.Close()
}
+ var result []*structpb.Struct
+
+ // We will refactor the component soon to align the data structure with Instill Model.
+ // For now, we move out `TASK_EMBEDDING` to a separate section.
+ if e.Task == "TASK_EMBEDDING" {
+ var inputStruct ai.EmbeddingInput
+ err := base.ConvertFromStructpb(inputs[0], &inputStruct)
+ if err != nil {
+ return fmt.Errorf("convert to defined struct: %w", err)
+ }
+
+ modelNameSplits := strings.Split(inputStruct.Data.Model, "/")
+ if len(modelNameSplits) != 3 {
+ return fmt.Errorf("invalid model name %q, expect `namespace/model-name/version`", inputStruct.Data.Model)
+ }
+ nsID, modelID, version := modelNameSplits[0], modelNameSplits[1], modelNameSplits[2]
+
+ result, err = e.executeEmbedding(gRPCClient, nsID, modelID, version, inputs)
+ if err != nil {
+ return fmt.Errorf("execute embedding: %w", err)
+ }
+
+ for idx, job := range jobs {
+ if idx >= len(result) {
+ job.Error.Error(ctx, fmt.Errorf("no output for input index %d", idx))
+ continue
+ }
+ if err := job.Output.Write(ctx, result[idx]); err != nil {
+ job.Error.Error(ctx, err)
+ }
+ }
+ return nil
+ }
+
modelNameSplits := strings.Split(inputs[0].GetFields()["model-name"].GetStringValue(), "/")
nsID := modelNameSplits[0]
modelID := modelNameSplits[1]
version := modelNameSplits[2]
- var result []*structpb.Struct
+
switch e.Task {
case "TASK_CLASSIFICATION":
result, err = e.executeVisionTask(gRPCClient, nsID, modelID, version, inputs)
diff --git a/pkg/component/ai/universalai/v0/config/tasks.json b/pkg/component/ai/universalai/v0/config/tasks.json
index e88557c89..299d7ea20 100644
--- a/pkg/component/ai/universalai/v0/config/tasks.json
+++ b/pkg/component/ai/universalai/v0/config/tasks.json
@@ -39,7 +39,8 @@
"instillAcceptFormats": [
"string"
],
- "type": "string"
+ "type": "string",
+ "instillUIOrder": 1
},
"type": {
"title": "Text",
@@ -49,13 +50,15 @@
"string"
],
"type": "string",
- "const": "text"
+ "const": "text",
+ "instillUIOrder": 0
}
},
"required": [
"text",
"type"
- ]
+ ],
+ "title": "Text"
},
{
"type": "object",
@@ -67,7 +70,8 @@
"instillAcceptFormats": [
"string"
],
- "type": "string"
+ "type": "string",
+ "instillUIOrder": 1
},
"type": {
"title": "Image URL",
@@ -77,25 +81,28 @@
"string"
],
"type": "string",
- "const": "image-url"
+ "const": "image-url",
+ "instillUIOrder": 0
}
},
"required": [
"image-url",
"type"
- ]
+ ],
+ "title": "Image URL"
},
{
"type": "object",
"properties": {
"image-base64": {
- "title": "Image File",
+ "title": "Image Base64",
"description": "Image base64 encoded string.",
"instillShortDescription": "Image base64 encoded string.",
"instillAcceptFormats": [
"image/*"
],
- "type": "string"
+ "type": "string",
+ "instillUIOrder": 1
},
"type": {
"title": "Image File",
@@ -105,13 +112,15 @@
"string"
],
"type": "string",
- "const": "image-base64"
+ "const": "image-base64",
+ "instillUIOrder": 0
}
},
"required": [
"image-base64",
"type"
- ]
+ ],
+ "title": "Image Base64"
}
],
"required": []
diff --git a/pkg/component/application/instillapp/v0/config/tasks.json b/pkg/component/application/instillapp/v0/config/tasks.json
index c43d52974..1160f6ee0 100644
--- a/pkg/component/application/instillapp/v0/config/tasks.json
+++ b/pkg/component/application/instillapp/v0/config/tasks.json
@@ -157,6 +157,7 @@
"instillUIOrder": 1
}
},
+ "title": "Text",
"required": [
"text",
"type"
@@ -181,6 +182,7 @@
"instillUIOrder": 1
}
},
+ "title": "Image URL",
"required": [
"image-url",
"type"
@@ -205,6 +207,7 @@
"instillUIOrder": 1
}
},
+ "title": "Image Base64",
"required": [
"image-base64",
"type"
diff --git a/pkg/component/tools/compogen/pkg/gen/readme.go b/pkg/component/tools/compogen/pkg/gen/readme.go
index 8bb71690d..01bfaeecd 100644
--- a/pkg/component/tools/compogen/pkg/gen/readme.go
+++ b/pkg/component/tools/compogen/pkg/gen/readme.go
@@ -481,10 +481,23 @@ func (rt *readmeTask) parseOneOfsProperties(properties map[string]property) {
continue
}
- if op.Type != "object" {
+ if op.Type != "object" && op.Type != "array" {
continue
}
+ if op.Type == "array" {
+ if op.Items.Type != "object" {
+ continue
+ }
+
+ if op.Items.OneOf != nil {
+ rt.OneOfs = append(rt.OneOfs, map[string][]objectSchema{
+ key: op.Items.OneOf,
+ })
+ }
+ continue
+ }
+
if op.OneOf != nil {
rt.OneOfs = append(rt.OneOfs, map[string][]objectSchema{
key: op.OneOf,
diff --git a/pkg/component/tools/compogen/pkg/gen/schema.go b/pkg/component/tools/compogen/pkg/gen/schema.go
index b8668adfd..14a4f7377 100644
--- a/pkg/component/tools/compogen/pkg/gen/schema.go
+++ b/pkg/component/tools/compogen/pkg/gen/schema.go
@@ -15,6 +15,7 @@ type property struct {
Items struct {
Type string `json:"type"`
Properties map[string]property `json:"properties" validate:"omitempty,dive"`
+ OneOf []objectSchema `json:"oneOf" validate:"dive"`
} `json:"items"`
Properties map[string]property `json:"properties" validate:"omitempty,dive"`