Oracle Cloud Infrastructure v3.10.0 published on Wednesday, Nov 5, 2025 by Pulumi
oci.AiLanguage.getModelEvaluationResults
This data source provides the list of Model Evaluation Results in Oracle Cloud Infrastructure Ai Language service.
Get a (paginated) list of evaluation results for a given model.
Example Usage
TypeScript

import * as pulumi from "@pulumi/pulumi";
import * as oci from "@pulumi/oci";

const testModelEvaluationResults = oci.AiLanguage.getModelEvaluationResults({
    modelId: testModel.id,
});
Python

import pulumi
import pulumi_oci as oci

test_model_evaluation_results = oci.AiLanguage.get_model_evaluation_results(model_id=test_model["id"])
Go

package main

import (
    "github.com/pulumi/pulumi-oci/sdk/v3/go/oci/ailanguage"
    "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
    pulumi.Run(func(ctx *pulumi.Context) error {
        _, err := ailanguage.GetModelEvaluationResults(ctx, &ailanguage.GetModelEvaluationResultsArgs{
            ModelId: testModel.Id,
        }, nil)
        if err != nil {
            return err
        }
        return nil
    })
}
C#

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Oci = Pulumi.Oci;

return await Deployment.RunAsync(() =>
{
    var testModelEvaluationResults = Oci.AiLanguage.GetModelEvaluationResults.Invoke(new()
    {
        ModelId = testModel.Id,
    });
});
Java

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.oci.AiLanguage.AiLanguageFunctions;
import com.pulumi.oci.AiLanguage.inputs.GetModelEvaluationResultsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        final var testModelEvaluationResults = AiLanguageFunctions.getModelEvaluationResults(GetModelEvaluationResultsArgs.builder()
            .modelId(testModel.id())
            .build());
    }
}
YAML

variables:
  testModelEvaluationResults:
    fn::invoke:
      function: oci:AiLanguage:getModelEvaluationResults
      arguments:
        modelId: ${testModel.id}
Using getModelEvaluationResults
Two invocation forms are available. The direct form accepts plain arguments and either blocks until the result value is available, or returns a Promise-wrapped result. The output form accepts Input-wrapped arguments and returns an Output-wrapped result.
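As a quick illustration, here is a minimal TypeScript sketch of both forms. It assumes the model OCID comes from stack configuration under a hypothetical modelId key; in practice it is usually the id output of an oci.AiLanguage.Model resource, as in Example Usage above.

import * as pulumi from "@pulumi/pulumi";
import * as oci from "@pulumi/oci";

// Hypothetical config key, used only for illustration.
const modelId = new pulumi.Config().require("modelId");

// Direct form: plain arguments, Promise-wrapped result.
const direct = oci.AiLanguage.getModelEvaluationResults({ modelId });
direct.then(r => console.log(`collections: ${r.evaluationResultCollections.length}`));

// Output form: Input-wrapped arguments, Output-wrapped result.
// Prefer this form when the model id is itself an Output, e.g. testModel.id.
const viaOutput = oci.AiLanguage.getModelEvaluationResultsOutput({
    modelId: pulumi.output(modelId),
});
export const collectionCount = viaOutput.apply(r => r.evaluationResultCollections.length);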
TypeScript

function getModelEvaluationResults(args: GetModelEvaluationResultsArgs, opts?: InvokeOptions): Promise<GetModelEvaluationResultsResult>
function getModelEvaluationResultsOutput(args: GetModelEvaluationResultsOutputArgs, opts?: InvokeOptions): Output<GetModelEvaluationResultsResult>

Python

def get_model_evaluation_results(filters: Optional[Sequence[GetModelEvaluationResultsFilter]] = None,
                                 model_id: Optional[str] = None,
                                 opts: Optional[InvokeOptions] = None) -> GetModelEvaluationResultsResult
def get_model_evaluation_results_output(filters: Optional[pulumi.Input[Sequence[pulumi.Input[GetModelEvaluationResultsFilterArgs]]]] = None,
                                        model_id: Optional[pulumi.Input[str]] = None,
                                        opts: Optional[InvokeOptions] = None) -> Output[GetModelEvaluationResultsResult]

Go

func GetModelEvaluationResults(ctx *Context, args *GetModelEvaluationResultsArgs, opts ...InvokeOption) (*GetModelEvaluationResultsResult, error)
func GetModelEvaluationResultsOutput(ctx *Context, args *GetModelEvaluationResultsOutputArgs, opts ...InvokeOption) GetModelEvaluationResultsResultOutput

Note: This function is named GetModelEvaluationResults in the Go SDK.

C#

public static class GetModelEvaluationResults
{
    public static Task<GetModelEvaluationResultsResult> InvokeAsync(GetModelEvaluationResultsArgs args, InvokeOptions? opts = null)
    public static Output<GetModelEvaluationResultsResult> Invoke(GetModelEvaluationResultsInvokeArgs args, InvokeOptions? opts = null)
}

Java

public static CompletableFuture<GetModelEvaluationResultsResult> getModelEvaluationResults(GetModelEvaluationResultsArgs args, InvokeOptions options)
public static Output<GetModelEvaluationResultsResult> getModelEvaluationResults(GetModelEvaluationResultsArgs args, InvokeOptions options)

YAML

fn::invoke:
  function: oci:AiLanguage/getModelEvaluationResults:getModelEvaluationResults
  arguments:
    # arguments dictionary

The following arguments are supported:
C#

- ModelId string - unique model OCID.
- Filters List<GetModelEvaluationResultsFilter>

Go

- ModelId string - unique model OCID.
- Filters []GetModelEvaluationResultsFilter

Java

- modelId String - unique model OCID.
- filters List<GetModelEvaluationResultsFilter>

TypeScript

- modelId string - unique model OCID.
- filters GetModelEvaluationResultsFilter[]

Python

- model_id str - unique model OCID.
- filters Sequence[GetModelEvaluationResultsFilter]

YAML

- modelId String - unique model OCID.
- filters List<Property Map>
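The filters argument can narrow the returned collection. Its fields are not documented on this page (see GetModelEvaluationResultsFilter under Supporting Types), so the TypeScript sketch below assumes the name/values shape that pulumi-oci data-source filters commonly use; the "model_type" filter name and its value are hypothetical.

import * as oci from "@pulumi/oci";

// testModel is assumed to be defined elsewhere, as in Example Usage above.
// The filter shape (name/values) and the "model_type" filter name are
// assumptions for illustration only.
const filtered = oci.AiLanguage.getModelEvaluationResultsOutput({
    modelId: testModel.id,
    filters: [{
        name: "model_type",
        values: ["TEXT_CLASSIFICATION"],
    }],
});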
getModelEvaluationResults Result
The following output properties are available:
C#

- EvaluationResultCollections List<GetModelEvaluationResultsEvaluationResultCollection> - The list of evaluation_result_collection.
- Id string - The provider-assigned unique ID for this managed resource.
- ModelId string
- Filters List<GetModelEvaluationResultsFilter>

Go

- EvaluationResultCollections []GetModelEvaluationResultsEvaluationResultCollection - The list of evaluation_result_collection.
- Id string - The provider-assigned unique ID for this managed resource.
- ModelId string
- Filters []GetModelEvaluationResultsFilter

Java

- evaluationResultCollections List<GetModelEvaluationResultsEvaluationResultCollection> - The list of evaluation_result_collection.
- id String - The provider-assigned unique ID for this managed resource.
- modelId String
- filters List<GetModelEvaluationResultsFilter>

TypeScript

- evaluationResultCollections GetModelEvaluationResultsEvaluationResultCollection[] - The list of evaluation_result_collection.
- id string - The provider-assigned unique ID for this managed resource.
- modelId string
- filters GetModelEvaluationResultsFilter[]

Python

- evaluation_result_collections Sequence[GetModelEvaluationResultsEvaluationResultCollection] - The list of evaluation_result_collection.
- id str - The provider-assigned unique ID for this managed resource.
- model_id str
- filters Sequence[GetModelEvaluationResultsFilter]

YAML

- evaluationResultCollections List<Property Map> - The list of evaluation_result_collection.
- id String - The provider-assigned unique ID for this managed resource.
- modelId String
- filters List<Property Map>
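Because the result nests collections of items, reading a specific field usually means drilling through both levels. A minimal TypeScript sketch, assuming the output form and a testModel resource defined elsewhere, as in Example Usage:

import * as oci from "@pulumi/oci";

// testModel is assumed to be defined elsewhere, as in Example Usage above.
const results = oci.AiLanguage.getModelEvaluationResultsOutput({
    modelId: testModel.id,
});

// Predicted labels of the first evaluated record, guarding against empty results.
export const firstPredictedLabels = results.apply(r =>
    r.evaluationResultCollections[0]?.items[0]?.predictedLabels ?? []);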
Supporting Types
GetModelEvaluationResultsEvaluationResultCollection
C#

- Items List<GetModelEvaluationResultsEvaluationResultCollectionItem> - List of model evaluation analysis

Go

- Items []GetModelEvaluationResultsEvaluationResultCollectionItem - List of model evaluation analysis

Java

- items List<GetModelEvaluationResultsEvaluationResultCollectionItem> - List of model evaluation analysis

TypeScript

- items GetModelEvaluationResultsEvaluationResultCollectionItem[] - List of model evaluation analysis

Python

- items Sequence[GetModelEvaluationResultsEvaluationResultCollectionItem] - List of model evaluation analysis

YAML

- items List<Property Map> - List of model evaluation analysis
GetModelEvaluationResultsEvaluationResultCollectionItem
C#

- DefinedTags Dictionary<string, string> - Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: {"foo-namespace.bar-key": "value"}
- FreeformTags Dictionary<string, string> - Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: {"bar-key": "value"}
- Location string - For CSV format location is rowId(1 is header) and for JSONL location is jsonL line sequence(1 is metadata)
- ModelType string - Model type
- PredictedEntities List<GetModelEvaluationResultsEvaluationResultCollectionItemPredictedEntity> - List of predicted entities by NER model
- PredictedLabels List<string> - List of predicted labels by custom multi class or multi label TextClassification model
- Record string - For CSV format location is rowId(1 is header) and for JSONL location is jsonL line sequence(1 is metadata)
- TrueEntities List<GetModelEvaluationResultsEvaluationResultCollectionItemTrueEntity> - List of true(actual) entities in test data for NER model
- TrueLabels List<string> - List of true(actual) labels in test data for multi class or multi label TextClassification

Go

- DefinedTags map[string]string - Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: {"foo-namespace.bar-key": "value"}
- FreeformTags map[string]string - Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: {"bar-key": "value"}
- Location string - For CSV format location is rowId(1 is header) and for JSONL location is jsonL line sequence(1 is metadata)
- ModelType string - Model type
- PredictedEntities []GetModelEvaluationResultsEvaluationResultCollectionItemPredictedEntity - List of predicted entities by NER model
- PredictedLabels []string - List of predicted labels by custom multi class or multi label TextClassification model
- Record string - For CSV format location is rowId(1 is header) and for JSONL location is jsonL line sequence(1 is metadata)
- TrueEntities []GetModelEvaluationResultsEvaluationResultCollectionItemTrueEntity - List of true(actual) entities in test data for NER model
- TrueLabels []string - List of true(actual) labels in test data for multi class or multi label TextClassification

Java

- definedTags Map<String,String> - Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: {"foo-namespace.bar-key": "value"}
- freeformTags Map<String,String> - Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: {"bar-key": "value"}
- location String - For CSV format location is rowId(1 is header) and for JSONL location is jsonL line sequence(1 is metadata)
- modelType String - Model type
- predictedEntities List<GetModelEvaluationResultsEvaluationResultCollectionItemPredictedEntity> - List of predicted entities by NER model
- predictedLabels List<String> - List of predicted labels by custom multi class or multi label TextClassification model
- record String - For CSV format location is rowId(1 is header) and for JSONL location is jsonL line sequence(1 is metadata)
- trueEntities List<GetModelEvaluationResultsEvaluationResultCollectionItemTrueEntity> - List of true(actual) entities in test data for NER model
- trueLabels List<String> - List of true(actual) labels in test data for multi class or multi label TextClassification

TypeScript

- definedTags {[key: string]: string} - Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: {"foo-namespace.bar-key": "value"}
- freeformTags {[key: string]: string} - Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: {"bar-key": "value"}
- location string - For CSV format location is rowId(1 is header) and for JSONL location is jsonL line sequence(1 is metadata)
- modelType string - Model type
- predictedEntities GetModelEvaluationResultsEvaluationResultCollectionItemPredictedEntity[] - List of predicted entities by NER model
- predictedLabels string[] - List of predicted labels by custom multi class or multi label TextClassification model
- record string - For CSV format location is rowId(1 is header) and for JSONL location is jsonL line sequence(1 is metadata)
- trueEntities GetModelEvaluationResultsEvaluationResultCollectionItemTrueEntity[] - List of true(actual) entities in test data for NER model
- trueLabels string[] - List of true(actual) labels in test data for multi class or multi label TextClassification

Python

- defined_tags Mapping[str, str] - Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: {"foo-namespace.bar-key": "value"}
- freeform_tags Mapping[str, str] - Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: {"bar-key": "value"}
- location str - For CSV format location is rowId(1 is header) and for JSONL location is jsonL line sequence(1 is metadata)
- model_type str - Model type
- predicted_entities Sequence[GetModelEvaluationResultsEvaluationResultCollectionItemPredictedEntity] - List of predicted entities by NER model
- predicted_labels Sequence[str] - List of predicted labels by custom multi class or multi label TextClassification model
- record str - For CSV format location is rowId(1 is header) and for JSONL location is jsonL line sequence(1 is metadata)
- true_entities Sequence[GetModelEvaluationResultsEvaluationResultCollectionItemTrueEntity] - List of true(actual) entities in test data for NER model
- true_labels Sequence[str] - List of true(actual) labels in test data for multi class or multi label TextClassification

YAML

- definedTags Map<String> - Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: {"foo-namespace.bar-key": "value"}
- freeformTags Map<String> - Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: {"bar-key": "value"}
- location String - For CSV format location is rowId(1 is header) and for JSONL location is jsonL line sequence(1 is metadata)
- modelType String - Model type
- predictedEntities List<Property Map> - List of predicted entities by NER model
- predictedLabels List<String> - List of predicted labels by custom multi class or multi label TextClassification model
- record String - For CSV format location is rowId(1 is header) and for JSONL location is jsonL line sequence(1 is metadata)
- trueEntities List<Property Map> - List of true(actual) entities in test data for NER model
- trueLabels List<String> - List of true(actual) labels in test data for multi class or multi label TextClassification
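As a rough illustration of how these item fields fit together, the TypeScript sketch below compares trueLabels against predictedLabels across all evaluated records of a text-classification model. It is illustrative only: it assumes a testModel resource defined elsewhere (as in Example Usage), and the service's own evaluation metrics remain the authoritative measure.

import * as oci from "@pulumi/oci";

// testModel is assumed to be defined elsewhere, as in Example Usage above.
const results = oci.AiLanguage.getModelEvaluationResultsOutput({
    modelId: testModel.id,
});

// Fraction of evaluated records whose predicted label set exactly matches
// the true label set. Illustrative only.
export const exactMatchRate = results.apply(r => {
    let total = 0;
    let matches = 0;
    for (const collection of r.evaluationResultCollections) {
        for (const item of collection.items) {
            total++;
            const truth = [...item.trueLabels].sort().join("|");
            const predicted = [...item.predictedLabels].sort().join("|");
            if (truth === predicted) {
                matches++;
            }
        }
    }
    return total === 0 ? 0 : matches / total;
});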
GetModelEvaluationResultsEvaluationResultCollectionItemPredictedEntity
GetModelEvaluationResultsEvaluationResultCollectionItemTrueEntity
GetModelEvaluationResultsFilter
Package Details
- Repository: oci (pulumi/pulumi-oci)
- License: Apache-2.0
- Notes: This Pulumi package is based on the oci Terraform Provider.
