1. Packages
  2. AWS
  3. API Docs
  4. sagemaker
  5. Model
AWS v6.78.0 published on Thursday, Apr 24, 2025 by Pulumi

aws.sagemaker.Model

Explore with Pulumi AI

Provides a SageMaker AI model resource.

Example Usage

Basic usage:

import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const assumeRole = aws.iam.getPolicyDocument({
    statements: [{
        actions: ["sts:AssumeRole"],
        principals: [{
            type: "Service",
            identifiers: ["sagemaker.amazonaws.com"],
        }],
    }],
});
const exampleRole = new aws.iam.Role("example", {assumeRolePolicy: assumeRole.then(assumeRole => assumeRole.json)});
const test = aws.sagemaker.getPrebuiltEcrImage({
    repositoryName: "kmeans",
});
const example = new aws.sagemaker.Model("example", {
    name: "my-model",
    executionRoleArn: exampleRole.arn,
    primaryContainer: {
        image: test.then(test => test.registryPath),
    },
});
Copy
import pulumi
import pulumi_aws as aws

assume_role = aws.iam.get_policy_document(statements=[{
    "actions": ["sts:AssumeRole"],
    "principals": [{
        "type": "Service",
        "identifiers": ["sagemaker.amazonaws.com"],
    }],
}])
example_role = aws.iam.Role("example", assume_role_policy=assume_role.json)
test = aws.sagemaker.get_prebuilt_ecr_image(repository_name="kmeans")
example = aws.sagemaker.Model("example",
    name="my-model",
    execution_role_arn=example_role.arn,
    primary_container={
        "image": test.registry_path,
    })
Copy
package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam"
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/sagemaker"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		assumeRole, err := iam.GetPolicyDocument(ctx, &iam.GetPolicyDocumentArgs{
			Statements: []iam.GetPolicyDocumentStatement{
				{
					Actions: []string{
						"sts:AssumeRole",
					},
					Principals: []iam.GetPolicyDocumentStatementPrincipal{
						{
							Type: "Service",
							Identifiers: []string{
								"sagemaker.amazonaws.com",
							},
						},
					},
				},
			},
		}, nil)
		if err != nil {
			return err
		}
		exampleRole, err := iam.NewRole(ctx, "example", &iam.RoleArgs{
			AssumeRolePolicy: pulumi.String(assumeRole.Json),
		})
		if err != nil {
			return err
		}
		test, err := sagemaker.GetPrebuiltEcrImage(ctx, &sagemaker.GetPrebuiltEcrImageArgs{
			RepositoryName: "kmeans",
		}, nil)
		if err != nil {
			return err
		}
		_, err = sagemaker.NewModel(ctx, "example", &sagemaker.ModelArgs{
			Name:             pulumi.String("my-model"),
			ExecutionRoleArn: exampleRole.Arn,
			PrimaryContainer: &sagemaker.ModelPrimaryContainerArgs{
				Image: pulumi.String(test.RegistryPath),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
Copy
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    var assumeRole = Aws.Iam.GetPolicyDocument.Invoke(new()
    {
        Statements = new[]
        {
            new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs
            {
                Actions = new[]
                {
                    "sts:AssumeRole",
                },
                Principals = new[]
                {
                    new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs
                    {
                        Type = "Service",
                        Identifiers = new[]
                        {
                            "sagemaker.amazonaws.com",
                        },
                    },
                },
            },
        },
    });

    var exampleRole = new Aws.Iam.Role("example", new()
    {
        AssumeRolePolicy = assumeRole.Apply(getPolicyDocumentResult => getPolicyDocumentResult.Json),
    });

    var test = Aws.Sagemaker.GetPrebuiltEcrImage.Invoke(new()
    {
        RepositoryName = "kmeans",
    });

    var example = new Aws.Sagemaker.Model("example", new()
    {
        Name = "my-model",
        ExecutionRoleArn = exampleRole.Arn,
        PrimaryContainer = new Aws.Sagemaker.Inputs.ModelPrimaryContainerArgs
        {
            Image = test.Apply(getPrebuiltEcrImageResult => getPrebuiltEcrImageResult.RegistryPath),
        },
    });

});
Copy
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.iam.IamFunctions;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementArgs;
import com.pulumi.aws.iam.inputs.GetPolicyDocumentStatementPrincipalArgs;
import com.pulumi.aws.iam.Role;
import com.pulumi.aws.iam.RoleArgs;
import com.pulumi.aws.sagemaker.SagemakerFunctions;
import com.pulumi.aws.sagemaker.inputs.GetPrebuiltEcrImageArgs;
import com.pulumi.aws.sagemaker.Model;
import com.pulumi.aws.sagemaker.ModelArgs;
import com.pulumi.aws.sagemaker.inputs.ModelPrimaryContainerArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        final var assumeRole = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()
            .statements(GetPolicyDocumentStatementArgs.builder()
                .actions("sts:AssumeRole")
                .principals(GetPolicyDocumentStatementPrincipalArgs.builder()
                    .type("Service")
                    .identifiers("sagemaker.amazonaws.com")
                    .build())
                .build())
            .build());

        var exampleRole = new Role("exampleRole", RoleArgs.builder()
            .assumeRolePolicy(assumeRole.json())
            .build());

        final var test = SagemakerFunctions.getPrebuiltEcrImage(GetPrebuiltEcrImageArgs.builder()
            .repositoryName("kmeans")
            .build());

        var example = new Model("example", ModelArgs.builder()
            .name("my-model")
            .executionRoleArn(exampleRole.arn())
            .primaryContainer(ModelPrimaryContainerArgs.builder()
                .image(test.registryPath())
                .build())
            .build());

    }
}
Copy
resources:
  example:
    type: aws:sagemaker:Model
    properties:
      name: my-model
      executionRoleArn: ${exampleRole.arn}
      primaryContainer:
        image: ${test.registryPath}
  exampleRole:
    type: aws:iam:Role
    name: example
    properties:
      assumeRolePolicy: ${assumeRole.json}
variables:
  assumeRole:
    fn::invoke:
      function: aws:iam:getPolicyDocument
      arguments:
        statements:
          - actions:
              - sts:AssumeRole
            principals:
              - type: Service
                identifiers:
                  - sagemaker.amazonaws.com
  test:
    fn::invoke:
      function: aws:sagemaker:getPrebuiltEcrImage
      arguments:
        repositoryName: kmeans
Copy

Inference Execution Config

  • mode - (Required) How containers in a multi-container endpoint are run. The following values are valid: Serial and Direct.

Create Model Resource

Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

Constructor syntax

new Model(name: string, args: ModelArgs, opts?: CustomResourceOptions);
@overload
def Model(resource_name: str,
          args: ModelArgs,
          opts: Optional[ResourceOptions] = None)

@overload
def Model(resource_name: str,
          opts: Optional[ResourceOptions] = None,
          execution_role_arn: Optional[str] = None,
          containers: Optional[Sequence[ModelContainerArgs]] = None,
          enable_network_isolation: Optional[bool] = None,
          inference_execution_config: Optional[ModelInferenceExecutionConfigArgs] = None,
          name: Optional[str] = None,
          primary_container: Optional[ModelPrimaryContainerArgs] = None,
          tags: Optional[Mapping[str, str]] = None,
          vpc_config: Optional[ModelVpcConfigArgs] = None)
func NewModel(ctx *Context, name string, args ModelArgs, opts ...ResourceOption) (*Model, error)
public Model(string name, ModelArgs args, CustomResourceOptions? opts = null)
public Model(String name, ModelArgs args)
public Model(String name, ModelArgs args, CustomResourceOptions options)
type: aws:sagemaker:Model
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.

Parameters

name This property is required. string
The unique name of the resource.
args This property is required. ModelArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
resource_name This property is required. str
The unique name of the resource.
args This property is required. ModelArgs
The arguments to resource properties.
opts ResourceOptions
Bag of options to control resource's behavior.
ctx Context
Context object for the current deployment.
name This property is required. string
The unique name of the resource.
args This property is required. ModelArgs
The arguments to resource properties.
opts ResourceOption
Bag of options to control resource's behavior.
name This property is required. string
The unique name of the resource.
args This property is required. ModelArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
name This property is required. String
The unique name of the resource.
args This property is required. ModelArgs
The arguments to resource properties.
options CustomResourceOptions
Bag of options to control resource's behavior.

Constructor example

The following reference example uses placeholder values for all input properties.

var examplemodelResourceResourceFromSagemakermodel = new Aws.Sagemaker.Model("examplemodelResourceResourceFromSagemakermodel", new()
{
    ExecutionRoleArn = "string",
    Containers = new[]
    {
        new Aws.Sagemaker.Inputs.ModelContainerArgs
        {
            ContainerHostname = "string",
            Environment = 
            {
                { "string", "string" },
            },
            Image = "string",
            ImageConfig = new Aws.Sagemaker.Inputs.ModelContainerImageConfigArgs
            {
                RepositoryAccessMode = "string",
                RepositoryAuthConfig = new Aws.Sagemaker.Inputs.ModelContainerImageConfigRepositoryAuthConfigArgs
                {
                    RepositoryCredentialsProviderArn = "string",
                },
            },
            InferenceSpecificationName = "string",
            Mode = "string",
            ModelDataSource = new Aws.Sagemaker.Inputs.ModelContainerModelDataSourceArgs
            {
                S3DataSources = new[]
                {
                    new Aws.Sagemaker.Inputs.ModelContainerModelDataSourceS3DataSourceArgs
                    {
                        CompressionType = "string",
                        S3DataType = "string",
                        S3Uri = "string",
                        ModelAccessConfig = new Aws.Sagemaker.Inputs.ModelContainerModelDataSourceS3DataSourceModelAccessConfigArgs
                        {
                            AcceptEula = false,
                        },
                    },
                },
            },
            ModelDataUrl = "string",
            ModelPackageName = "string",
            MultiModelConfig = new Aws.Sagemaker.Inputs.ModelContainerMultiModelConfigArgs
            {
                ModelCacheSetting = "string",
            },
        },
    },
    EnableNetworkIsolation = false,
    InferenceExecutionConfig = new Aws.Sagemaker.Inputs.ModelInferenceExecutionConfigArgs
    {
        Mode = "string",
    },
    Name = "string",
    PrimaryContainer = new Aws.Sagemaker.Inputs.ModelPrimaryContainerArgs
    {
        ContainerHostname = "string",
        Environment = 
        {
            { "string", "string" },
        },
        Image = "string",
        ImageConfig = new Aws.Sagemaker.Inputs.ModelPrimaryContainerImageConfigArgs
        {
            RepositoryAccessMode = "string",
            RepositoryAuthConfig = new Aws.Sagemaker.Inputs.ModelPrimaryContainerImageConfigRepositoryAuthConfigArgs
            {
                RepositoryCredentialsProviderArn = "string",
            },
        },
        InferenceSpecificationName = "string",
        Mode = "string",
        ModelDataSource = new Aws.Sagemaker.Inputs.ModelPrimaryContainerModelDataSourceArgs
        {
            S3DataSources = new[]
            {
                new Aws.Sagemaker.Inputs.ModelPrimaryContainerModelDataSourceS3DataSourceArgs
                {
                    CompressionType = "string",
                    S3DataType = "string",
                    S3Uri = "string",
                    ModelAccessConfig = new Aws.Sagemaker.Inputs.ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfigArgs
                    {
                        AcceptEula = false,
                    },
                },
            },
        },
        ModelDataUrl = "string",
        ModelPackageName = "string",
        MultiModelConfig = new Aws.Sagemaker.Inputs.ModelPrimaryContainerMultiModelConfigArgs
        {
            ModelCacheSetting = "string",
        },
    },
    Tags = 
    {
        { "string", "string" },
    },
    VpcConfig = new Aws.Sagemaker.Inputs.ModelVpcConfigArgs
    {
        SecurityGroupIds = new[]
        {
            "string",
        },
        Subnets = new[]
        {
            "string",
        },
    },
});
Copy
example, err := sagemaker.NewModel(ctx, "examplemodelResourceResourceFromSagemakermodel", &sagemaker.ModelArgs{
	ExecutionRoleArn: pulumi.String("string"),
	Containers: sagemaker.ModelContainerArray{
		&sagemaker.ModelContainerArgs{
			ContainerHostname: pulumi.String("string"),
			Environment: pulumi.StringMap{
				"string": pulumi.String("string"),
			},
			Image: pulumi.String("string"),
			ImageConfig: &sagemaker.ModelContainerImageConfigArgs{
				RepositoryAccessMode: pulumi.String("string"),
				RepositoryAuthConfig: &sagemaker.ModelContainerImageConfigRepositoryAuthConfigArgs{
					RepositoryCredentialsProviderArn: pulumi.String("string"),
				},
			},
			InferenceSpecificationName: pulumi.String("string"),
			Mode:                       pulumi.String("string"),
			ModelDataSource: &sagemaker.ModelContainerModelDataSourceArgs{
				S3DataSources: sagemaker.ModelContainerModelDataSourceS3DataSourceArray{
					&sagemaker.ModelContainerModelDataSourceS3DataSourceArgs{
						CompressionType: pulumi.String("string"),
						S3DataType:      pulumi.String("string"),
						S3Uri:           pulumi.String("string"),
						ModelAccessConfig: &sagemaker.ModelContainerModelDataSourceS3DataSourceModelAccessConfigArgs{
							AcceptEula: pulumi.Bool(false),
						},
					},
				},
			},
			ModelDataUrl:     pulumi.String("string"),
			ModelPackageName: pulumi.String("string"),
			MultiModelConfig: &sagemaker.ModelContainerMultiModelConfigArgs{
				ModelCacheSetting: pulumi.String("string"),
			},
		},
	},
	EnableNetworkIsolation: pulumi.Bool(false),
	InferenceExecutionConfig: &sagemaker.ModelInferenceExecutionConfigArgs{
		Mode: pulumi.String("string"),
	},
	Name: pulumi.String("string"),
	PrimaryContainer: &sagemaker.ModelPrimaryContainerArgs{
		ContainerHostname: pulumi.String("string"),
		Environment: pulumi.StringMap{
			"string": pulumi.String("string"),
		},
		Image: pulumi.String("string"),
		ImageConfig: &sagemaker.ModelPrimaryContainerImageConfigArgs{
			RepositoryAccessMode: pulumi.String("string"),
			RepositoryAuthConfig: &sagemaker.ModelPrimaryContainerImageConfigRepositoryAuthConfigArgs{
				RepositoryCredentialsProviderArn: pulumi.String("string"),
			},
		},
		InferenceSpecificationName: pulumi.String("string"),
		Mode:                       pulumi.String("string"),
		ModelDataSource: &sagemaker.ModelPrimaryContainerModelDataSourceArgs{
			S3DataSources: sagemaker.ModelPrimaryContainerModelDataSourceS3DataSourceArray{
				&sagemaker.ModelPrimaryContainerModelDataSourceS3DataSourceArgs{
					CompressionType: pulumi.String("string"),
					S3DataType:      pulumi.String("string"),
					S3Uri:           pulumi.String("string"),
					ModelAccessConfig: &sagemaker.ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfigArgs{
						AcceptEula: pulumi.Bool(false),
					},
				},
			},
		},
		ModelDataUrl:     pulumi.String("string"),
		ModelPackageName: pulumi.String("string"),
		MultiModelConfig: &sagemaker.ModelPrimaryContainerMultiModelConfigArgs{
			ModelCacheSetting: pulumi.String("string"),
		},
	},
	Tags: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
	VpcConfig: &sagemaker.ModelVpcConfigArgs{
		SecurityGroupIds: pulumi.StringArray{
			pulumi.String("string"),
		},
		Subnets: pulumi.StringArray{
			pulumi.String("string"),
		},
	},
})
Copy
var examplemodelResourceResourceFromSagemakermodel = new com.pulumi.aws.sagemaker.Model("examplemodelResourceResourceFromSagemakermodel", com.pulumi.aws.sagemaker.ModelArgs.builder()
    .executionRoleArn("string")
    .containers(ModelContainerArgs.builder()
        .containerHostname("string")
        .environment(Map.of("string", "string"))
        .image("string")
        .imageConfig(ModelContainerImageConfigArgs.builder()
            .repositoryAccessMode("string")
            .repositoryAuthConfig(ModelContainerImageConfigRepositoryAuthConfigArgs.builder()
                .repositoryCredentialsProviderArn("string")
                .build())
            .build())
        .inferenceSpecificationName("string")
        .mode("string")
        .modelDataSource(ModelContainerModelDataSourceArgs.builder()
            .s3DataSources(ModelContainerModelDataSourceS3DataSourceArgs.builder()
                .compressionType("string")
                .s3DataType("string")
                .s3Uri("string")
                .modelAccessConfig(ModelContainerModelDataSourceS3DataSourceModelAccessConfigArgs.builder()
                    .acceptEula(false)
                    .build())
                .build())
            .build())
        .modelDataUrl("string")
        .modelPackageName("string")
        .multiModelConfig(ModelContainerMultiModelConfigArgs.builder()
            .modelCacheSetting("string")
            .build())
        .build())
    .enableNetworkIsolation(false)
    .inferenceExecutionConfig(ModelInferenceExecutionConfigArgs.builder()
        .mode("string")
        .build())
    .name("string")
    .primaryContainer(ModelPrimaryContainerArgs.builder()
        .containerHostname("string")
        .environment(Map.of("string", "string"))
        .image("string")
        .imageConfig(ModelPrimaryContainerImageConfigArgs.builder()
            .repositoryAccessMode("string")
            .repositoryAuthConfig(ModelPrimaryContainerImageConfigRepositoryAuthConfigArgs.builder()
                .repositoryCredentialsProviderArn("string")
                .build())
            .build())
        .inferenceSpecificationName("string")
        .mode("string")
        .modelDataSource(ModelPrimaryContainerModelDataSourceArgs.builder()
            .s3DataSources(ModelPrimaryContainerModelDataSourceS3DataSourceArgs.builder()
                .compressionType("string")
                .s3DataType("string")
                .s3Uri("string")
                .modelAccessConfig(ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfigArgs.builder()
                    .acceptEula(false)
                    .build())
                .build())
            .build())
        .modelDataUrl("string")
        .modelPackageName("string")
        .multiModelConfig(ModelPrimaryContainerMultiModelConfigArgs.builder()
            .modelCacheSetting("string")
            .build())
        .build())
    .tags(Map.of("string", "string"))
    .vpcConfig(ModelVpcConfigArgs.builder()
        .securityGroupIds("string")
        .subnets("string")
        .build())
    .build());
Copy
examplemodel_resource_resource_from_sagemakermodel = aws.sagemaker.Model("examplemodelResourceResourceFromSagemakermodel",
    execution_role_arn="string",
    containers=[{
        "container_hostname": "string",
        "environment": {
            "string": "string",
        },
        "image": "string",
        "image_config": {
            "repository_access_mode": "string",
            "repository_auth_config": {
                "repository_credentials_provider_arn": "string",
            },
        },
        "inference_specification_name": "string",
        "mode": "string",
        "model_data_source": {
            "s3_data_sources": [{
                "compression_type": "string",
                "s3_data_type": "string",
                "s3_uri": "string",
                "model_access_config": {
                    "accept_eula": False,
                },
            }],
        },
        "model_data_url": "string",
        "model_package_name": "string",
        "multi_model_config": {
            "model_cache_setting": "string",
        },
    }],
    enable_network_isolation=False,
    inference_execution_config={
        "mode": "string",
    },
    name="string",
    primary_container={
        "container_hostname": "string",
        "environment": {
            "string": "string",
        },
        "image": "string",
        "image_config": {
            "repository_access_mode": "string",
            "repository_auth_config": {
                "repository_credentials_provider_arn": "string",
            },
        },
        "inference_specification_name": "string",
        "mode": "string",
        "model_data_source": {
            "s3_data_sources": [{
                "compression_type": "string",
                "s3_data_type": "string",
                "s3_uri": "string",
                "model_access_config": {
                    "accept_eula": False,
                },
            }],
        },
        "model_data_url": "string",
        "model_package_name": "string",
        "multi_model_config": {
            "model_cache_setting": "string",
        },
    },
    tags={
        "string": "string",
    },
    vpc_config={
        "security_group_ids": ["string"],
        "subnets": ["string"],
    })
Copy
const examplemodelResourceResourceFromSagemakermodel = new aws.sagemaker.Model("examplemodelResourceResourceFromSagemakermodel", {
    executionRoleArn: "string",
    containers: [{
        containerHostname: "string",
        environment: {
            string: "string",
        },
        image: "string",
        imageConfig: {
            repositoryAccessMode: "string",
            repositoryAuthConfig: {
                repositoryCredentialsProviderArn: "string",
            },
        },
        inferenceSpecificationName: "string",
        mode: "string",
        modelDataSource: {
            s3DataSources: [{
                compressionType: "string",
                s3DataType: "string",
                s3Uri: "string",
                modelAccessConfig: {
                    acceptEula: false,
                },
            }],
        },
        modelDataUrl: "string",
        modelPackageName: "string",
        multiModelConfig: {
            modelCacheSetting: "string",
        },
    }],
    enableNetworkIsolation: false,
    inferenceExecutionConfig: {
        mode: "string",
    },
    name: "string",
    primaryContainer: {
        containerHostname: "string",
        environment: {
            string: "string",
        },
        image: "string",
        imageConfig: {
            repositoryAccessMode: "string",
            repositoryAuthConfig: {
                repositoryCredentialsProviderArn: "string",
            },
        },
        inferenceSpecificationName: "string",
        mode: "string",
        modelDataSource: {
            s3DataSources: [{
                compressionType: "string",
                s3DataType: "string",
                s3Uri: "string",
                modelAccessConfig: {
                    acceptEula: false,
                },
            }],
        },
        modelDataUrl: "string",
        modelPackageName: "string",
        multiModelConfig: {
            modelCacheSetting: "string",
        },
    },
    tags: {
        string: "string",
    },
    vpcConfig: {
        securityGroupIds: ["string"],
        subnets: ["string"],
    },
});
Copy
type: aws:sagemaker:Model
properties:
    containers:
        - containerHostname: string
          environment:
            string: string
          image: string
          imageConfig:
            repositoryAccessMode: string
            repositoryAuthConfig:
                repositoryCredentialsProviderArn: string
          inferenceSpecificationName: string
          mode: string
          modelDataSource:
            s3DataSources:
                - compressionType: string
                  modelAccessConfig:
                    acceptEula: false
                  s3DataType: string
                  s3Uri: string
          modelDataUrl: string
          modelPackageName: string
          multiModelConfig:
            modelCacheSetting: string
    enableNetworkIsolation: false
    executionRoleArn: string
    inferenceExecutionConfig:
        mode: string
    name: string
    primaryContainer:
        containerHostname: string
        environment:
            string: string
        image: string
        imageConfig:
            repositoryAccessMode: string
            repositoryAuthConfig:
                repositoryCredentialsProviderArn: string
        inferenceSpecificationName: string
        mode: string
        modelDataSource:
            s3DataSources:
                - compressionType: string
                  modelAccessConfig:
                    acceptEula: false
                  s3DataType: string
                  s3Uri: string
        modelDataUrl: string
        modelPackageName: string
        multiModelConfig:
            modelCacheSetting: string
    tags:
        string: string
    vpcConfig:
        securityGroupIds:
            - string
        subnets:
            - string
Copy

Model Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

Inputs

In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

The Model resource accepts the following input properties:

ExecutionRoleArn
This property is required.
Changes to this property will trigger replacement.
string
A role that SageMaker AI can assume to access model artifacts and docker images for deployment.
Containers List<ModelContainer>
Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
EnableNetworkIsolation Changes to this property will trigger replacement. bool
Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
InferenceExecutionConfig Changes to this property will trigger replacement. ModelInferenceExecutionConfig
Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
Name Changes to this property will trigger replacement. string
The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
PrimaryContainer ModelPrimaryContainer
The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the containers argument is required. Fields are documented below.
Tags Dictionary<string, string>
A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
VpcConfig Changes to this property will trigger replacement. ModelVpcConfig
Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
ExecutionRoleArn
This property is required.
Changes to this property will trigger replacement.
string
A role that SageMaker AI can assume to access model artifacts and docker images for deployment.
Containers []ModelContainerArgs
Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
EnableNetworkIsolation Changes to this property will trigger replacement. bool
Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
InferenceExecutionConfig Changes to this property will trigger replacement. ModelInferenceExecutionConfigArgs
Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
Name Changes to this property will trigger replacement. string
The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
PrimaryContainer ModelPrimaryContainerArgs
The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the containers argument is required. Fields are documented below.
Tags map[string]string
A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
VpcConfig Changes to this property will trigger replacement. ModelVpcConfigArgs
Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
executionRoleArn
This property is required.
Changes to this property will trigger replacement.
String
A role that SageMaker AI can assume to access model artifacts and docker images for deployment.
containers List<ModelContainer>
Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
enableNetworkIsolation Changes to this property will trigger replacement. Boolean
Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
inferenceExecutionConfig Changes to this property will trigger replacement. ModelInferenceExecutionConfig
Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
name Changes to this property will trigger replacement. String
The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
primaryContainer ModelPrimaryContainer
The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the containers argument is required. Fields are documented below.
tags Map<String,String>
A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
vpcConfig Changes to this property will trigger replacement. ModelVpcConfig
Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
executionRoleArn
This property is required.
Changes to this property will trigger replacement.
string
A role that SageMaker AI can assume to access model artifacts and docker images for deployment.
containers ModelContainer[]
Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
enableNetworkIsolation Changes to this property will trigger replacement. boolean
Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
inferenceExecutionConfig Changes to this property will trigger replacement. ModelInferenceExecutionConfig
Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
name Changes to this property will trigger replacement. string
The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
primaryContainer ModelPrimaryContainer
The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
tags {[key: string]: string}
A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
vpcConfig Changes to this property will trigger replacement. ModelVpcConfig
Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
execution_role_arn
This property is required.
Changes to this property will trigger replacement.
str
A role that SageMaker AI can assume to access model artifacts and docker images for deployment.
containers Sequence[ModelContainerArgs]
Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
enable_network_isolation Changes to this property will trigger replacement. bool
Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
inference_execution_config Changes to this property will trigger replacement. ModelInferenceExecutionConfigArgs
Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
name Changes to this property will trigger replacement. str
The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
primary_container ModelPrimaryContainerArgs
The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
tags Mapping[str, str]
A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
vpc_config Changes to this property will trigger replacement. ModelVpcConfigArgs
Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
executionRoleArn
This property is required.
Changes to this property will trigger replacement.
String
A role that SageMaker AI can assume to access model artifacts and docker images for deployment.
containers List<Property Map>
Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
enableNetworkIsolation Changes to this property will trigger replacement. Boolean
Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
inferenceExecutionConfig Changes to this property will trigger replacement. Property Map
Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
name Changes to this property will trigger replacement. String
The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
primaryContainer Property Map
The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
tags Map<String>
A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
vpcConfig Changes to this property will trigger replacement. Property Map
Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.

Outputs

All input properties are implicitly available as output properties. Additionally, the Model resource produces the following output properties:

Arn string
The Amazon Resource Name (ARN) assigned by AWS to this model.
Id string
The provider-assigned unique ID for this managed resource.
TagsAll Dictionary<string, string>
A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

Arn string
The Amazon Resource Name (ARN) assigned by AWS to this model.
Id string
The provider-assigned unique ID for this managed resource.
TagsAll map[string]string
A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

arn String
The Amazon Resource Name (ARN) assigned by AWS to this model.
id String
The provider-assigned unique ID for this managed resource.
tagsAll Map<String,String>
A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

arn string
The Amazon Resource Name (ARN) assigned by AWS to this model.
id string
The provider-assigned unique ID for this managed resource.
tagsAll {[key: string]: string}
A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

arn str
The Amazon Resource Name (ARN) assigned by AWS to this model.
id str
The provider-assigned unique ID for this managed resource.
tags_all Mapping[str, str]
A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

arn String
The Amazon Resource Name (ARN) assigned by AWS to this model.
id String
The provider-assigned unique ID for this managed resource.
tagsAll Map<String>
A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

Look up Existing Model Resource

Get an existing Model resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: ModelState, opts?: CustomResourceOptions): Model
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        arn: Optional[str] = None,
        containers: Optional[Sequence[ModelContainerArgs]] = None,
        enable_network_isolation: Optional[bool] = None,
        execution_role_arn: Optional[str] = None,
        inference_execution_config: Optional[ModelInferenceExecutionConfigArgs] = None,
        name: Optional[str] = None,
        primary_container: Optional[ModelPrimaryContainerArgs] = None,
        tags: Optional[Mapping[str, str]] = None,
        tags_all: Optional[Mapping[str, str]] = None,
        vpc_config: Optional[ModelVpcConfigArgs] = None) -> Model
func GetModel(ctx *Context, name string, id IDInput, state *ModelState, opts ...ResourceOption) (*Model, error)
public static Model Get(string name, Input<string> id, ModelState? state, CustomResourceOptions? opts = null)
public static Model get(String name, Output<String> id, ModelState state, CustomResourceOptions options)
resources:  _:    type: aws:sagemaker:Model    get:      id: ${id}
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
The following state arguments are supported:
Arn string
The Amazon Resource Name (ARN) assigned by AWS to this model.
Containers List<ModelContainer>
Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
EnableNetworkIsolation Changes to this property will trigger replacement. bool
Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
ExecutionRoleArn Changes to this property will trigger replacement. string
A role that SageMaker AI can assume to access model artifacts and docker images for deployment.
InferenceExecutionConfig Changes to this property will trigger replacement. ModelInferenceExecutionConfig
Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
Name Changes to this property will trigger replacement. string
The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
PrimaryContainer ModelPrimaryContainer
The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
Tags Dictionary<string, string>
A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
TagsAll Dictionary<string, string>
A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

VpcConfig Changes to this property will trigger replacement. ModelVpcConfig
Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
Arn string
The Amazon Resource Name (ARN) assigned by AWS to this model.
Containers []ModelContainerArgs
Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
EnableNetworkIsolation Changes to this property will trigger replacement. bool
Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
ExecutionRoleArn Changes to this property will trigger replacement. string
A role that SageMaker AI can assume to access model artifacts and docker images for deployment.
InferenceExecutionConfig Changes to this property will trigger replacement. ModelInferenceExecutionConfigArgs
Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
Name Changes to this property will trigger replacement. string
The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
PrimaryContainer ModelPrimaryContainerArgs
The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
Tags map[string]string
A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
TagsAll map[string]string
A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

VpcConfig Changes to this property will trigger replacement. ModelVpcConfigArgs
Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
arn String
The Amazon Resource Name (ARN) assigned by AWS to this model.
containers List<ModelContainer>
Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
enableNetworkIsolation Changes to this property will trigger replacement. Boolean
Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
executionRoleArn Changes to this property will trigger replacement. String
A role that SageMaker AI can assume to access model artifacts and docker images for deployment.
inferenceExecutionConfig Changes to this property will trigger replacement. ModelInferenceExecutionConfig
Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
name Changes to this property will trigger replacement. String
The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
primaryContainer ModelPrimaryContainer
The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
tags Map<String,String>
A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
tagsAll Map<String,String>
A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

vpcConfig Changes to this property will trigger replacement. ModelVpcConfig
Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
arn string
The Amazon Resource Name (ARN) assigned by AWS to this model.
containers ModelContainer[]
Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
enableNetworkIsolation Changes to this property will trigger replacement. boolean
Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
executionRoleArn Changes to this property will trigger replacement. string
A role that SageMaker AI can assume to access model artifacts and docker images for deployment.
inferenceExecutionConfig Changes to this property will trigger replacement. ModelInferenceExecutionConfig
Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
name Changes to this property will trigger replacement. string
The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
primaryContainer ModelPrimaryContainer
The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
tags {[key: string]: string}
A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
tagsAll {[key: string]: string}
A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

vpcConfig Changes to this property will trigger replacement. ModelVpcConfig
Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
arn str
The Amazon Resource Name (ARN) assigned by AWS to this model.
containers Sequence[ModelContainerArgs]
Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
enable_network_isolation Changes to this property will trigger replacement. bool
Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
execution_role_arn Changes to this property will trigger replacement. str
A role that SageMaker AI can assume to access model artifacts and docker images for deployment.
inference_execution_config Changes to this property will trigger replacement. ModelInferenceExecutionConfigArgs
Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
name Changes to this property will trigger replacement. str
The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
primary_container ModelPrimaryContainerArgs
The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
tags Mapping[str, str]
A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
tags_all Mapping[str, str]
A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

vpc_config Changes to this property will trigger replacement. ModelVpcConfigArgs
Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.
arn String
The Amazon Resource Name (ARN) assigned by AWS to this model.
containers List<Property Map>
Specifies containers in the inference pipeline. If not specified, the primary_container argument is required. Fields are documented below.
enableNetworkIsolation Changes to this property will trigger replacement. Boolean
Isolates the model container. No inbound or outbound network calls can be made to or from the model container.
executionRoleArn Changes to this property will trigger replacement. String
A role that SageMaker AI can assume to access model artifacts and docker images for deployment.
inferenceExecutionConfig Changes to this property will trigger replacement. Property Map
Specifies details of how containers in a multi-container endpoint are called. see Inference Execution Config.
name Changes to this property will trigger replacement. String
The name of the model (must be unique). If omitted, this provider will assign a random, unique name.
primaryContainer Property Map
The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the container argument is required. Fields are documented below.
tags Map<String>
A map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
tagsAll Map<String>
A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

vpcConfig Changes to this property will trigger replacement. Property Map
Specifies the VPC that you want your model to connect to. VpcConfig is used in hosting services and in batch transform.

Supporting Types

ModelContainer
, ModelContainerArgs

ContainerHostname Changes to this property will trigger replacement. string
The DNS host name for the container.
Environment Changes to this property will trigger replacement. Dictionary<string, string>
Environment variables for the Docker container. A list of key value pairs.
Image Changes to this property will trigger replacement. string
The registry path where the inference code image is stored in Amazon ECR.
ImageConfig ModelContainerImageConfig
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
InferenceSpecificationName Changes to this property will trigger replacement. string
The inference specification name in the model package version.
Mode Changes to this property will trigger replacement. string
Whether the container hosts a single model or multiple models. Valid values are SingleModel and MultiModel. The default value is SingleModel.
ModelDataSource ModelContainerModelDataSource
The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker AI Developer Guide.
ModelDataUrl Changes to this property will trigger replacement. string
The URL for the S3 location where model artifacts are stored.
ModelPackageName Changes to this property will trigger replacement. string
The Amazon Resource Name (ARN) of the model package to use to create the model.
MultiModelConfig Changes to this property will trigger replacement. ModelContainerMultiModelConfig
Specifies additional configuration for multi-model endpoints. see Multi Model Config.
ContainerHostname Changes to this property will trigger replacement. string
The DNS host name for the container.
Environment Changes to this property will trigger replacement. map[string]string
Environment variables for the Docker container. A list of key value pairs.
Image Changes to this property will trigger replacement. string
The registry path where the inference code image is stored in Amazon ECR.
ImageConfig ModelContainerImageConfig
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
InferenceSpecificationName Changes to this property will trigger replacement. string
The inference specification name in the model package version.
Mode Changes to this property will trigger replacement. string
Whether the container hosts a single model or multiple models. Valid values are SingleModel and MultiModel. The default value is SingleModel.
ModelDataSource ModelContainerModelDataSource
The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker AI Developer Guide.
ModelDataUrl Changes to this property will trigger replacement. string
The URL for the S3 location where model artifacts are stored.
ModelPackageName Changes to this property will trigger replacement. string
The Amazon Resource Name (ARN) of the model package to use to create the model.
MultiModelConfig Changes to this property will trigger replacement. ModelContainerMultiModelConfig
Specifies additional configuration for multi-model endpoints. see Multi Model Config.
containerHostname Changes to this property will trigger replacement. String
The DNS host name for the container.
environment Changes to this property will trigger replacement. Map<String,String>
Environment variables for the Docker container. A list of key value pairs.
image Changes to this property will trigger replacement. String
The registry path where the inference code image is stored in Amazon ECR.
imageConfig ModelContainerImageConfig
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
inferenceSpecificationName Changes to this property will trigger replacement. String
The inference specification name in the model package version.
mode Changes to this property will trigger replacement. String
Whether the container hosts a single model or multiple models. Valid values are SingleModel and MultiModel. The default value is SingleModel.
modelDataSource ModelContainerModelDataSource
The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker AI Developer Guide.
modelDataUrl Changes to this property will trigger replacement. String
The URL for the S3 location where model artifacts are stored.
modelPackageName Changes to this property will trigger replacement. String
The Amazon Resource Name (ARN) of the model package to use to create the model.
multiModelConfig Changes to this property will trigger replacement. ModelContainerMultiModelConfig
Specifies additional configuration for multi-model endpoints. see Multi Model Config.
containerHostname Changes to this property will trigger replacement. string
The DNS host name for the container.
environment Changes to this property will trigger replacement. {[key: string]: string}
Environment variables for the Docker container. A list of key value pairs.
image Changes to this property will trigger replacement. string
The registry path where the inference code image is stored in Amazon ECR.
imageConfig ModelContainerImageConfig
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
inferenceSpecificationName Changes to this property will trigger replacement. string
The inference specification name in the model package version.
mode Changes to this property will trigger replacement. string
Whether the container hosts a single model or multiple models. Valid values are SingleModel and MultiModel. The default value is SingleModel.
modelDataSource ModelContainerModelDataSource
The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker AI Developer Guide.
modelDataUrl Changes to this property will trigger replacement. string
The URL for the S3 location where model artifacts are stored.
modelPackageName Changes to this property will trigger replacement. string
The Amazon Resource Name (ARN) of the model package to use to create the model.
multiModelConfig Changes to this property will trigger replacement. ModelContainerMultiModelConfig
Specifies additional configuration for multi-model endpoints. see Multi Model Config.
container_hostname Changes to this property will trigger replacement. str
The DNS host name for the container.
environment Changes to this property will trigger replacement. Mapping[str, str]
Environment variables for the Docker container. A list of key value pairs.
image Changes to this property will trigger replacement. str
The registry path where the inference code image is stored in Amazon ECR.
image_config ModelContainerImageConfig
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
inference_specification_name Changes to this property will trigger replacement. str
The inference specification name in the model package version.
mode Changes to this property will trigger replacement. str
Whether the container hosts a single model or multiple models. Valid values are SingleModel and MultiModel. The default value is SingleModel.
model_data_source ModelContainerModelDataSource
The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker AI Developer Guide.
model_data_url Changes to this property will trigger replacement. str
The URL for the S3 location where model artifacts are stored.
model_package_name Changes to this property will trigger replacement. str
The Amazon Resource Name (ARN) of the model package to use to create the model.
multi_model_config Changes to this property will trigger replacement. ModelContainerMultiModelConfig
Specifies additional configuration for multi-model endpoints. see Multi Model Config.
containerHostname Changes to this property will trigger replacement. String
The DNS host name for the container.
environment Changes to this property will trigger replacement. Map<String>
Environment variables for the Docker container. A list of key value pairs.
image Changes to this property will trigger replacement. String
The registry path where the inference code image is stored in Amazon ECR.
imageConfig Property Map
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
inferenceSpecificationName Changes to this property will trigger replacement. String
The inference specification name in the model package version.
mode Changes to this property will trigger replacement. String
Whether the container hosts a single model or multiple models. Valid values are SingleModel and MultiModel. The default value is SingleModel.
modelDataSource Property Map
The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker AI Developer Guide.
modelDataUrl Changes to this property will trigger replacement. String
The URL for the S3 location where model artifacts are stored.
modelPackageName Changes to this property will trigger replacement. String
The Amazon Resource Name (ARN) of the model package to use to create the model.
multiModelConfig Changes to this property will trigger replacement. Property Map
Specifies additional configuration for multi-model endpoints. see Multi Model Config.

ModelContainerImageConfig
, ModelContainerImageConfigArgs

RepositoryAccessMode
This property is required.
Changes to this property will trigger replacement.
string
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
RepositoryAuthConfig ModelContainerImageConfigRepositoryAuthConfig
Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
RepositoryAccessMode
This property is required.
Changes to this property will trigger replacement.
string
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
RepositoryAuthConfig ModelContainerImageConfigRepositoryAuthConfig
Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
repositoryAccessMode
This property is required.
Changes to this property will trigger replacement.
String
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
repositoryAuthConfig ModelContainerImageConfigRepositoryAuthConfig
Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
repositoryAccessMode
This property is required.
Changes to this property will trigger replacement.
string
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
repositoryAuthConfig ModelContainerImageConfigRepositoryAuthConfig
Specifies an authentication configuration for the private Docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
repository_access_mode
This property is required.
Changes to this property will trigger replacement.
str
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
repository_auth_config ModelContainerImageConfigRepositoryAuthConfig
Specifies an authentication configuration for the private Docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
repositoryAccessMode
This property is required.
Changes to this property will trigger replacement.
String
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
repositoryAuthConfig Property Map
Specifies an authentication configuration for the private Docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.

ModelContainerImageConfigRepositoryAuthConfig
, ModelContainerImageConfigRepositoryAuthConfigArgs

RepositoryCredentialsProviderArn
This property is required.
Changes to this property will trigger replacement.
string
The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
RepositoryCredentialsProviderArn
This property is required.
Changes to this property will trigger replacement.
string
The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
repositoryCredentialsProviderArn
This property is required.
Changes to this property will trigger replacement.
String
The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
repositoryCredentialsProviderArn
This property is required.
Changes to this property will trigger replacement.
string
The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
repository_credentials_provider_arn
This property is required.
Changes to this property will trigger replacement.
str
The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
repositoryCredentialsProviderArn
This property is required.
Changes to this property will trigger replacement.
String
The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.

ModelContainerModelDataSource
, ModelContainerModelDataSourceArgs

S3DataSources This property is required. List<ModelContainerModelDataSourceS3DataSource>
The S3 location of model data to deploy.
S3DataSources This property is required. []ModelContainerModelDataSourceS3DataSource
The S3 location of model data to deploy.
s3DataSources This property is required. List<ModelContainerModelDataSourceS3DataSource>
The S3 location of model data to deploy.
s3DataSources This property is required. ModelContainerModelDataSourceS3DataSource[]
The S3 location of model data to deploy.
s3_data_sources This property is required. Sequence[ModelContainerModelDataSourceS3DataSource]
The S3 location of model data to deploy.
s3DataSources This property is required. List<Property Map>
The S3 location of model data to deploy.

ModelContainerModelDataSourceS3DataSource
, ModelContainerModelDataSourceS3DataSourceArgs

CompressionType
This property is required.
Changes to this property will trigger replacement.
string
How the model data is prepared. Allowed values are: None and Gzip.
S3DataType
This property is required.
Changes to this property will trigger replacement.
string
The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
S3Uri
This property is required.
Changes to this property will trigger replacement.
string
The S3 path of model data to deploy.
ModelAccessConfig ModelContainerModelDataSourceS3DataSourceModelAccessConfig
Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the model_access_config configuration block. see Model Access Config.
CompressionType
This property is required.
Changes to this property will trigger replacement.
string
How the model data is prepared. Allowed values are: None and Gzip.
S3DataType
This property is required.
Changes to this property will trigger replacement.
string
The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
S3Uri
This property is required.
Changes to this property will trigger replacement.
string
The S3 path of model data to deploy.
ModelAccessConfig ModelContainerModelDataSourceS3DataSourceModelAccessConfig
Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the model_access_config configuration block. see Model Access Config.
compressionType
This property is required.
Changes to this property will trigger replacement.
String
How the model data is prepared. Allowed values are: None and Gzip.
s3DataType
This property is required.
Changes to this property will trigger replacement.
String
The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
s3Uri
This property is required.
Changes to this property will trigger replacement.
String
The S3 path of model data to deploy.
modelAccessConfig ModelContainerModelDataSourceS3DataSourceModelAccessConfig
Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the model_access_config configuration block. see Model Access Config.
compressionType
This property is required.
Changes to this property will trigger replacement.
string
How the model data is prepared. Allowed values are: None and Gzip.
s3DataType
This property is required.
Changes to this property will trigger replacement.
string
The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
s3Uri
This property is required.
Changes to this property will trigger replacement.
string
The S3 path of model data to deploy.
modelAccessConfig ModelContainerModelDataSourceS3DataSourceModelAccessConfig
Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the model_access_config configuration block. see Model Access Config.
compression_type
This property is required.
Changes to this property will trigger replacement.
str
How the model data is prepared. Allowed values are: None and Gzip.
s3_data_type
This property is required.
Changes to this property will trigger replacement.
str
The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
s3_uri
This property is required.
Changes to this property will trigger replacement.
str
The S3 path of model data to deploy.
model_access_config ModelContainerModelDataSourceS3DataSourceModelAccessConfig
Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the model_access_config configuration block. see Model Access Config.
compressionType
This property is required.
Changes to this property will trigger replacement.
String
How the model data is prepared. Allowed values are: None and Gzip.
s3DataType
This property is required.
Changes to this property will trigger replacement.
String
The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
s3Uri
This property is required.
Changes to this property will trigger replacement.
String
The S3 path of model data to deploy.
modelAccessConfig Property Map
Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the model_access_config configuration block. see Model Access Config.

ModelContainerModelDataSourceS3DataSourceModelAccessConfig
, ModelContainerModelDataSourceS3DataSourceModelAccessConfigArgs

AcceptEula
This property is required.
Changes to this property will trigger replacement.
bool
Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
AcceptEula
This property is required.
Changes to this property will trigger replacement.
bool
Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
acceptEula
This property is required.
Changes to this property will trigger replacement.
Boolean
Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
acceptEula
This property is required.
Changes to this property will trigger replacement.
boolean
Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
accept_eula
This property is required.
Changes to this property will trigger replacement.
bool
Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
acceptEula
This property is required.
Changes to this property will trigger replacement.
Boolean
Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.

ModelContainerMultiModelConfig
, ModelContainerMultiModelConfigArgs

ModelCacheSetting Changes to this property will trigger replacement. string
Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
ModelCacheSetting Changes to this property will trigger replacement. string
Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
modelCacheSetting Changes to this property will trigger replacement. String
Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
modelCacheSetting Changes to this property will trigger replacement. string
Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
model_cache_setting Changes to this property will trigger replacement. str
Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
modelCacheSetting Changes to this property will trigger replacement. String
Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.

ModelInferenceExecutionConfig
, ModelInferenceExecutionConfigArgs

Mode This property is required. string
Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.
Mode This property is required. string
Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.
mode This property is required. String
Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.
mode This property is required. string
Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.
mode This property is required. str
Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.
mode This property is required. String
Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.

ModelPrimaryContainer
, ModelPrimaryContainerArgs

ContainerHostname Changes to this property will trigger replacement. string
The DNS host name for the container.
Environment Changes to this property will trigger replacement. Dictionary<string, string>
Environment variables for the Docker container. A list of key value pairs.
Image Changes to this property will trigger replacement. string
The registry path where the inference code image is stored in Amazon ECR.
ImageConfig ModelPrimaryContainerImageConfig
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
InferenceSpecificationName Changes to this property will trigger replacement. string
The inference specification name in the model package version.
Mode Changes to this property will trigger replacement. string
Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.
ModelDataSource ModelPrimaryContainerModelDataSource
The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker AI Developer Guide.
ModelDataUrl Changes to this property will trigger replacement. string
The URL for the S3 location where model artifacts are stored.
ModelPackageName Changes to this property will trigger replacement. string
The Amazon Resource Name (ARN) of the model package to use to create the model.
MultiModelConfig Changes to this property will trigger replacement. ModelPrimaryContainerMultiModelConfig
Specifies additional configuration for multi-model endpoints. see Multi Model Config.
ContainerHostname Changes to this property will trigger replacement. string
The DNS host name for the container.
Environment Changes to this property will trigger replacement. map[string]string
Environment variables for the Docker container. A list of key value pairs.
Image Changes to this property will trigger replacement. string
The registry path where the inference code image is stored in Amazon ECR.
ImageConfig ModelPrimaryContainerImageConfig
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
InferenceSpecificationName Changes to this property will trigger replacement. string
The inference specification name in the model package version.
Mode Changes to this property will trigger replacement. string
Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.
ModelDataSource ModelPrimaryContainerModelDataSource
The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker AI Developer Guide.
ModelDataUrl Changes to this property will trigger replacement. string
The URL for the S3 location where model artifacts are stored.
ModelPackageName Changes to this property will trigger replacement. string
The Amazon Resource Name (ARN) of the model package to use to create the model.
MultiModelConfig Changes to this property will trigger replacement. ModelPrimaryContainerMultiModelConfig
Specifies additional configuration for multi-model endpoints. see Multi Model Config.
containerHostname Changes to this property will trigger replacement. String
The DNS host name for the container.
environment Changes to this property will trigger replacement. Map<String,String>
Environment variables for the Docker container. A list of key value pairs.
image Changes to this property will trigger replacement. String
The registry path where the inference code image is stored in Amazon ECR.
imageConfig ModelPrimaryContainerImageConfig
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
inferenceSpecificationName Changes to this property will trigger replacement. String
The inference specification name in the model package version.
mode Changes to this property will trigger replacement. String
Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.
modelDataSource ModelPrimaryContainerModelDataSource
The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker AI Developer Guide.
modelDataUrl Changes to this property will trigger replacement. String
The URL for the S3 location where model artifacts are stored.
modelPackageName Changes to this property will trigger replacement. String
The Amazon Resource Name (ARN) of the model package to use to create the model.
multiModelConfig Changes to this property will trigger replacement. ModelPrimaryContainerMultiModelConfig
Specifies additional configuration for multi-model endpoints. see Multi Model Config.
containerHostname Changes to this property will trigger replacement. string
The DNS host name for the container.
environment Changes to this property will trigger replacement. {[key: string]: string}
Environment variables for the Docker container. A list of key value pairs.
image Changes to this property will trigger replacement. string
The registry path where the inference code image is stored in Amazon ECR.
imageConfig ModelPrimaryContainerImageConfig
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
inferenceSpecificationName Changes to this property will trigger replacement. string
The inference specification name in the model package version.
mode Changes to this property will trigger replacement. string
Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.
modelDataSource ModelPrimaryContainerModelDataSource
The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker AI Developer Guide.
modelDataUrl Changes to this property will trigger replacement. string
The URL for the S3 location where model artifacts are stored.
modelPackageName Changes to this property will trigger replacement. string
The Amazon Resource Name (ARN) of the model package to use to create the model.
multiModelConfig Changes to this property will trigger replacement. ModelPrimaryContainerMultiModelConfig
Specifies additional configuration for multi-model endpoints. see Multi Model Config.
container_hostname Changes to this property will trigger replacement. str
The DNS host name for the container.
environment Changes to this property will trigger replacement. Mapping[str, str]
Environment variables for the Docker container. A list of key value pairs.
image Changes to this property will trigger replacement. str
The registry path where the inference code image is stored in Amazon ECR.
image_config ModelPrimaryContainerImageConfig
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
inference_specification_name Changes to this property will trigger replacement. str
The inference specification name in the model package version.
mode Changes to this property will trigger replacement. str
Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.
model_data_source ModelPrimaryContainerModelDataSource
The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker AI Developer Guide.
model_data_url Changes to this property will trigger replacement. str
The URL for the S3 location where model artifacts are stored.
model_package_name Changes to this property will trigger replacement. str
The Amazon Resource Name (ARN) of the model package to use to create the model.
multi_model_config Changes to this property will trigger replacement. ModelPrimaryContainerMultiModelConfig
Specifies additional configuration for multi-model endpoints. see Multi Model Config.
containerHostname Changes to this property will trigger replacement. String
The DNS host name for the container.
environment Changes to this property will trigger replacement. Map<String>
Environment variables for the Docker container. A list of key value pairs.
image Changes to this property will trigger replacement. String
The registry path where the inference code image is stored in Amazon ECR.
imageConfig Property Map
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For more information see Using a Private Docker Registry for Real-Time Inference Containers. see Image Config.
inferenceSpecificationName Changes to this property will trigger replacement. String
The inference specification name in the model package version.
mode Changes to this property will trigger replacement. String
Specifies whether the container hosts a single model (SingleModel) or multiple models (MultiModel). The default value is SingleModel.
modelDataSource Property Map
The location of model data to deploy. Use this for uncompressed model deployment. For information about how to deploy an uncompressed model, see Deploying uncompressed models in the AWS SageMaker AI Developer Guide.
modelDataUrl Changes to this property will trigger replacement. String
The URL for the S3 location where model artifacts are stored.
modelPackageName Changes to this property will trigger replacement. String
The Amazon Resource Name (ARN) of the model package to use to create the model.
multiModelConfig Changes to this property will trigger replacement. Property Map
Specifies additional configuration for multi-model endpoints. see Multi Model Config.

ModelPrimaryContainerImageConfig
, ModelPrimaryContainerImageConfigArgs

RepositoryAccessMode
This property is required.
Changes to this property will trigger replacement.
string
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
RepositoryAuthConfig ModelPrimaryContainerImageConfigRepositoryAuthConfig
Specifies an authentication configuration for the private Docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
RepositoryAccessMode
This property is required.
Changes to this property will trigger replacement.
string
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
RepositoryAuthConfig ModelPrimaryContainerImageConfigRepositoryAuthConfig
Specifies an authentication configuration for the private Docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
repositoryAccessMode
This property is required.
Changes to this property will trigger replacement.
String
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
repositoryAuthConfig ModelPrimaryContainerImageConfigRepositoryAuthConfig
Specifies an authentication configuration for the private Docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
repositoryAccessMode
This property is required.
Changes to this property will trigger replacement.
string
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
repositoryAuthConfig ModelPrimaryContainerImageConfigRepositoryAuthConfig
Specifies an authentication configuration for the private Docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
repository_access_mode
This property is required.
Changes to this property will trigger replacement.
str
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
repository_auth_config ModelPrimaryContainerImageConfigRepositoryAuthConfig
Specifies an authentication configuration for the private Docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.
repositoryAccessMode
This property is required.
Changes to this property will trigger replacement.
String
Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). Allowed values are: Platform and Vpc.
repositoryAuthConfig Property Map
Specifies an authentication configuration for the private Docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field, and the private Docker registry where the model image is hosted requires authentication. see Repository Auth Config.

ModelPrimaryContainerImageConfigRepositoryAuthConfig
, ModelPrimaryContainerImageConfigRepositoryAuthConfigArgs

RepositoryCredentialsProviderArn
This property is required.
Changes to this property will trigger replacement.
string
The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
RepositoryCredentialsProviderArn
This property is required.
Changes to this property will trigger replacement.
string
The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
repositoryCredentialsProviderArn
This property is required.
Changes to this property will trigger replacement.
String
The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
repositoryCredentialsProviderArn
This property is required.
Changes to this property will trigger replacement.
string
The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
repository_credentials_provider_arn
This property is required.
Changes to this property will trigger replacement.
str
The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.
repositoryCredentialsProviderArn
This property is required.
Changes to this property will trigger replacement.
String
The Amazon Resource Name (ARN) of an AWS Lambda function that provides credentials to authenticate to the private Docker registry where your model image is hosted. For information about how to create an AWS Lambda function, see Create a Lambda function with the console in the AWS Lambda Developer Guide.

ModelPrimaryContainerModelDataSource
, ModelPrimaryContainerModelDataSourceArgs

S3DataSources This property is required. List<ModelPrimaryContainerModelDataSourceS3DataSource>
The S3 location of model data to deploy.
S3DataSources This property is required. []ModelPrimaryContainerModelDataSourceS3DataSource
The S3 location of model data to deploy.
s3DataSources This property is required. List<ModelPrimaryContainerModelDataSourceS3DataSource>
The S3 location of model data to deploy.
s3DataSources This property is required. ModelPrimaryContainerModelDataSourceS3DataSource[]
The S3 location of model data to deploy.
s3_data_sources This property is required. Sequence[ModelPrimaryContainerModelDataSourceS3DataSource]
The S3 location of model data to deploy.
s3DataSources This property is required. List<Property Map>
The S3 location of model data to deploy.

ModelPrimaryContainerModelDataSourceS3DataSource
, ModelPrimaryContainerModelDataSourceS3DataSourceArgs

CompressionType
This property is required.
Changes to this property will trigger replacement.
string
How the model data is prepared. Allowed values are: None and Gzip.
S3DataType
This property is required.
Changes to this property will trigger replacement.
string
The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
S3Uri
This property is required.
Changes to this property will trigger replacement.
string
The S3 path of model data to deploy.
ModelAccessConfig Changes to this property will trigger replacement. ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfig
Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the model_access_config configuration block. See Model Access Config.
CompressionType
This property is required.
Changes to this property will trigger replacement.
string
How the model data is prepared. Allowed values are: None and Gzip.
S3DataType
This property is required.
Changes to this property will trigger replacement.
string
The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
S3Uri
This property is required.
Changes to this property will trigger replacement.
string
The S3 path of model data to deploy.
ModelAccessConfig Changes to this property will trigger replacement. ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfig
Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the model_access_config configuration block. See Model Access Config.
compressionType
This property is required.
Changes to this property will trigger replacement.
String
How the model data is prepared. Allowed values are: None and Gzip.
s3DataType
This property is required.
Changes to this property will trigger replacement.
String
The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
s3Uri
This property is required.
Changes to this property will trigger replacement.
String
The S3 path of model data to deploy.
modelAccessConfig Changes to this property will trigger replacement. ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfig
Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the model_access_config configuration block. See Model Access Config.
compressionType
This property is required.
Changes to this property will trigger replacement.
string
How the model data is prepared. Allowed values are: None and Gzip.
s3DataType
This property is required.
Changes to this property will trigger replacement.
string
The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
s3Uri
This property is required.
Changes to this property will trigger replacement.
string
The S3 path of model data to deploy.
modelAccessConfig Changes to this property will trigger replacement. ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfig
Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the model_access_config configuration block. See Model Access Config.
compression_type
This property is required.
Changes to this property will trigger replacement.
str
How the model data is prepared. Allowed values are: None and Gzip.
s3_data_type
This property is required.
Changes to this property will trigger replacement.
str
The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
s3_uri
This property is required.
Changes to this property will trigger replacement.
str
The S3 path of model data to deploy.
model_access_config Changes to this property will trigger replacement. ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfig
Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the model_access_config configuration block. See Model Access Config.
compressionType
This property is required.
Changes to this property will trigger replacement.
String
How the model data is prepared. Allowed values are: None and Gzip.
s3DataType
This property is required.
Changes to this property will trigger replacement.
String
The type of model data to deploy. Allowed values are: S3Object and S3Prefix.
s3Uri
This property is required.
Changes to this property will trigger replacement.
String
The S3 path of model data to deploy.
modelAccessConfig Changes to this property will trigger replacement. Property Map
Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the model_access_config configuration block. See Model Access Config.

ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfig
, ModelPrimaryContainerModelDataSourceS3DataSourceModelAccessConfigArgs

AcceptEula
This property is required.
Changes to this property will trigger replacement.
bool
Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
AcceptEula
This property is required.
Changes to this property will trigger replacement.
bool
Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
acceptEula
This property is required.
Changes to this property will trigger replacement.
Boolean
Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
acceptEula
This property is required.
Changes to this property will trigger replacement.
boolean
Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
accept_eula
This property is required.
Changes to this property will trigger replacement.
bool
Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
acceptEula
This property is required.
Changes to this property will trigger replacement.
Boolean
Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as true in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.

ModelPrimaryContainerMultiModelConfig
, ModelPrimaryContainerMultiModelConfigArgs

ModelCacheSetting Changes to this property will trigger replacement. string
Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
ModelCacheSetting Changes to this property will trigger replacement. string
Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
modelCacheSetting Changes to this property will trigger replacement. String
Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
modelCacheSetting Changes to this property will trigger replacement. string
Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
model_cache_setting Changes to this property will trigger replacement. str
Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.
modelCacheSetting Changes to this property will trigger replacement. String
Whether to cache models for a multi-model endpoint. By default, multi-model endpoints cache models so that a model does not have to be loaded into memory each time it is invoked. Some use cases do not benefit from model caching. For example, if an endpoint hosts a large number of models that are each invoked infrequently, the endpoint might perform better if you disable model caching. To disable model caching, set the value of this parameter to Disabled. Allowed values are: Enabled and Disabled.

ModelVpcConfig
, ModelVpcConfigArgs

SecurityGroupIds This property is required. List<string>
Subnets This property is required. List<string>
SecurityGroupIds This property is required. []string
Subnets This property is required. []string
securityGroupIds This property is required. List<String>
subnets This property is required. List<String>
securityGroupIds This property is required. string[]
subnets This property is required. string[]
security_group_ids This property is required. Sequence[str]
subnets This property is required. Sequence[str]
securityGroupIds This property is required. List<String>
subnets This property is required. List<String>

Import

Using pulumi import, import models using the name. For example:

$ pulumi import aws:sagemaker/model:Model test_model model-foo
Copy

To learn more about importing existing cloud resources, see Importing resources.

Package Details

Repository
AWS Classic pulumi/pulumi-aws
License
Apache-2.0
Notes
This Pulumi package is based on the aws Terraform Provider.