1. Packages
  2. Confluent Provider
  3. API Docs
  4. KafkaCluster
Confluent v2.24.0 published on Saturday, Apr 19, 2025 by Pulumi

confluentcloud.KafkaCluster

Explore with Pulumi AI

Example Usage

Example Kafka clusters on AWS

import * as pulumi from "@pulumi/pulumi";
import * as confluentcloud from "@pulumi/confluentcloud";

// Environment that owns the basic/standard/enterprise/dedicated clusters.
const development = new confluentcloud.Environment("development", {displayName: "Development"});
// FIX: the freight cluster below referenced an undefined `staging` variable;
// declare the Staging environment so the example is self-contained.
const staging = new confluentcloud.Environment("staging", {displayName: "Staging"});
// Basic cluster — the empty `basic: {}` block selects the Basic tier.
const basic = new confluentcloud.KafkaCluster("basic", {
    displayName: "basic_kafka_cluster",
    availability: "SINGLE_ZONE",
    cloud: "AWS",
    region: "us-east-2",
    basic: {},
    environment: {
        id: development.id,
    },
});
// Standard cluster — empty `standard: {}` block selects the tier.
const standard = new confluentcloud.KafkaCluster("standard", {
    displayName: "standard_kafka_cluster",
    availability: "SINGLE_ZONE",
    cloud: "AWS",
    region: "us-east-2",
    standard: {},
    environment: {
        id: development.id,
    },
});
// Enterprise cluster — `enterprises: [{}]` selects the tier; uses "HIGH" availability.
const enterprise = new confluentcloud.KafkaCluster("enterprise", {
    enterprises: [{}],
    displayName: "enterprise_kafka_cluster",
    availability: "HIGH",
    cloud: "AWS",
    region: "us-east-2",
    environment: {
        id: development.id,
    },
});
// Dedicated cluster — capacity is sized in CKUs.
const dedicated = new confluentcloud.KafkaCluster("dedicated", {
    displayName: "dedicated_kafka_cluster",
    availability: "MULTI_ZONE",
    cloud: "AWS",
    region: "us-east-2",
    dedicated: {
        cku: 2,
    },
    environment: {
        id: development.id,
    },
});
// Freight cluster — provisioned into the staging environment defined above.
const freight = new confluentcloud.KafkaCluster("freight", {
    freights: [{}],
    displayName: "freight_kafka_cluster",
    availability: "HIGH",
    cloud: "AWS",
    region: "us-east-1",
    environment: {
        id: staging.id,
    },
});
Copy
import pulumi
import pulumi_confluentcloud as confluentcloud

# Environment that owns the basic/standard/enterprise/dedicated clusters.
development = confluentcloud.Environment("development", display_name="Development")
# FIX: the freight cluster below referenced an undefined `staging` name;
# declare the Staging environment so the example is self-contained.
staging = confluentcloud.Environment("staging", display_name="Staging")
# Basic cluster -- the empty `basic={}` argument selects the Basic tier.
basic = confluentcloud.KafkaCluster("basic",
    display_name="basic_kafka_cluster",
    availability="SINGLE_ZONE",
    cloud="AWS",
    region="us-east-2",
    basic={},
    environment={
        "id": development.id,
    })
# Standard cluster -- empty `standard={}` argument selects the tier.
standard = confluentcloud.KafkaCluster("standard",
    display_name="standard_kafka_cluster",
    availability="SINGLE_ZONE",
    cloud="AWS",
    region="us-east-2",
    standard={},
    environment={
        "id": development.id,
    })
# Enterprise cluster -- `enterprises=[{}]` selects the tier; "HIGH" availability.
enterprise = confluentcloud.KafkaCluster("enterprise",
    enterprises=[{}],
    display_name="enterprise_kafka_cluster",
    availability="HIGH",
    cloud="AWS",
    region="us-east-2",
    environment={
        "id": development.id,
    })
# Dedicated cluster -- capacity is sized in CKUs.
dedicated = confluentcloud.KafkaCluster("dedicated",
    display_name="dedicated_kafka_cluster",
    availability="MULTI_ZONE",
    cloud="AWS",
    region="us-east-2",
    dedicated={
        "cku": 2,
    },
    environment={
        "id": development.id,
    })
# Freight cluster -- provisioned into the staging environment defined above.
freight = confluentcloud.KafkaCluster("freight",
    freights=[{}],
    display_name="freight_kafka_cluster",
    availability="HIGH",
    cloud="AWS",
    region="us-east-1",
    environment={
        "id": staging.id,
    })
Copy
package main

import (
	"github.com/pulumi/pulumi-confluentcloud/sdk/v2/go/confluentcloud"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		development, err := confluentcloud.NewEnvironment(ctx, "development", &confluentcloud.EnvironmentArgs{
			DisplayName: pulumi.String("Development"),
		})
		if err != nil {
			return err
		}
		_, err = confluentcloud.NewKafkaCluster(ctx, "basic", &confluentcloud.KafkaClusterArgs{
			DisplayName:  pulumi.String("basic_kafka_cluster"),
			Availability: pulumi.String("SINGLE_ZONE"),
			Cloud:        pulumi.String("AWS"),
			Region:       pulumi.String("us-east-2"),
			Basic:        &confluentcloud.KafkaClusterBasicArgs{},
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: development.ID(),
			},
		})
		if err != nil {
			return err
		}
		_, err = confluentcloud.NewKafkaCluster(ctx, "standard", &confluentcloud.KafkaClusterArgs{
			DisplayName:  pulumi.String("standard_kafka_cluster"),
			Availability: pulumi.String("SINGLE_ZONE"),
			Cloud:        pulumi.String("AWS"),
			Region:       pulumi.String("us-east-2"),
			Standard:     &confluentcloud.KafkaClusterStandardArgs{},
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: development.ID(),
			},
		})
		if err != nil {
			return err
		}
		_, err = confluentcloud.NewKafkaCluster(ctx, "enterprise", &confluentcloud.KafkaClusterArgs{
			Enterprises: confluentcloud.KafkaClusterEnterpriseArray{
				&confluentcloud.KafkaClusterEnterpriseArgs{},
			},
			DisplayName:  pulumi.String("enterprise_kafka_cluster"),
			Availability: pulumi.String("HIGH"),
			Cloud:        pulumi.String("AWS"),
			Region:       pulumi.String("us-east-2"),
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: development.ID(),
			},
		})
		if err != nil {
			return err
		}
		_, err = confluentcloud.NewKafkaCluster(ctx, "dedicated", &confluentcloud.KafkaClusterArgs{
			DisplayName:  pulumi.String("dedicated_kafka_cluster"),
			Availability: pulumi.String("MULTI_ZONE"),
			Cloud:        pulumi.String("AWS"),
			Region:       pulumi.String("us-east-2"),
			Dedicated: &confluentcloud.KafkaClusterDedicatedArgs{
				Cku: pulumi.Int(2),
			},
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: development.ID(),
			},
		})
		if err != nil {
			return err
		}
		_, err = confluentcloud.NewKafkaCluster(ctx, "freight", &confluentcloud.KafkaClusterArgs{
			Freights: confluentcloud.KafkaClusterFreightArray{
				&confluentcloud.KafkaClusterFreightArgs{},
			},
			DisplayName:  pulumi.String("freight_kafka_cluster"),
			Availability: pulumi.String("HIGH"),
			Cloud:        pulumi.String("AWS"),
			Region:       pulumi.String("us-east-1"),
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: pulumi.Any(staging.Id),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
Copy
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using ConfluentCloud = Pulumi.ConfluentCloud;

return await Deployment.RunAsync(() => 
{
    // Environment that owns the basic/standard/enterprise/dedicated clusters.
    var development = new ConfluentCloud.Environment("development", new()
    {
        DisplayName = "Development",
    });

    // FIX: the freight cluster below referenced an undefined `staging`
    // identifier; declare the Staging environment so the example compiles.
    var staging = new ConfluentCloud.Environment("staging", new()
    {
        DisplayName = "Staging",
    });

    // Basic cluster — `Basic = null` selects the Basic tier.
    var basic = new ConfluentCloud.KafkaCluster("basic", new()
    {
        DisplayName = "basic_kafka_cluster",
        Availability = "SINGLE_ZONE",
        Cloud = "AWS",
        Region = "us-east-2",
        Basic = null,
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = development.Id,
        },
    });

    // Standard cluster — `Standard = null` selects the tier.
    var standard = new ConfluentCloud.KafkaCluster("standard", new()
    {
        DisplayName = "standard_kafka_cluster",
        Availability = "SINGLE_ZONE",
        Cloud = "AWS",
        Region = "us-east-2",
        Standard = null,
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = development.Id,
        },
    });

    // Enterprise cluster — a one-element Enterprises array selects the tier.
    var enterprise = new ConfluentCloud.KafkaCluster("enterprise", new()
    {
        Enterprises = new[]
        {
            null,
        },
        DisplayName = "enterprise_kafka_cluster",
        Availability = "HIGH",
        Cloud = "AWS",
        Region = "us-east-2",
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = development.Id,
        },
    });

    // Dedicated cluster — capacity is sized in CKUs.
    var dedicated = new ConfluentCloud.KafkaCluster("dedicated", new()
    {
        DisplayName = "dedicated_kafka_cluster",
        Availability = "MULTI_ZONE",
        Cloud = "AWS",
        Region = "us-east-2",
        Dedicated = new ConfluentCloud.Inputs.KafkaClusterDedicatedArgs
        {
            Cku = 2,
        },
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = development.Id,
        },
    });

    // Freight cluster — provisioned into the staging environment defined above.
    var freight = new ConfluentCloud.KafkaCluster("freight", new()
    {
        Freights = new[]
        {
            null,
        },
        DisplayName = "freight_kafka_cluster",
        Availability = "HIGH",
        Cloud = "AWS",
        Region = "us-east-1",
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = staging.Id,
        },
    });

});
Copy
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.confluentcloud.Environment;
import com.pulumi.confluentcloud.EnvironmentArgs;
import com.pulumi.confluentcloud.KafkaCluster;
import com.pulumi.confluentcloud.KafkaClusterArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterBasicArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterEnvironmentArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterStandardArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterEnterpriseArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterDedicatedArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterFreightArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Environment that owns the basic/standard/enterprise/dedicated clusters.
        var development = new Environment("development", EnvironmentArgs.builder()
            .displayName("Development")
            .build());

        // FIX: the freight cluster below referenced an undefined `staging`
        // identifier (a compile error); declare the Staging environment here.
        var staging = new Environment("staging", EnvironmentArgs.builder()
            .displayName("Staging")
            .build());

        // Basic cluster — an empty KafkaClusterBasicArgs selects the Basic tier.
        var basic = new KafkaCluster("basic", KafkaClusterArgs.builder()
            .displayName("basic_kafka_cluster")
            .availability("SINGLE_ZONE")
            .cloud("AWS")
            .region("us-east-2")
            .basic(KafkaClusterBasicArgs.builder()
                .build())
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(development.id())
                .build())
            .build());

        // Standard cluster — an empty KafkaClusterStandardArgs selects the tier.
        var standard = new KafkaCluster("standard", KafkaClusterArgs.builder()
            .displayName("standard_kafka_cluster")
            .availability("SINGLE_ZONE")
            .cloud("AWS")
            .region("us-east-2")
            .standard(KafkaClusterStandardArgs.builder()
                .build())
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(development.id())
                .build())
            .build());

        // Enterprise cluster — an empty KafkaClusterEnterpriseArgs selects the tier.
        var enterprise = new KafkaCluster("enterprise", KafkaClusterArgs.builder()
            .enterprises(KafkaClusterEnterpriseArgs.builder()
                .build())
            .displayName("enterprise_kafka_cluster")
            .availability("HIGH")
            .cloud("AWS")
            .region("us-east-2")
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(development.id())
                .build())
            .build());

        // Dedicated cluster — capacity is sized in CKUs.
        var dedicated = new KafkaCluster("dedicated", KafkaClusterArgs.builder()
            .displayName("dedicated_kafka_cluster")
            .availability("MULTI_ZONE")
            .cloud("AWS")
            .region("us-east-2")
            .dedicated(KafkaClusterDedicatedArgs.builder()
                .cku(2)
                .build())
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(development.id())
                .build())
            .build());

        // Freight cluster — provisioned into the staging environment defined above.
        var freight = new KafkaCluster("freight", KafkaClusterArgs.builder()
            .freights(KafkaClusterFreightArgs.builder()
                .build())
            .displayName("freight_kafka_cluster")
            .availability("HIGH")
            .cloud("AWS")
            .region("us-east-1")
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(staging.id())
                .build())
            .build());

    }
}
Copy
resources:
  # Environment that owns the basic/standard/enterprise/dedicated clusters.
  development:
    type: confluentcloud:Environment
    properties:
      displayName: Development
  # FIX: the freight cluster below referenced ${staging.id}, but no `staging`
  # resource was declared; define it so the program is self-contained.
  staging:
    type: confluentcloud:Environment
    properties:
      displayName: Staging
  # Basic cluster — the empty `basic: {}` block selects the Basic tier.
  basic:
    type: confluentcloud:KafkaCluster
    properties:
      displayName: basic_kafka_cluster
      availability: SINGLE_ZONE
      cloud: AWS
      region: us-east-2
      basic: {}
      environment:
        id: ${development.id}
  # Standard cluster — empty `standard: {}` block selects the tier.
  standard:
    type: confluentcloud:KafkaCluster
    properties:
      displayName: standard_kafka_cluster
      availability: SINGLE_ZONE
      cloud: AWS
      region: us-east-2
      standard: {}
      environment:
        id: ${development.id}
  # Enterprise cluster — a one-element `enterprises` list selects the tier.
  enterprise:
    type: confluentcloud:KafkaCluster
    properties:
      enterprises:
        - {}
      displayName: enterprise_kafka_cluster
      availability: HIGH
      cloud: AWS
      region: us-east-2
      environment:
        id: ${development.id}
  # Dedicated cluster — capacity is sized in CKUs.
  dedicated:
    type: confluentcloud:KafkaCluster
    properties:
      displayName: dedicated_kafka_cluster
      availability: MULTI_ZONE
      cloud: AWS
      region: us-east-2
      dedicated:
        cku: 2
      environment:
        id: ${development.id}
  # Freight cluster — provisioned into the staging environment defined above.
  freight:
    type: confluentcloud:KafkaCluster
    properties:
      freights:
        - {}
      displayName: freight_kafka_cluster
      availability: HIGH
      cloud: AWS
      region: us-east-1
      environment:
        id: ${staging.id}
Copy

Example Kafka clusters on Azure

// Example Kafka clusters on Azure — one resource per cluster tier.
import * as pulumi from "@pulumi/pulumi";
import * as confluentcloud from "@pulumi/confluentcloud";

// Environment that owns every cluster in this example.
const development = new confluentcloud.Environment("development", {displayName: "Development"});
// Basic cluster — the empty `basic: {}` block selects the Basic tier.
const basic = new confluentcloud.KafkaCluster("basic", {
    displayName: "basic_kafka_cluster",
    availability: "SINGLE_ZONE",
    cloud: "AZURE",
    region: "centralus",
    basic: {},
    environment: {
        id: development.id,
    },
});
// Standard cluster — empty `standard: {}` block selects the tier.
const standard = new confluentcloud.KafkaCluster("standard", {
    displayName: "standard_kafka_cluster",
    availability: "SINGLE_ZONE",
    cloud: "AZURE",
    region: "centralus",
    standard: {},
    environment: {
        id: development.id,
    },
});
// Enterprise cluster — `enterprises: [{}]` selects the tier; uses "HIGH" availability.
const enterprise = new confluentcloud.KafkaCluster("enterprise", {
    enterprises: [{}],
    displayName: "enterprise_kafka_cluster",
    availability: "HIGH",
    cloud: "AZURE",
    region: "centralus",
    environment: {
        id: development.id,
    },
});
// Dedicated cluster — capacity is sized in CKUs.
const dedicated = new confluentcloud.KafkaCluster("dedicated", {
    displayName: "dedicated_kafka_cluster",
    availability: "MULTI_ZONE",
    cloud: "AZURE",
    region: "centralus",
    dedicated: {
        cku: 2,
    },
    environment: {
        id: development.id,
    },
});
Copy
# Example Kafka clusters on Azure -- one resource per cluster tier.
import pulumi
import pulumi_confluentcloud as confluentcloud

# Environment that owns every cluster in this example.
development = confluentcloud.Environment("development", display_name="Development")
# Basic cluster -- the empty `basic={}` argument selects the Basic tier.
basic = confluentcloud.KafkaCluster("basic",
    display_name="basic_kafka_cluster",
    availability="SINGLE_ZONE",
    cloud="AZURE",
    region="centralus",
    basic={},
    environment={
        "id": development.id,
    })
# Standard cluster -- empty `standard={}` argument selects the tier.
standard = confluentcloud.KafkaCluster("standard",
    display_name="standard_kafka_cluster",
    availability="SINGLE_ZONE",
    cloud="AZURE",
    region="centralus",
    standard={},
    environment={
        "id": development.id,
    })
# Enterprise cluster -- `enterprises=[{}]` selects the tier; "HIGH" availability.
enterprise = confluentcloud.KafkaCluster("enterprise",
    enterprises=[{}],
    display_name="enterprise_kafka_cluster",
    availability="HIGH",
    cloud="AZURE",
    region="centralus",
    environment={
        "id": development.id,
    })
# Dedicated cluster -- capacity is sized in CKUs.
dedicated = confluentcloud.KafkaCluster("dedicated",
    display_name="dedicated_kafka_cluster",
    availability="MULTI_ZONE",
    cloud="AZURE",
    region="centralus",
    dedicated={
        "cku": 2,
    },
    environment={
        "id": development.id,
    })
Copy
// Example Kafka clusters on Azure — one resource per cluster tier.
package main

import (
	"github.com/pulumi/pulumi-confluentcloud/sdk/v2/go/confluentcloud"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Environment that owns every cluster in this example.
		development, err := confluentcloud.NewEnvironment(ctx, "development", &confluentcloud.EnvironmentArgs{
			DisplayName: pulumi.String("Development"),
		})
		if err != nil {
			return err
		}
		// Basic cluster — the empty Basic args struct selects the Basic tier.
		_, err = confluentcloud.NewKafkaCluster(ctx, "basic", &confluentcloud.KafkaClusterArgs{
			DisplayName:  pulumi.String("basic_kafka_cluster"),
			Availability: pulumi.String("SINGLE_ZONE"),
			Cloud:        pulumi.String("AZURE"),
			Region:       pulumi.String("centralus"),
			Basic:        &confluentcloud.KafkaClusterBasicArgs{},
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: development.ID(),
			},
		})
		if err != nil {
			return err
		}
		// Standard cluster — empty Standard args struct selects the tier.
		_, err = confluentcloud.NewKafkaCluster(ctx, "standard", &confluentcloud.KafkaClusterArgs{
			DisplayName:  pulumi.String("standard_kafka_cluster"),
			Availability: pulumi.String("SINGLE_ZONE"),
			Cloud:        pulumi.String("AZURE"),
			Region:       pulumi.String("centralus"),
			Standard:     &confluentcloud.KafkaClusterStandardArgs{},
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: development.ID(),
			},
		})
		if err != nil {
			return err
		}
		// Enterprise cluster — a one-element Enterprises array selects the tier.
		_, err = confluentcloud.NewKafkaCluster(ctx, "enterprise", &confluentcloud.KafkaClusterArgs{
			Enterprises: confluentcloud.KafkaClusterEnterpriseArray{
				&confluentcloud.KafkaClusterEnterpriseArgs{},
			},
			DisplayName:  pulumi.String("enterprise_kafka_cluster"),
			Availability: pulumi.String("HIGH"),
			Cloud:        pulumi.String("AZURE"),
			Region:       pulumi.String("centralus"),
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: development.ID(),
			},
		})
		if err != nil {
			return err
		}
		// Dedicated cluster — capacity is sized in CKUs.
		_, err = confluentcloud.NewKafkaCluster(ctx, "dedicated", &confluentcloud.KafkaClusterArgs{
			DisplayName:  pulumi.String("dedicated_kafka_cluster"),
			Availability: pulumi.String("MULTI_ZONE"),
			Cloud:        pulumi.String("AZURE"),
			Region:       pulumi.String("centralus"),
			Dedicated: &confluentcloud.KafkaClusterDedicatedArgs{
				Cku: pulumi.Int(2),
			},
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: development.ID(),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
Copy
// Example Kafka clusters on Azure — one resource per cluster tier.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using ConfluentCloud = Pulumi.ConfluentCloud;

return await Deployment.RunAsync(() => 
{
    // Environment that owns every cluster in this example.
    var development = new ConfluentCloud.Environment("development", new()
    {
        DisplayName = "Development",
    });

    // Basic cluster — `Basic = null` selects the Basic tier.
    var basic = new ConfluentCloud.KafkaCluster("basic", new()
    {
        DisplayName = "basic_kafka_cluster",
        Availability = "SINGLE_ZONE",
        Cloud = "AZURE",
        Region = "centralus",
        Basic = null,
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = development.Id,
        },
    });

    // Standard cluster — `Standard = null` selects the tier.
    var standard = new ConfluentCloud.KafkaCluster("standard", new()
    {
        DisplayName = "standard_kafka_cluster",
        Availability = "SINGLE_ZONE",
        Cloud = "AZURE",
        Region = "centralus",
        Standard = null,
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = development.Id,
        },
    });

    // Enterprise cluster — a one-element Enterprises array selects the tier.
    var enterprise = new ConfluentCloud.KafkaCluster("enterprise", new()
    {
        Enterprises = new[]
        {
            null,
        },
        DisplayName = "enterprise_kafka_cluster",
        Availability = "HIGH",
        Cloud = "AZURE",
        Region = "centralus",
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = development.Id,
        },
    });

    // Dedicated cluster — capacity is sized in CKUs.
    var dedicated = new ConfluentCloud.KafkaCluster("dedicated", new()
    {
        DisplayName = "dedicated_kafka_cluster",
        Availability = "MULTI_ZONE",
        Cloud = "AZURE",
        Region = "centralus",
        Dedicated = new ConfluentCloud.Inputs.KafkaClusterDedicatedArgs
        {
            Cku = 2,
        },
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = development.Id,
        },
    });

});
Copy
// Example Kafka clusters on Azure — one resource per cluster tier.
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.confluentcloud.Environment;
import com.pulumi.confluentcloud.EnvironmentArgs;
import com.pulumi.confluentcloud.KafkaCluster;
import com.pulumi.confluentcloud.KafkaClusterArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterBasicArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterEnvironmentArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterStandardArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterEnterpriseArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterDedicatedArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Environment that owns every cluster in this example.
        var development = new Environment("development", EnvironmentArgs.builder()
            .displayName("Development")
            .build());

        // Basic cluster — an empty KafkaClusterBasicArgs selects the Basic tier.
        var basic = new KafkaCluster("basic", KafkaClusterArgs.builder()
            .displayName("basic_kafka_cluster")
            .availability("SINGLE_ZONE")
            .cloud("AZURE")
            .region("centralus")
            .basic(KafkaClusterBasicArgs.builder()
                .build())
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(development.id())
                .build())
            .build());

        // Standard cluster — an empty KafkaClusterStandardArgs selects the tier.
        var standard = new KafkaCluster("standard", KafkaClusterArgs.builder()
            .displayName("standard_kafka_cluster")
            .availability("SINGLE_ZONE")
            .cloud("AZURE")
            .region("centralus")
            .standard(KafkaClusterStandardArgs.builder()
                .build())
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(development.id())
                .build())
            .build());

        // Enterprise cluster — an empty KafkaClusterEnterpriseArgs selects the tier.
        var enterprise = new KafkaCluster("enterprise", KafkaClusterArgs.builder()
            .enterprises(KafkaClusterEnterpriseArgs.builder()
                .build())
            .displayName("enterprise_kafka_cluster")
            .availability("HIGH")
            .cloud("AZURE")
            .region("centralus")
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(development.id())
                .build())
            .build());

        // Dedicated cluster — capacity is sized in CKUs.
        var dedicated = new KafkaCluster("dedicated", KafkaClusterArgs.builder()
            .displayName("dedicated_kafka_cluster")
            .availability("MULTI_ZONE")
            .cloud("AZURE")
            .region("centralus")
            .dedicated(KafkaClusterDedicatedArgs.builder()
                .cku(2)
                .build())
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(development.id())
                .build())
            .build());

    }
}
Copy
# Example Kafka clusters on Azure — one resource per cluster tier.
resources:
  # Environment that owns every cluster in this example.
  development:
    type: confluentcloud:Environment
    properties:
      displayName: Development
  # Basic cluster — the empty `basic: {}` block selects the Basic tier.
  basic:
    type: confluentcloud:KafkaCluster
    properties:
      displayName: basic_kafka_cluster
      availability: SINGLE_ZONE
      cloud: AZURE
      region: centralus
      basic: {}
      environment:
        id: ${development.id}
  # Standard cluster — empty `standard: {}` block selects the tier.
  standard:
    type: confluentcloud:KafkaCluster
    properties:
      displayName: standard_kafka_cluster
      availability: SINGLE_ZONE
      cloud: AZURE
      region: centralus
      standard: {}
      environment:
        id: ${development.id}
  # Enterprise cluster — a one-element `enterprises` list selects the tier.
  enterprise:
    type: confluentcloud:KafkaCluster
    properties:
      enterprises:
        - {}
      displayName: enterprise_kafka_cluster
      availability: HIGH
      cloud: AZURE
      region: centralus
      environment:
        id: ${development.id}
  # Dedicated cluster — capacity is sized in CKUs.
  dedicated:
    type: confluentcloud:KafkaCluster
    properties:
      displayName: dedicated_kafka_cluster
      availability: MULTI_ZONE
      cloud: AZURE
      region: centralus
      dedicated:
        cku: 2
      environment:
        id: ${development.id}
Copy

Example Kafka clusters on GCP

// Example Kafka clusters on GCP — one resource per cluster tier.
import * as pulumi from "@pulumi/pulumi";
import * as confluentcloud from "@pulumi/confluentcloud";

// Environment that owns every cluster in this example.
const development = new confluentcloud.Environment("development", {displayName: "Development"});
// Basic cluster — the empty `basic: {}` block selects the Basic tier.
const basic = new confluentcloud.KafkaCluster("basic", {
    displayName: "basic_kafka_cluster",
    availability: "SINGLE_ZONE",
    cloud: "GCP",
    region: "us-central1",
    basic: {},
    environment: {
        id: development.id,
    },
});
// Standard cluster — empty `standard: {}` block selects the tier.
const standard = new confluentcloud.KafkaCluster("standard", {
    displayName: "standard_kafka_cluster",
    availability: "SINGLE_ZONE",
    cloud: "GCP",
    region: "us-central1",
    standard: {},
    environment: {
        id: development.id,
    },
});
// Dedicated cluster — capacity is sized in CKUs.
const dedicated = new confluentcloud.KafkaCluster("dedicated", {
    displayName: "dedicated_kafka_cluster",
    availability: "MULTI_ZONE",
    cloud: "GCP",
    region: "us-central1",
    dedicated: {
        cku: 2,
    },
    environment: {
        id: development.id,
    },
});
Copy
# Example Kafka clusters on GCP -- one resource per cluster tier.
import pulumi
import pulumi_confluentcloud as confluentcloud

# Environment that owns every cluster in this example.
development = confluentcloud.Environment("development", display_name="Development")
# Basic cluster -- the empty `basic={}` argument selects the Basic tier.
basic = confluentcloud.KafkaCluster("basic",
    display_name="basic_kafka_cluster",
    availability="SINGLE_ZONE",
    cloud="GCP",
    region="us-central1",
    basic={},
    environment={
        "id": development.id,
    })
# Standard cluster -- empty `standard={}` argument selects the tier.
standard = confluentcloud.KafkaCluster("standard",
    display_name="standard_kafka_cluster",
    availability="SINGLE_ZONE",
    cloud="GCP",
    region="us-central1",
    standard={},
    environment={
        "id": development.id,
    })
# Dedicated cluster -- capacity is sized in CKUs.
dedicated = confluentcloud.KafkaCluster("dedicated",
    display_name="dedicated_kafka_cluster",
    availability="MULTI_ZONE",
    cloud="GCP",
    region="us-central1",
    dedicated={
        "cku": 2,
    },
    environment={
        "id": development.id,
    })
Copy
// Example Kafka clusters on GCP — one resource per cluster tier.
package main

import (
	"github.com/pulumi/pulumi-confluentcloud/sdk/v2/go/confluentcloud"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Environment that owns every cluster in this example.
		development, err := confluentcloud.NewEnvironment(ctx, "development", &confluentcloud.EnvironmentArgs{
			DisplayName: pulumi.String("Development"),
		})
		if err != nil {
			return err
		}
		// Basic cluster — the empty Basic args struct selects the Basic tier.
		_, err = confluentcloud.NewKafkaCluster(ctx, "basic", &confluentcloud.KafkaClusterArgs{
			DisplayName:  pulumi.String("basic_kafka_cluster"),
			Availability: pulumi.String("SINGLE_ZONE"),
			Cloud:        pulumi.String("GCP"),
			Region:       pulumi.String("us-central1"),
			Basic:        &confluentcloud.KafkaClusterBasicArgs{},
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: development.ID(),
			},
		})
		if err != nil {
			return err
		}
		// Standard cluster — empty Standard args struct selects the tier.
		_, err = confluentcloud.NewKafkaCluster(ctx, "standard", &confluentcloud.KafkaClusterArgs{
			DisplayName:  pulumi.String("standard_kafka_cluster"),
			Availability: pulumi.String("SINGLE_ZONE"),
			Cloud:        pulumi.String("GCP"),
			Region:       pulumi.String("us-central1"),
			Standard:     &confluentcloud.KafkaClusterStandardArgs{},
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: development.ID(),
			},
		})
		if err != nil {
			return err
		}
		// Dedicated cluster — capacity is sized in CKUs.
		_, err = confluentcloud.NewKafkaCluster(ctx, "dedicated", &confluentcloud.KafkaClusterArgs{
			DisplayName:  pulumi.String("dedicated_kafka_cluster"),
			Availability: pulumi.String("MULTI_ZONE"),
			Cloud:        pulumi.String("GCP"),
			Region:       pulumi.String("us-central1"),
			Dedicated: &confluentcloud.KafkaClusterDedicatedArgs{
				Cku: pulumi.Int(2),
			},
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: development.ID(),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
Copy
// Example Kafka clusters on GCP — one resource per cluster tier.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using ConfluentCloud = Pulumi.ConfluentCloud;

return await Deployment.RunAsync(() => 
{
    // Environment that owns every cluster in this example.
    var development = new ConfluentCloud.Environment("development", new()
    {
        DisplayName = "Development",
    });

    // Basic cluster — `Basic = null` selects the Basic tier.
    var basic = new ConfluentCloud.KafkaCluster("basic", new()
    {
        DisplayName = "basic_kafka_cluster",
        Availability = "SINGLE_ZONE",
        Cloud = "GCP",
        Region = "us-central1",
        Basic = null,
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = development.Id,
        },
    });

    // Standard cluster — `Standard = null` selects the tier.
    var standard = new ConfluentCloud.KafkaCluster("standard", new()
    {
        DisplayName = "standard_kafka_cluster",
        Availability = "SINGLE_ZONE",
        Cloud = "GCP",
        Region = "us-central1",
        Standard = null,
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = development.Id,
        },
    });

    // Dedicated cluster — capacity is sized in CKUs.
    var dedicated = new ConfluentCloud.KafkaCluster("dedicated", new()
    {
        DisplayName = "dedicated_kafka_cluster",
        Availability = "MULTI_ZONE",
        Cloud = "GCP",
        Region = "us-central1",
        Dedicated = new ConfluentCloud.Inputs.KafkaClusterDedicatedArgs
        {
            Cku = 2,
        },
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = development.Id,
        },
    });

});
Copy
// Example Kafka clusters on GCP — one resource per cluster tier.
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.confluentcloud.Environment;
import com.pulumi.confluentcloud.EnvironmentArgs;
import com.pulumi.confluentcloud.KafkaCluster;
import com.pulumi.confluentcloud.KafkaClusterArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterBasicArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterEnvironmentArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterStandardArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterDedicatedArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Environment that owns every cluster in this example.
        var development = new Environment("development", EnvironmentArgs.builder()
            .displayName("Development")
            .build());

        // Basic cluster — an empty KafkaClusterBasicArgs selects the Basic tier.
        var basic = new KafkaCluster("basic", KafkaClusterArgs.builder()
            .displayName("basic_kafka_cluster")
            .availability("SINGLE_ZONE")
            .cloud("GCP")
            .region("us-central1")
            .basic(KafkaClusterBasicArgs.builder()
                .build())
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(development.id())
                .build())
            .build());

        // Standard cluster — an empty KafkaClusterStandardArgs selects the tier.
        var standard = new KafkaCluster("standard", KafkaClusterArgs.builder()
            .displayName("standard_kafka_cluster")
            .availability("SINGLE_ZONE")
            .cloud("GCP")
            .region("us-central1")
            .standard(KafkaClusterStandardArgs.builder()
                .build())
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(development.id())
                .build())
            .build());

        // Dedicated cluster — capacity is sized in CKUs.
        var dedicated = new KafkaCluster("dedicated", KafkaClusterArgs.builder()
            .displayName("dedicated_kafka_cluster")
            .availability("MULTI_ZONE")
            .cloud("GCP")
            .region("us-central1")
            .dedicated(KafkaClusterDedicatedArgs.builder()
                .cku(2)
                .build())
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(development.id())
                .build())
            .build());

    }
}
Copy
resources:
  development:
    type: confluentcloud:Environment
    properties:
      displayName: Development
  basic:
    type: confluentcloud:KafkaCluster
    properties:
      displayName: basic_kafka_cluster
      availability: SINGLE_ZONE
      cloud: GCP
      region: us-central1
      basic: {}
      environment:
        id: ${development.id}
  standard:
    type: confluentcloud:KafkaCluster
    properties:
      displayName: standard_kafka_cluster
      availability: SINGLE_ZONE
      cloud: GCP
      region: us-central1
      standard: {}
      environment:
        id: ${development.id}
  dedicated:
    type: confluentcloud:KafkaCluster
    properties:
      displayName: dedicated_kafka_cluster
      availability: MULTI_ZONE
      cloud: GCP
      region: us-central1
      dedicated:
        cku: 2
      environment:
        id: ${development.id}
Copy

Getting Started

The following end-to-end examples might help to get started with confluentcloud.KafkaCluster resource:

  • basic-kafka-acls: Basic Kafka cluster with authorization using ACLs
  • basic-kafka-acls-with-alias: Basic Kafka cluster with authorization using ACLs
  • standard-kafka-acls: Standard Kafka cluster with authorization using ACLs
  • standard-kafka-rbac: Standard Kafka cluster with authorization using RBAC
  • dedicated-public-kafka-acls: Dedicated Kafka cluster that is accessible over the public internet with authorization using ACLs
  • dedicated-public-kafka-rbac: Dedicated Kafka cluster that is accessible over the public internet with authorization using RBAC
  • dedicated-privatelink-aws-kafka-acls: Dedicated Kafka cluster on AWS that is accessible via PrivateLink connections with authorization using ACLs
  • dedicated-privatelink-aws-kafka-rbac: Dedicated Kafka cluster on AWS that is accessible via PrivateLink connections with authorization using RBAC
  • dedicated-privatelink-azure-kafka-rbac: Dedicated Kafka cluster on Azure that is accessible via PrivateLink connections with authorization using RBAC
  • dedicated-privatelink-azure-kafka-acls: Dedicated Kafka cluster on Azure that is accessible via PrivateLink connections with authorization using ACLs
  • dedicated-private-service-connect-gcp-kafka-acls: Dedicated Kafka cluster on GCP that is accessible via Private Service Connect connections with authorization using ACLs
  • dedicated-private-service-connect-gcp-kafka-rbac: Dedicated Kafka cluster on GCP that is accessible via Private Service Connect connections with authorization using RBAC
  • dedicated-vnet-peering-azure-kafka-acls: Dedicated Kafka cluster on Azure that is accessible via VPC Peering connections with authorization using ACLs
  • dedicated-vnet-peering-azure-kafka-rbac: Dedicated Kafka cluster on Azure that is accessible via VPC Peering connections with authorization using RBAC
  • dedicated-vpc-peering-aws-kafka-acls: Dedicated Kafka cluster on AWS that is accessible via VPC Peering connections with authorization using ACLs
  • dedicated-vpc-peering-aws-kafka-rbac: Dedicated Kafka cluster on AWS that is accessible via VPC Peering connections with authorization using RBAC
  • dedicated-vpc-peering-gcp-kafka-acls: Dedicated Kafka cluster on GCP that is accessible via VPC Peering connections with authorization using ACLs
  • dedicated-vpc-peering-gcp-kafka-rbac: Dedicated Kafka cluster on GCP that is accessible via VPC Peering connections with authorization using RBAC
  • dedicated-transit-gateway-attachment-aws-kafka-acls: Dedicated Kafka cluster on AWS that is accessible via Transit Gateway Endpoint with authorization using ACLs
  • dedicated-transit-gateway-attachment-aws-kafka-rbac: Dedicated Kafka cluster on AWS that is accessible via Transit Gateway Endpoint with authorization using RBAC
  • enterprise-privatelinkattachment-aws-kafka-acls: Enterprise Kafka cluster on AWS that is accessible via PrivateLink connections with authorization using ACLs
  • enterprise-privatelinkattachment-azure-kafka-acls: Enterprise Kafka cluster on Azure that is accessible via PrivateLink connections with authorization using ACLs

Create KafkaCluster Resource

Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

Constructor syntax

new KafkaCluster(name: string, args: KafkaClusterArgs, opts?: CustomResourceOptions);
@overload
def KafkaCluster(resource_name: str,
                 args: KafkaClusterArgs,
                 opts: Optional[ResourceOptions] = None)

@overload
def KafkaCluster(resource_name: str,
                 opts: Optional[ResourceOptions] = None,
                 availability: Optional[str] = None,
                 cloud: Optional[str] = None,
                 environment: Optional[KafkaClusterEnvironmentArgs] = None,
                 region: Optional[str] = None,
                 basic: Optional[KafkaClusterBasicArgs] = None,
                 byok_key: Optional[KafkaClusterByokKeyArgs] = None,
                 dedicated: Optional[KafkaClusterDedicatedArgs] = None,
                 display_name: Optional[str] = None,
                 enterprises: Optional[Sequence[KafkaClusterEnterpriseArgs]] = None,
                 freights: Optional[Sequence[KafkaClusterFreightArgs]] = None,
                 network: Optional[KafkaClusterNetworkArgs] = None,
                 standard: Optional[KafkaClusterStandardArgs] = None)
func NewKafkaCluster(ctx *Context, name string, args KafkaClusterArgs, opts ...ResourceOption) (*KafkaCluster, error)
public KafkaCluster(string name, KafkaClusterArgs args, CustomResourceOptions? opts = null)
public KafkaCluster(String name, KafkaClusterArgs args)
public KafkaCluster(String name, KafkaClusterArgs args, CustomResourceOptions options)
type: confluentcloud:KafkaCluster
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.

Parameters

name This property is required. string
The unique name of the resource.
args This property is required. KafkaClusterArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
resource_name This property is required. str
The unique name of the resource.
args This property is required. KafkaClusterArgs
The arguments to resource properties.
opts ResourceOptions
Bag of options to control resource's behavior.
ctx Context
Context object for the current deployment.
name This property is required. string
The unique name of the resource.
args This property is required. KafkaClusterArgs
The arguments to resource properties.
opts ResourceOption
Bag of options to control resource's behavior.
name This property is required. string
The unique name of the resource.
args This property is required. KafkaClusterArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
name This property is required. String
The unique name of the resource.
args This property is required. KafkaClusterArgs
The arguments to resource properties.
options CustomResourceOptions
Bag of options to control resource's behavior.

Constructor example

The following reference example uses placeholder values for all input properties.

var kafkaClusterResource = new ConfluentCloud.KafkaCluster("kafkaClusterResource", new()
{
    Availability = "string",
    Cloud = "string",
    Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
    {
        Id = "string",
    },
    Region = "string",
    Basic = null,
    ByokKey = new ConfluentCloud.Inputs.KafkaClusterByokKeyArgs
    {
        Id = "string",
    },
    Dedicated = new ConfluentCloud.Inputs.KafkaClusterDedicatedArgs
    {
        Cku = 0,
        EncryptionKey = "string",
        Zones = new[]
        {
            "string",
        },
    },
    DisplayName = "string",
    Enterprises = new[]
    {
        null,
    },
    Freights = new[]
    {
        new ConfluentCloud.Inputs.KafkaClusterFreightArgs
        {
            Zones = new[]
            {
                "string",
            },
        },
    },
    Network = new ConfluentCloud.Inputs.KafkaClusterNetworkArgs
    {
        Id = "string",
    },
    Standard = null,
});
Copy
example, err := confluentcloud.NewKafkaCluster(ctx, "kafkaClusterResource", &confluentcloud.KafkaClusterArgs{
	Availability: pulumi.String("string"),
	Cloud:        pulumi.String("string"),
	Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
		Id: pulumi.String("string"),
	},
	Region: pulumi.String("string"),
	Basic:  &confluentcloud.KafkaClusterBasicArgs{},
	ByokKey: &confluentcloud.KafkaClusterByokKeyArgs{
		Id: pulumi.String("string"),
	},
	Dedicated: &confluentcloud.KafkaClusterDedicatedArgs{
		Cku:           pulumi.Int(0),
		EncryptionKey: pulumi.String("string"),
		Zones: pulumi.StringArray{
			pulumi.String("string"),
		},
	},
	DisplayName: pulumi.String("string"),
	Enterprises: confluentcloud.KafkaClusterEnterpriseArray{
		&confluentcloud.KafkaClusterEnterpriseArgs{},
	},
	Freights: confluentcloud.KafkaClusterFreightArray{
		&confluentcloud.KafkaClusterFreightArgs{
			Zones: pulumi.StringArray{
				pulumi.String("string"),
			},
		},
	},
	Network: &confluentcloud.KafkaClusterNetworkArgs{
		Id: pulumi.String("string"),
	},
	Standard: &confluentcloud.KafkaClusterStandardArgs{},
})
Copy
var kafkaClusterResource = new KafkaCluster("kafkaClusterResource", KafkaClusterArgs.builder()
    .availability("string")
    .cloud("string")
    .environment(KafkaClusterEnvironmentArgs.builder()
        .id("string")
        .build())
    .region("string")
    .basic()
    .byokKey(KafkaClusterByokKeyArgs.builder()
        .id("string")
        .build())
    .dedicated(KafkaClusterDedicatedArgs.builder()
        .cku(0)
        .encryptionKey("string")
        .zones("string")
        .build())
    .displayName("string")
    .enterprises()
    .freights(KafkaClusterFreightArgs.builder()
        .zones("string")
        .build())
    .network(KafkaClusterNetworkArgs.builder()
        .id("string")
        .build())
    .standard()
    .build());
Copy
kafka_cluster_resource = confluentcloud.KafkaCluster("kafkaClusterResource",
    availability="string",
    cloud="string",
    environment={
        "id": "string",
    },
    region="string",
    basic={},
    byok_key={
        "id": "string",
    },
    dedicated={
        "cku": 0,
        "encryption_key": "string",
        "zones": ["string"],
    },
    display_name="string",
    enterprises=[{}],
    freights=[{
        "zones": ["string"],
    }],
    network={
        "id": "string",
    },
    standard={})
Copy
const kafkaClusterResource = new confluentcloud.KafkaCluster("kafkaClusterResource", {
    availability: "string",
    cloud: "string",
    environment: {
        id: "string",
    },
    region: "string",
    basic: {},
    byokKey: {
        id: "string",
    },
    dedicated: {
        cku: 0,
        encryptionKey: "string",
        zones: ["string"],
    },
    displayName: "string",
    enterprises: [{}],
    freights: [{
        zones: ["string"],
    }],
    network: {
        id: "string",
    },
    standard: {},
});
Copy
type: confluentcloud:KafkaCluster
properties:
    availability: string
    basic: {}
    byokKey:
        id: string
    cloud: string
    dedicated:
        cku: 0
        encryptionKey: string
        zones:
            - string
    displayName: string
    enterprises:
        - {}
    environment:
        id: string
    freights:
        - zones:
            - string
    network:
        id: string
    region: string
    standard: {}
Copy

KafkaCluster Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

Inputs

In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

The KafkaCluster resource accepts the following input properties:

Availability
This property is required.
Changes to this property will trigger replacement.
string
The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
Cloud
This property is required.
Changes to this property will trigger replacement.
string
The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
Environment
This property is required.
Changes to this property will trigger replacement.
Pulumi.ConfluentCloud.Inputs.KafkaClusterEnvironment
Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
Region
This property is required.
Changes to this property will trigger replacement.
string
The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
Basic Pulumi.ConfluentCloud.Inputs.KafkaClusterBasic
The configuration of the Basic Kafka cluster.
ByokKey Pulumi.ConfluentCloud.Inputs.KafkaClusterByokKey
Dedicated Pulumi.ConfluentCloud.Inputs.KafkaClusterDedicated
(Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
DisplayName string
The name of the Kafka cluster.
Enterprises List<Pulumi.ConfluentCloud.Inputs.KafkaClusterEnterprise>
The configuration of the Enterprise Kafka cluster.
Freights List<Pulumi.ConfluentCloud.Inputs.KafkaClusterFreight>
The configuration of the Freight Kafka cluster.
Network Pulumi.ConfluentCloud.Inputs.KafkaClusterNetwork
Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
Standard Pulumi.ConfluentCloud.Inputs.KafkaClusterStandard
The configuration of the Standard Kafka cluster.
Availability
This property is required.
Changes to this property will trigger replacement.
string
The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
Cloud
This property is required.
Changes to this property will trigger replacement.
string
The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
Environment
This property is required.
Changes to this property will trigger replacement.
KafkaClusterEnvironmentArgs
Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
Region
This property is required.
Changes to this property will trigger replacement.
string
The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
Basic KafkaClusterBasicArgs
The configuration of the Basic Kafka cluster.
ByokKey KafkaClusterByokKeyArgs
Dedicated KafkaClusterDedicatedArgs
(Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
DisplayName string
The name of the Kafka cluster.
Enterprises []KafkaClusterEnterpriseArgs
The configuration of the Enterprise Kafka cluster.
Freights []KafkaClusterFreightArgs
The configuration of the Freight Kafka cluster.
Network KafkaClusterNetworkArgs
Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
Standard KafkaClusterStandardArgs
The configuration of the Standard Kafka cluster.
availability
This property is required.
Changes to this property will trigger replacement.
String
The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
cloud
This property is required.
Changes to this property will trigger replacement.
String
The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
environment
This property is required.
Changes to this property will trigger replacement.
KafkaClusterEnvironment
Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
region
This property is required.
Changes to this property will trigger replacement.
String
The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
basic KafkaClusterBasic
The configuration of the Basic Kafka cluster.
byokKey KafkaClusterByokKey
dedicated KafkaClusterDedicated
(Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
displayName String
The name of the Kafka cluster.
enterprises List<KafkaClusterEnterprise>
The configuration of the Enterprise Kafka cluster.
freights List<KafkaClusterFreight>
The configuration of the Freight Kafka cluster.
network KafkaClusterNetwork
Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
standard KafkaClusterStandard
The configuration of the Standard Kafka cluster.
availability
This property is required.
Changes to this property will trigger replacement.
string
The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
cloud
This property is required.
Changes to this property will trigger replacement.
string
The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
environment
This property is required.
Changes to this property will trigger replacement.
KafkaClusterEnvironment
Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
region
This property is required.
Changes to this property will trigger replacement.
string
The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
basic KafkaClusterBasic
The configuration of the Basic Kafka cluster.
byokKey KafkaClusterByokKey
dedicated KafkaClusterDedicated
(Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
displayName string
The name of the Kafka cluster.
enterprises KafkaClusterEnterprise[]
The configuration of the Enterprise Kafka cluster.
freights KafkaClusterFreight[]
The configuration of the Freight Kafka cluster.
network KafkaClusterNetwork
Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
standard KafkaClusterStandard
The configuration of the Standard Kafka cluster.
availability
This property is required.
Changes to this property will trigger replacement.
str
The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
cloud
This property is required.
Changes to this property will trigger replacement.
str
The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
environment
This property is required.
Changes to this property will trigger replacement.
KafkaClusterEnvironmentArgs
Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
region
This property is required.
Changes to this property will trigger replacement.
str
The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
basic KafkaClusterBasicArgs
The configuration of the Basic Kafka cluster.
byok_key KafkaClusterByokKeyArgs
dedicated KafkaClusterDedicatedArgs
(Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
display_name str
The name of the Kafka cluster.
enterprises Sequence[KafkaClusterEnterpriseArgs]
The configuration of the Enterprise Kafka cluster.
freights Sequence[KafkaClusterFreightArgs]
The configuration of the Freight Kafka cluster.
network KafkaClusterNetworkArgs
Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
standard KafkaClusterStandardArgs
The configuration of the Standard Kafka cluster.
availability
This property is required.
Changes to this property will trigger replacement.
String
The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
cloud
This property is required.
Changes to this property will trigger replacement.
String
The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
environment
This property is required.
Changes to this property will trigger replacement.
Property Map
Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
region
This property is required.
Changes to this property will trigger replacement.
String
The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
basic Property Map
The configuration of the Basic Kafka cluster.
byokKey Property Map
dedicated Property Map
(Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
displayName String
The name of the Kafka cluster.
enterprises List<Property Map>
The configuration of the Enterprise Kafka cluster.
freights List<Property Map>
The configuration of the Freight Kafka cluster.
network Property Map
Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
standard Property Map
The configuration of the Standard Kafka cluster.

Outputs

All input properties are implicitly available as output properties. Additionally, the KafkaCluster resource produces the following output properties:

ApiVersion string
(Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.
BootstrapEndpoint string
(Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
Id string
The provider-assigned unique ID for this managed resource.
Kind string
(Required String) A kind of the Kafka cluster, for example, Cluster.
RbacCrn string
(Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
RestEndpoint string
(Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
ApiVersion string
(Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.
BootstrapEndpoint string
(Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
Id string
The provider-assigned unique ID for this managed resource.
Kind string
(Required String) A kind of the Kafka cluster, for example, Cluster.
RbacCrn string
(Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
RestEndpoint string
(Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
apiVersion String
(Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.
bootstrapEndpoint String
(Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
id String
The provider-assigned unique ID for this managed resource.
kind String
(Required String) A kind of the Kafka cluster, for example, Cluster.
rbacCrn String
(Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
restEndpoint String
(Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
apiVersion string
(Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.
bootstrapEndpoint string
(Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
id string
The provider-assigned unique ID for this managed resource.
kind string
(Required String) A kind of the Kafka cluster, for example, Cluster.
rbacCrn string
(Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
restEndpoint string
(Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
api_version str
(Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.
bootstrap_endpoint str
(Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
id str
The provider-assigned unique ID for this managed resource.
kind str
(Required String) A kind of the Kafka cluster, for example, Cluster.
rbac_crn str
(Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
rest_endpoint str
(Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
apiVersion String
(Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.
bootstrapEndpoint String
(Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
id String
The provider-assigned unique ID for this managed resource.
kind String
(Required String) A kind of the Kafka cluster, for example, Cluster.
rbacCrn String
(Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
restEndpoint String
(Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).

Look up Existing KafkaCluster Resource

Get an existing KafkaCluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: KafkaClusterState, opts?: CustomResourceOptions): KafkaCluster
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        api_version: Optional[str] = None,
        availability: Optional[str] = None,
        basic: Optional[KafkaClusterBasicArgs] = None,
        bootstrap_endpoint: Optional[str] = None,
        byok_key: Optional[KafkaClusterByokKeyArgs] = None,
        cloud: Optional[str] = None,
        dedicated: Optional[KafkaClusterDedicatedArgs] = None,
        display_name: Optional[str] = None,
        enterprises: Optional[Sequence[KafkaClusterEnterpriseArgs]] = None,
        environment: Optional[KafkaClusterEnvironmentArgs] = None,
        freights: Optional[Sequence[KafkaClusterFreightArgs]] = None,
        kind: Optional[str] = None,
        network: Optional[KafkaClusterNetworkArgs] = None,
        rbac_crn: Optional[str] = None,
        region: Optional[str] = None,
        rest_endpoint: Optional[str] = None,
        standard: Optional[KafkaClusterStandardArgs] = None) -> KafkaCluster
func GetKafkaCluster(ctx *Context, name string, id IDInput, state *KafkaClusterState, opts ...ResourceOption) (*KafkaCluster, error)
public static KafkaCluster Get(string name, Input<string> id, KafkaClusterState? state, CustomResourceOptions? opts = null)
public static KafkaCluster get(String name, Output<String> id, KafkaClusterState state, CustomResourceOptions options)
resources:
  _:
    type: confluentcloud:KafkaCluster
    get:
      id: ${id}
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
The following state arguments are supported:
ApiVersion string
(Required String) The API version of the schema that this Kafka cluster conforms to, for example, cmk/v2.
Availability Changes to this property will trigger replacement. string
The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
Basic Pulumi.ConfluentCloud.Inputs.KafkaClusterBasic
The configuration of the Basic Kafka cluster.
BootstrapEndpoint string
(Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
ByokKey Pulumi.ConfluentCloud.Inputs.KafkaClusterByokKey
Cloud Changes to this property will trigger replacement. string
The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
Dedicated Pulumi.ConfluentCloud.Inputs.KafkaClusterDedicated
(Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
DisplayName string
The name of the Kafka cluster.
Enterprises List<Pulumi.ConfluentCloud.Inputs.KafkaClusterEnterprise>
The configuration of the Enterprise Kafka cluster.
Environment Changes to this property will trigger replacement. Pulumi.ConfluentCloud.Inputs.KafkaClusterEnvironment
Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
Freights List<Pulumi.ConfluentCloud.Inputs.KafkaClusterFreight>
The configuration of the Freight Kafka cluster.
Kind string
(Required String) The kind of the Kafka cluster, for example, Cluster.
Network Pulumi.ConfluentCloud.Inputs.KafkaClusterNetwork
Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
RbacCrn string
(Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
Region Changes to this property will trigger replacement. string
The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
RestEndpoint string
(Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
Standard Pulumi.ConfluentCloud.Inputs.KafkaClusterStandard
The configuration of the Standard Kafka cluster.
ApiVersion string
(Required String) The API version of the schema that this Kafka cluster conforms to, for example, cmk/v2.
Availability Changes to this property will trigger replacement. string
The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
Basic KafkaClusterBasicArgs
The configuration of the Basic Kafka cluster.
BootstrapEndpoint string
(Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
ByokKey KafkaClusterByokKeyArgs
Cloud Changes to this property will trigger replacement. string
The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
Dedicated KafkaClusterDedicatedArgs
(Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
DisplayName string
The name of the Kafka cluster.
Enterprises []KafkaClusterEnterpriseArgs
The configuration of the Enterprise Kafka cluster.
Environment Changes to this property will trigger replacement. KafkaClusterEnvironmentArgs
Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
Freights []KafkaClusterFreightArgs
The configuration of the Freight Kafka cluster.
Kind string
(Required String) The kind of the Kafka cluster, for example, Cluster.
Network KafkaClusterNetworkArgs
Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
RbacCrn string
(Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
Region Changes to this property will trigger replacement. string
The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
RestEndpoint string
(Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
Standard KafkaClusterStandardArgs
The configuration of the Standard Kafka cluster.
apiVersion String
(Required String) The API version of the schema that this Kafka cluster conforms to, for example, cmk/v2.
availability Changes to this property will trigger replacement. String
The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
basic KafkaClusterBasic
The configuration of the Basic Kafka cluster.
bootstrapEndpoint String
(Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
byokKey KafkaClusterByokKey
cloud Changes to this property will trigger replacement. String
The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
dedicated KafkaClusterDedicated
(Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
displayName String
The name of the Kafka cluster.
enterprises List<KafkaClusterEnterprise>
The configuration of the Enterprise Kafka cluster.
environment Changes to this property will trigger replacement. KafkaClusterEnvironment
Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
freights List<KafkaClusterFreight>
The configuration of the Freight Kafka cluster.
kind String
(Required String) The kind of the Kafka cluster, for example, Cluster.
network KafkaClusterNetwork
Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
rbacCrn String
(Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
region Changes to this property will trigger replacement. String
The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
restEndpoint String
(Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
standard KafkaClusterStandard
The configuration of the Standard Kafka cluster.
apiVersion string
(Required String) The API version of the schema that this Kafka cluster conforms to, for example, cmk/v2.
availability Changes to this property will trigger replacement. string
The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
basic KafkaClusterBasic
The configuration of the Basic Kafka cluster.
bootstrapEndpoint string
(Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
byokKey KafkaClusterByokKey
cloud Changes to this property will trigger replacement. string
The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
dedicated KafkaClusterDedicated
(Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
displayName string
The name of the Kafka cluster.
enterprises KafkaClusterEnterprise[]
The configuration of the Enterprise Kafka cluster.
environment Changes to this property will trigger replacement. KafkaClusterEnvironment
Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
freights KafkaClusterFreight[]
The configuration of the Freight Kafka cluster.
kind string
(Required String) The kind of the Kafka cluster, for example, Cluster.
network KafkaClusterNetwork
Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
rbacCrn string
(Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
region Changes to this property will trigger replacement. string
The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
restEndpoint string
(Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
standard KafkaClusterStandard
The configuration of the Standard Kafka cluster.
api_version str
(Required String) The API version of the schema that this Kafka cluster conforms to, for example, cmk/v2.
availability Changes to this property will trigger replacement. str
The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
basic KafkaClusterBasicArgs
The configuration of the Basic Kafka cluster.
bootstrap_endpoint str
(Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
byok_key KafkaClusterByokKeyArgs
cloud Changes to this property will trigger replacement. str
The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
dedicated KafkaClusterDedicatedArgs
(Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
display_name str
The name of the Kafka cluster.
enterprises Sequence[KafkaClusterEnterpriseArgs]
The configuration of the Enterprise Kafka cluster.
environment Changes to this property will trigger replacement. KafkaClusterEnvironmentArgs
Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
freights Sequence[KafkaClusterFreightArgs]
The configuration of the Freight Kafka cluster.
kind str
(Required String) The kind of the Kafka cluster, for example, Cluster.
network KafkaClusterNetworkArgs
Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
rbac_crn str
(Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
region Changes to this property will trigger replacement. str
The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
rest_endpoint str
(Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
standard KafkaClusterStandardArgs
The configuration of the Standard Kafka cluster.
apiVersion String
(Required String) The API version of the schema that this Kafka cluster conforms to, for example, cmk/v2.
availability Changes to this property will trigger replacement. String
The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
basic Property Map
The configuration of the Basic Kafka cluster.
bootstrapEndpoint String
(Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
byokKey Property Map
cloud Changes to this property will trigger replacement. String
The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
dedicated Property Map
(Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
displayName String
The name of the Kafka cluster.
enterprises List<Property Map>
The configuration of the Enterprise Kafka cluster.
environment Changes to this property will trigger replacement. Property Map
Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
freights List<Property Map>
The configuration of the Freight Kafka cluster.
kind String
(Required String) The kind of the Kafka cluster, for example, Cluster.
network Property Map
Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
rbacCrn String
(Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
region Changes to this property will trigger replacement. String
The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
restEndpoint String
(Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
standard Property Map
The configuration of the Standard Kafka cluster.

Supporting Types

KafkaClusterByokKey
, KafkaClusterByokKeyArgs

Id
This property is required.
Changes to this property will trigger replacement.
string
The ID of the Confluent key that is used to encrypt the data in the Kafka cluster, for example, cck-lye5m.
Id
This property is required.
Changes to this property will trigger replacement.
string
The ID of the Confluent key that is used to encrypt the data in the Kafka cluster, for example, cck-lye5m.
id
This property is required.
Changes to this property will trigger replacement.
String
The ID of the Confluent key that is used to encrypt the data in the Kafka cluster, for example, cck-lye5m.
id
This property is required.
Changes to this property will trigger replacement.
string
The ID of the Confluent key that is used to encrypt the data in the Kafka cluster, for example, cck-lye5m.
id
This property is required.
Changes to this property will trigger replacement.
str
The ID of the Confluent key that is used to encrypt the data in the Kafka cluster, for example, cck-lye5m.
id
This property is required.
Changes to this property will trigger replacement.
String
The ID of the Confluent key that is used to encrypt the data in the Kafka cluster, for example, cck-lye5m.

KafkaClusterDedicated
, KafkaClusterDedicatedArgs

Cku This property is required. int

The number of Confluent Kafka Units (CKUs) for Dedicated cluster types. The minimum number of CKUs for SINGLE_ZONE dedicated clusters is 1 whereas MULTI_ZONE dedicated clusters must have 2 CKUs or more.

Note: Exactly one from the basic, standard, dedicated, enterprise or freight configuration blocks must be specified.

Note: The freight Kafka cluster type is currently available only on AWS.

Note: The enterprise Kafka cluster type is currently available only on AWS and Azure.

Warning: You can only upgrade clusters from basic to standard.

Note: Currently, provisioning of a Dedicated Kafka cluster takes around 25 minutes on average but might take up to 24 hours. If you can't wait for the pulumi up step to finish, you can exit it and import the cluster by using the pulumi import command once it has been provisioned. When the cluster is provisioned, you will receive an email notification, and you can also follow updates on the Target Environment web page of the Confluent Cloud website.

EncryptionKey string
The ID of the encryption key that is used to encrypt the data in the Kafka cluster.
Zones List<string>
(Required List of String) The list of zones the cluster is in.

  • On AWS, zones are AWS AZ IDs, for example, use1-az3.
Cku This property is required. int

The number of Confluent Kafka Units (CKUs) for Dedicated cluster types. The minimum number of CKUs for SINGLE_ZONE dedicated clusters is 1 whereas MULTI_ZONE dedicated clusters must have 2 CKUs or more.

Note: Exactly one from the basic, standard, dedicated, enterprise or freight configuration blocks must be specified.

Note: The freight Kafka cluster type is currently available only on AWS.

Note: The enterprise Kafka cluster type is currently available only on AWS and Azure.

Warning: You can only upgrade clusters from basic to standard.

Note: Currently, provisioning of a Dedicated Kafka cluster takes around 25 minutes on average but might take up to 24 hours. If you can't wait for the pulumi up step to finish, you can exit it and import the cluster by using the pulumi import command once it has been provisioned. When the cluster is provisioned, you will receive an email notification, and you can also follow updates on the Target Environment web page of the Confluent Cloud website.

EncryptionKey string
The ID of the encryption key that is used to encrypt the data in the Kafka cluster.
Zones []string
(Required List of String) The list of zones the cluster is in.

  • On AWS, zones are AWS AZ IDs, for example, use1-az3.
cku This property is required. Integer

The number of Confluent Kafka Units (CKUs) for Dedicated cluster types. The minimum number of CKUs for SINGLE_ZONE dedicated clusters is 1 whereas MULTI_ZONE dedicated clusters must have 2 CKUs or more.

Note: Exactly one from the basic, standard, dedicated, enterprise or freight configuration blocks must be specified.

Note: The freight Kafka cluster type is currently available only on AWS.

Note: The enterprise Kafka cluster type is currently available only on AWS and Azure.

Warning: You can only upgrade clusters from basic to standard.

Note: Currently, provisioning of a Dedicated Kafka cluster takes around 25 minutes on average but might take up to 24 hours. If you can't wait for the pulumi up step to finish, you can exit it and import the cluster by using the pulumi import command once it has been provisioned. When the cluster is provisioned, you will receive an email notification, and you can also follow updates on the Target Environment web page of the Confluent Cloud website.

encryptionKey String
The ID of the encryption key that is used to encrypt the data in the Kafka cluster.
zones List<String>
(Required List of String) The list of zones the cluster is in.

  • On AWS, zones are AWS AZ IDs, for example, use1-az3.
cku This property is required. number

The number of Confluent Kafka Units (CKUs) for Dedicated cluster types. The minimum number of CKUs for SINGLE_ZONE dedicated clusters is 1 whereas MULTI_ZONE dedicated clusters must have 2 CKUs or more.

Note: Exactly one from the basic, standard, dedicated, enterprise or freight configuration blocks must be specified.

Note: The freight Kafka cluster type is currently available only on AWS.

Note: The enterprise Kafka cluster type is currently available only on AWS and Azure.

Warning: You can only upgrade clusters from basic to standard.

Note: Currently, provisioning of a Dedicated Kafka cluster takes around 25 minutes on average but might take up to 24 hours. If you can't wait for the pulumi up step to finish, you can exit it and import the cluster by using the pulumi import command once it has been provisioned. When the cluster is provisioned, you will receive an email notification, and you can also follow updates on the Target Environment web page of the Confluent Cloud website.

encryptionKey string
The ID of the encryption key that is used to encrypt the data in the Kafka cluster.
zones string[]
(Required List of String) The list of zones the cluster is in.

  • On AWS, zones are AWS AZ IDs, for example, use1-az3.
cku This property is required. int

The number of Confluent Kafka Units (CKUs) for Dedicated cluster types. The minimum number of CKUs for SINGLE_ZONE dedicated clusters is 1 whereas MULTI_ZONE dedicated clusters must have 2 CKUs or more.

Note: Exactly one from the basic, standard, dedicated, enterprise or freight configuration blocks must be specified.

Note: The freight Kafka cluster type is currently available only on AWS.

Note: The enterprise Kafka cluster type is currently available only on AWS and Azure.

Warning: You can only upgrade clusters from basic to standard.

Note: Currently, provisioning of a Dedicated Kafka cluster takes around 25 minutes on average but might take up to 24 hours. If you can't wait for the pulumi up step to finish, you can exit it and import the cluster by using the pulumi import command once it has been provisioned. When the cluster is provisioned, you will receive an email notification, and you can also follow updates on the Target Environment web page of the Confluent Cloud website.

encryption_key str
The ID of the encryption key that is used to encrypt the data in the Kafka cluster.
zones Sequence[str]
(Required List of String) The list of zones the cluster is in.

  • On AWS, zones are AWS AZ IDs, for example, use1-az3.
cku This property is required. Number

The number of Confluent Kafka Units (CKUs) for Dedicated cluster types. The minimum number of CKUs for SINGLE_ZONE dedicated clusters is 1 whereas MULTI_ZONE dedicated clusters must have 2 CKUs or more.

Note: Exactly one from the basic, standard, dedicated, enterprise or freight configuration blocks must be specified.

Note: The freight Kafka cluster type is currently available only on AWS.

Note: The enterprise Kafka cluster type is currently available only on AWS and Azure.

Warning: You can only upgrade clusters from basic to standard.

Note: Currently, provisioning of a Dedicated Kafka cluster takes around 25 minutes on average but might take up to 24 hours. If you can't wait for the pulumi up step to finish, you can exit it and import the cluster by using the pulumi import command once it has been provisioned. When the cluster is provisioned, you will receive an email notification, and you can also follow updates on the Target Environment web page of the Confluent Cloud website.

encryptionKey String
The ID of the encryption key that is used to encrypt the data in the Kafka cluster.
zones List<String>
(Required List of String) The list of zones the cluster is in.

  • On AWS, zones are AWS AZ IDs, for example, use1-az3.

KafkaClusterEnvironment
, KafkaClusterEnvironmentArgs

Id
This property is required.
Changes to this property will trigger replacement.
string
The ID of the Environment that the Kafka cluster belongs to, for example, env-abc123.
Id
This property is required.
Changes to this property will trigger replacement.
string
The ID of the Environment that the Kafka cluster belongs to, for example, env-abc123.
id
This property is required.
Changes to this property will trigger replacement.
String
The ID of the Environment that the Kafka cluster belongs to, for example, env-abc123.
id
This property is required.
Changes to this property will trigger replacement.
string
The ID of the Environment that the Kafka cluster belongs to, for example, env-abc123.
id
This property is required.
Changes to this property will trigger replacement.
str
The ID of the Environment that the Kafka cluster belongs to, for example, env-abc123.
id
This property is required.
Changes to this property will trigger replacement.
String
The ID of the Environment that the Kafka cluster belongs to, for example, env-abc123.

KafkaClusterFreight
, KafkaClusterFreightArgs

Zones List<string>
(Required List of String) The list of zones the cluster is in.

  • On AWS, zones are AWS AZ IDs, for example, use1-az3.
Zones []string
(Required List of String) The list of zones the cluster is in.

  • On AWS, zones are AWS AZ IDs, for example, use1-az3.
zones List<String>
(Required List of String) The list of zones the cluster is in.

  • On AWS, zones are AWS AZ IDs, for example, use1-az3.
zones string[]
(Required List of String) The list of zones the cluster is in.

  • On AWS, zones are AWS AZ IDs, for example, use1-az3.
zones Sequence[str]
(Required List of String) The list of zones the cluster is in.

  • On AWS, zones are AWS AZ IDs, for example, use1-az3.
zones List<String>
(Required List of String) The list of zones the cluster is in.

  • On AWS, zones are AWS AZ IDs, for example, use1-az3.

KafkaClusterNetwork
, KafkaClusterNetworkArgs

Id
This property is required.
Changes to this property will trigger replacement.
string
The ID of the Network that the Kafka cluster belongs to, for example, n-abc123.
Id
This property is required.
Changes to this property will trigger replacement.
string
The ID of the Network that the Kafka cluster belongs to, for example, n-abc123.
id
This property is required.
Changes to this property will trigger replacement.
String
The ID of the Network that the Kafka cluster belongs to, for example, n-abc123.
id
This property is required.
Changes to this property will trigger replacement.
string
The ID of the Network that the Kafka cluster belongs to, for example, n-abc123.
id
This property is required.
Changes to this property will trigger replacement.
str
The ID of the Network that the Kafka cluster belongs to, for example, n-abc123.
id
This property is required.
Changes to this property will trigger replacement.
String
The ID of the Network that the Kafka cluster belongs to, for example, n-abc123.

Import

You can import a Kafka cluster by using Environment ID and Kafka cluster ID, in the format <Environment ID>/<Kafka cluster ID>, e.g.

$ export CONFLUENT_CLOUD_API_KEY="<cloud_api_key>"

$ export CONFLUENT_CLOUD_API_SECRET="<cloud_api_secret>"

$ pulumi import confluentcloud:index/kafkaCluster:KafkaCluster my_kafka env-abc123/lkc-abc123
Copy

Warning: Do not forget to delete your terminal command history afterwards for security purposes.

To learn more about importing existing cloud resources, see Importing resources.

Package Details

Repository
Confluent Cloud pulumi/pulumi-confluentcloud
License
Apache-2.0
Notes
This Pulumi package is based on the confluent Terraform Provider.