Google Cloud v8.27.1 published on Friday, Apr 25, 2025 by Pulumi

gcp.bigquery.Job


Jobs are actions that BigQuery runs on your behalf to load data, export data, query data, or copy data. Once a BigQuery job is created, it cannot be changed or deleted.

To get more information about Job, see the official BigQuery Jobs API documentation and the BigQuery how-to guides.
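Because a finished job can never be modified or removed, changing any part of a job's configuration causes Pulumi to replace the resource, and recreating a job under the same jobId will typically fail because the original job still exists. Below is a minimal sketch of one way to avoid that, assuming it is acceptable to derive the job ID from the query text; the hashing helper is illustrative and not part of the provider.

import * as crypto from "crypto";
import * as gcp from "@pulumi/gcp";

// Illustrative pattern: derive a deterministic job ID from the query text so that
// editing the query yields a fresh job ID instead of colliding with the old, immutable job.
const queryText = "SELECT state FROM [lookerdata:cdc.project_tycho_reports]";
const queryHash = crypto.createHash("sha256").update(queryText).digest("hex").slice(0, 12);

const job = new gcp.bigquery.Job("job", {
    jobId: `job_query_${queryHash}`,
    query: {
        query: queryText,
    },
});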

Example Usage

Bigquery Job Query

This example runs a query job that writes its results to a destination table, referencing the table by its project, dataset, and table IDs.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const bar = new gcp.bigquery.Dataset("bar", {
    datasetId: "job_query_dataset",
    friendlyName: "test",
    description: "This is a test description",
    location: "US",
});
const foo = new gcp.bigquery.Table("foo", {
    deletionProtection: false,
    datasetId: bar.datasetId,
    tableId: "job_query_table",
});
const job = new gcp.bigquery.Job("job", {
    jobId: "job_query",
    labels: {
        "example-label": "example-value",
    },
    query: {
        query: "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
        destinationTable: {
            projectId: foo.project,
            datasetId: foo.datasetId,
            tableId: foo.tableId,
        },
        allowLargeResults: true,
        flattenResults: true,
        scriptOptions: {
            keyResultStatement: "LAST",
        },
    },
});
import pulumi
import pulumi_gcp as gcp

bar = gcp.bigquery.Dataset("bar",
    dataset_id="job_query_dataset",
    friendly_name="test",
    description="This is a test description",
    location="US")
foo = gcp.bigquery.Table("foo",
    deletion_protection=False,
    dataset_id=bar.dataset_id,
    table_id="job_query_table")
job = gcp.bigquery.Job("job",
    job_id="job_query",
    labels={
        "example-label": "example-value",
    },
    query={
        "query": "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
        "destination_table": {
            "project_id": foo.project,
            "dataset_id": foo.dataset_id,
            "table_id": foo.table_id,
        },
        "allow_large_results": True,
        "flatten_results": True,
        "script_options": {
            "key_result_statement": "LAST",
        },
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		bar, err := bigquery.NewDataset(ctx, "bar", &bigquery.DatasetArgs{
			DatasetId:    pulumi.String("job_query_dataset"),
			FriendlyName: pulumi.String("test"),
			Description:  pulumi.String("This is a test description"),
			Location:     pulumi.String("US"),
		})
		if err != nil {
			return err
		}
		foo, err := bigquery.NewTable(ctx, "foo", &bigquery.TableArgs{
			DeletionProtection: pulumi.Bool(false),
			DatasetId:          bar.DatasetId,
			TableId:            pulumi.String("job_query_table"),
		})
		if err != nil {
			return err
		}
		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
			JobId: pulumi.String("job_query"),
			Labels: pulumi.StringMap{
				"example-label": pulumi.String("example-value"),
			},
			Query: &bigquery.JobQueryArgs{
				Query: pulumi.String("SELECT state FROM [lookerdata:cdc.project_tycho_reports]"),
				DestinationTable: &bigquery.JobQueryDestinationTableArgs{
					ProjectId: foo.Project,
					DatasetId: foo.DatasetId,
					TableId:   foo.TableId,
				},
				AllowLargeResults: pulumi.Bool(true),
				FlattenResults:    pulumi.Bool(true),
				ScriptOptions: &bigquery.JobQueryScriptOptionsArgs{
					KeyResultStatement: pulumi.String("LAST"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var bar = new Gcp.BigQuery.Dataset("bar", new()
    {
        DatasetId = "job_query_dataset",
        FriendlyName = "test",
        Description = "This is a test description",
        Location = "US",
    });

    var foo = new Gcp.BigQuery.Table("foo", new()
    {
        DeletionProtection = false,
        DatasetId = bar.DatasetId,
        TableId = "job_query_table",
    });

    var job = new Gcp.BigQuery.Job("job", new()
    {
        JobId = "job_query",
        Labels = 
        {
            { "example-label", "example-value" },
        },
        Query = new Gcp.BigQuery.Inputs.JobQueryArgs
        {
            Query = "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
            DestinationTable = new Gcp.BigQuery.Inputs.JobQueryDestinationTableArgs
            {
                ProjectId = foo.Project,
                DatasetId = foo.DatasetId,
                TableId = foo.TableId,
            },
            AllowLargeResults = true,
            FlattenResults = true,
            ScriptOptions = new Gcp.BigQuery.Inputs.JobQueryScriptOptionsArgs
            {
                KeyResultStatement = "LAST",
            },
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryDestinationTableArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryScriptOptionsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var bar = new Dataset("bar", DatasetArgs.builder()
            .datasetId("job_query_dataset")
            .friendlyName("test")
            .description("This is a test description")
            .location("US")
            .build());

        var foo = new Table("foo", TableArgs.builder()
            .deletionProtection(false)
            .datasetId(bar.datasetId())
            .tableId("job_query_table")
            .build());

        var job = new Job("job", JobArgs.builder()
            .jobId("job_query")
            .labels(Map.of("example-label", "example-value"))
            .query(JobQueryArgs.builder()
                .query("SELECT state FROM [lookerdata:cdc.project_tycho_reports]")
                .destinationTable(JobQueryDestinationTableArgs.builder()
                    .projectId(foo.project())
                    .datasetId(foo.datasetId())
                    .tableId(foo.tableId())
                    .build())
                .allowLargeResults(true)
                .flattenResults(true)
                .scriptOptions(JobQueryScriptOptionsArgs.builder()
                    .keyResultStatement("LAST")
                    .build())
                .build())
            .build());

    }
}
resources:
  foo:
    type: gcp:bigquery:Table
    properties:
      deletionProtection: false
      datasetId: ${bar.datasetId}
      tableId: job_query_table
  bar:
    type: gcp:bigquery:Dataset
    properties:
      datasetId: job_query_dataset
      friendlyName: test
      description: This is a test description
      location: US
  job:
    type: gcp:bigquery:Job
    properties:
      jobId: job_query
      labels:
        example-label: example-value
      query:
        query: SELECT state FROM [lookerdata:cdc.project_tycho_reports]
        destinationTable:
          projectId: ${foo.project}
          datasetId: ${foo.datasetId}
          tableId: ${foo.tableId}
        allowLargeResults: true
        flattenResults: true
        scriptOptions:
          keyResultStatement: LAST

Bigquery Job Query Table Reference

This variant of the query job references the destination table and the default dataset by their resource IDs rather than by separate project, dataset, and table IDs.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const bar = new gcp.bigquery.Dataset("bar", {
    datasetId: "job_query_dataset",
    friendlyName: "test",
    description: "This is a test description",
    location: "US",
});
const foo = new gcp.bigquery.Table("foo", {
    deletionProtection: false,
    datasetId: bar.datasetId,
    tableId: "job_query_table",
});
const job = new gcp.bigquery.Job("job", {
    jobId: "job_query",
    labels: {
        "example-label": "example-value",
    },
    query: {
        query: "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
        destinationTable: {
            tableId: foo.id,
        },
        defaultDataset: {
            datasetId: bar.id,
        },
        allowLargeResults: true,
        flattenResults: true,
        scriptOptions: {
            keyResultStatement: "LAST",
        },
    },
});
import pulumi
import pulumi_gcp as gcp

bar = gcp.bigquery.Dataset("bar",
    dataset_id="job_query_dataset",
    friendly_name="test",
    description="This is a test description",
    location="US")
foo = gcp.bigquery.Table("foo",
    deletion_protection=False,
    dataset_id=bar.dataset_id,
    table_id="job_query_table")
job = gcp.bigquery.Job("job",
    job_id="job_query",
    labels={
        "example-label": "example-value",
    },
    query={
        "query": "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
        "destination_table": {
            "table_id": foo.id,
        },
        "default_dataset": {
            "dataset_id": bar.id,
        },
        "allow_large_results": True,
        "flatten_results": True,
        "script_options": {
            "key_result_statement": "LAST",
        },
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		bar, err := bigquery.NewDataset(ctx, "bar", &bigquery.DatasetArgs{
			DatasetId:    pulumi.String("job_query_dataset"),
			FriendlyName: pulumi.String("test"),
			Description:  pulumi.String("This is a test description"),
			Location:     pulumi.String("US"),
		})
		if err != nil {
			return err
		}
		foo, err := bigquery.NewTable(ctx, "foo", &bigquery.TableArgs{
			DeletionProtection: pulumi.Bool(false),
			DatasetId:          bar.DatasetId,
			TableId:            pulumi.String("job_query_table"),
		})
		if err != nil {
			return err
		}
		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
			JobId: pulumi.String("job_query"),
			Labels: pulumi.StringMap{
				"example-label": pulumi.String("example-value"),
			},
			Query: &bigquery.JobQueryArgs{
				Query: pulumi.String("SELECT state FROM [lookerdata:cdc.project_tycho_reports]"),
				DestinationTable: &bigquery.JobQueryDestinationTableArgs{
					TableId: foo.ID(),
				},
				DefaultDataset: &bigquery.JobQueryDefaultDatasetArgs{
					DatasetId: bar.ID(),
				},
				AllowLargeResults: pulumi.Bool(true),
				FlattenResults:    pulumi.Bool(true),
				ScriptOptions: &bigquery.JobQueryScriptOptionsArgs{
					KeyResultStatement: pulumi.String("LAST"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var bar = new Gcp.BigQuery.Dataset("bar", new()
    {
        DatasetId = "job_query_dataset",
        FriendlyName = "test",
        Description = "This is a test description",
        Location = "US",
    });

    var foo = new Gcp.BigQuery.Table("foo", new()
    {
        DeletionProtection = false,
        DatasetId = bar.DatasetId,
        TableId = "job_query_table",
    });

    var job = new Gcp.BigQuery.Job("job", new()
    {
        JobId = "job_query",
        Labels = 
        {
            { "example-label", "example-value" },
        },
        Query = new Gcp.BigQuery.Inputs.JobQueryArgs
        {
            Query = "SELECT state FROM [lookerdata:cdc.project_tycho_reports]",
            DestinationTable = new Gcp.BigQuery.Inputs.JobQueryDestinationTableArgs
            {
                TableId = foo.Id,
            },
            DefaultDataset = new Gcp.BigQuery.Inputs.JobQueryDefaultDatasetArgs
            {
                DatasetId = bar.Id,
            },
            AllowLargeResults = true,
            FlattenResults = true,
            ScriptOptions = new Gcp.BigQuery.Inputs.JobQueryScriptOptionsArgs
            {
                KeyResultStatement = "LAST",
            },
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryDestinationTableArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryDefaultDatasetArgs;
import com.pulumi.gcp.bigquery.inputs.JobQueryScriptOptionsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var bar = new Dataset("bar", DatasetArgs.builder()
            .datasetId("job_query_dataset")
            .friendlyName("test")
            .description("This is a test description")
            .location("US")
            .build());

        var foo = new Table("foo", TableArgs.builder()
            .deletionProtection(false)
            .datasetId(bar.datasetId())
            .tableId("job_query_table")
            .build());

        var job = new Job("job", JobArgs.builder()
            .jobId("job_query")
            .labels(Map.of("example-label", "example-value"))
            .query(JobQueryArgs.builder()
                .query("SELECT state FROM [lookerdata:cdc.project_tycho_reports]")
                .destinationTable(JobQueryDestinationTableArgs.builder()
                    .tableId(foo.id())
                    .build())
                .defaultDataset(JobQueryDefaultDatasetArgs.builder()
                    .datasetId(bar.id())
                    .build())
                .allowLargeResults(true)
                .flattenResults(true)
                .scriptOptions(JobQueryScriptOptionsArgs.builder()
                    .keyResultStatement("LAST")
                    .build())
                .build())
            .build());

    }
}
resources:
  foo:
    type: gcp:bigquery:Table
    properties:
      deletionProtection: false
      datasetId: ${bar.datasetId}
      tableId: job_query_table
  bar:
    type: gcp:bigquery:Dataset
    properties:
      datasetId: job_query_dataset
      friendlyName: test
      description: This is a test description
      location: US
  job:
    type: gcp:bigquery:Job
    properties:
      jobId: job_query
      labels:
        example-label: example-value
      query:
        query: SELECT state FROM [lookerdata:cdc.project_tycho_reports]
        destinationTable:
          tableId: ${foo.id}
        defaultDataset:
          datasetId: ${bar.id}
        allowLargeResults: true
        flattenResults: true
        scriptOptions:
          keyResultStatement: LAST

Bigquery Job Load

This example loads a CSV file from a public Cloud Storage bucket into a table, skipping the header row and appending to any existing data.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const bar = new gcp.bigquery.Dataset("bar", {
    datasetId: "job_load_dataset",
    friendlyName: "test",
    description: "This is a test description",
    location: "US",
});
const foo = new gcp.bigquery.Table("foo", {
    deletionProtection: false,
    datasetId: bar.datasetId,
    tableId: "job_load_table",
});
const job = new gcp.bigquery.Job("job", {
    jobId: "job_load",
    labels: {
        my_job: "load",
    },
    load: {
        sourceUris: ["gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv"],
        destinationTable: {
            projectId: foo.project,
            datasetId: foo.datasetId,
            tableId: foo.tableId,
        },
        skipLeadingRows: 1,
        schemaUpdateOptions: [
            "ALLOW_FIELD_RELAXATION",
            "ALLOW_FIELD_ADDITION",
        ],
        writeDisposition: "WRITE_APPEND",
        autodetect: true,
    },
});
import pulumi
import pulumi_gcp as gcp

bar = gcp.bigquery.Dataset("bar",
    dataset_id="job_load_dataset",
    friendly_name="test",
    description="This is a test description",
    location="US")
foo = gcp.bigquery.Table("foo",
    deletion_protection=False,
    dataset_id=bar.dataset_id,
    table_id="job_load_table")
job = gcp.bigquery.Job("job",
    job_id="job_load",
    labels={
        "my_job": "load",
    },
    load={
        "source_uris": ["gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv"],
        "destination_table": {
            "project_id": foo.project,
            "dataset_id": foo.dataset_id,
            "table_id": foo.table_id,
        },
        "skip_leading_rows": 1,
        "schema_update_options": [
            "ALLOW_FIELD_RELAXATION",
            "ALLOW_FIELD_ADDITION",
        ],
        "write_disposition": "WRITE_APPEND",
        "autodetect": True,
    })
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		bar, err := bigquery.NewDataset(ctx, "bar", &bigquery.DatasetArgs{
			DatasetId:    pulumi.String("job_load_dataset"),
			FriendlyName: pulumi.String("test"),
			Description:  pulumi.String("This is a test description"),
			Location:     pulumi.String("US"),
		})
		if err != nil {
			return err
		}
		foo, err := bigquery.NewTable(ctx, "foo", &bigquery.TableArgs{
			DeletionProtection: pulumi.Bool(false),
			DatasetId:          bar.DatasetId,
			TableId:            pulumi.String("job_load_table"),
		})
		if err != nil {
			return err
		}
		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
			JobId: pulumi.String("job_load"),
			Labels: pulumi.StringMap{
				"my_job": pulumi.String("load"),
			},
			Load: &bigquery.JobLoadArgs{
				SourceUris: pulumi.StringArray{
					pulumi.String("gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv"),
				},
				DestinationTable: &bigquery.JobLoadDestinationTableArgs{
					ProjectId: foo.Project,
					DatasetId: foo.DatasetId,
					TableId:   foo.TableId,
				},
				SkipLeadingRows: pulumi.Int(1),
				SchemaUpdateOptions: pulumi.StringArray{
					pulumi.String("ALLOW_FIELD_RELAXATION"),
					pulumi.String("ALLOW_FIELD_ADDITION"),
				},
				WriteDisposition: pulumi.String("WRITE_APPEND"),
				Autodetect:       pulumi.Bool(true),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var bar = new Gcp.BigQuery.Dataset("bar", new()
    {
        DatasetId = "job_load_dataset",
        FriendlyName = "test",
        Description = "This is a test description",
        Location = "US",
    });

    var foo = new Gcp.BigQuery.Table("foo", new()
    {
        DeletionProtection = false,
        DatasetId = bar.DatasetId,
        TableId = "job_load_table",
    });

    var job = new Gcp.BigQuery.Job("job", new()
    {
        JobId = "job_load",
        Labels = 
        {
            { "my_job", "load" },
        },
        Load = new Gcp.BigQuery.Inputs.JobLoadArgs
        {
            SourceUris = new[]
            {
                "gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv",
            },
            DestinationTable = new Gcp.BigQuery.Inputs.JobLoadDestinationTableArgs
            {
                ProjectId = foo.Project,
                DatasetId = foo.DatasetId,
                TableId = foo.TableId,
            },
            SkipLeadingRows = 1,
            SchemaUpdateOptions = new[]
            {
                "ALLOW_FIELD_RELAXATION",
                "ALLOW_FIELD_ADDITION",
            },
            WriteDisposition = "WRITE_APPEND",
            Autodetect = true,
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadDestinationTableArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var bar = new Dataset("bar", DatasetArgs.builder()
            .datasetId("job_load_dataset")
            .friendlyName("test")
            .description("This is a test description")
            .location("US")
            .build());

        var foo = new Table("foo", TableArgs.builder()
            .deletionProtection(false)
            .datasetId(bar.datasetId())
            .tableId("job_load_table")
            .build());

        var job = new Job("job", JobArgs.builder()
            .jobId("job_load")
            .labels(Map.of("my_job", "load"))
            .load(JobLoadArgs.builder()
                .sourceUris("gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv")
                .destinationTable(JobLoadDestinationTableArgs.builder()
                    .projectId(foo.project())
                    .datasetId(foo.datasetId())
                    .tableId(foo.tableId())
                    .build())
                .skipLeadingRows(1)
                .schemaUpdateOptions(                
                    "ALLOW_FIELD_RELAXATION",
                    "ALLOW_FIELD_ADDITION")
                .writeDisposition("WRITE_APPEND")
                .autodetect(true)
                .build())
            .build());

    }
}
resources:
  foo:
    type: gcp:bigquery:Table
    properties:
      deletionProtection: false
      datasetId: ${bar.datasetId}
      tableId: job_load_table
  bar:
    type: gcp:bigquery:Dataset
    properties:
      datasetId: job_load_dataset
      friendlyName: test
      description: This is a test description
      location: US
  job:
    type: gcp:bigquery:Job
    properties:
      jobId: job_load
      labels:
        my_job: load
      load:
        sourceUris:
          - gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv
        destinationTable:
          projectId: ${foo.project}
          datasetId: ${foo.datasetId}
          tableId: ${foo.tableId}
        skipLeadingRows: 1
        schemaUpdateOptions:
          - ALLOW_FIELD_RELAXATION
          - ALLOW_FIELD_ADDITION
        writeDisposition: WRITE_APPEND
        autodetect: true

Bigquery Job Load Geojson

This example uploads newline-delimited GeoJSON to a Cloud Storage bucket and loads it into a table, truncating any existing data in the destination.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const project = "my-project-name";
const bucket = new gcp.storage.Bucket("bucket", {
    name: `${project}-bq-geojson`,
    location: "US",
    uniformBucketLevelAccess: true,
});
const object = new gcp.storage.BucketObject("object", {
    name: "geojson-data.jsonl",
    bucket: bucket.name,
    content: `{"type":"Feature","properties":{"continent":"Europe","region":"Scandinavia"},"geometry":{"type":"Polygon","coordinates":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}
{"type":"Feature","properties":{"continent":"Africa","region":"West Africa"},"geometry":{"type":"Polygon","coordinates":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}
`,
});
const bar = new gcp.bigquery.Dataset("bar", {
    datasetId: "job_load_dataset",
    friendlyName: "test",
    description: "This is a test description",
    location: "US",
});
const foo = new gcp.bigquery.Table("foo", {
    deletionProtection: false,
    datasetId: bar.datasetId,
    tableId: "job_load_table",
});
const job = new gcp.bigquery.Job("job", {
    jobId: "job_load",
    labels: {
        my_job: "load",
    },
    load: {
        sourceUris: [pulumi.interpolate`gs://${object.bucket}/${object.name}`],
        destinationTable: {
            projectId: foo.project,
            datasetId: foo.datasetId,
            tableId: foo.tableId,
        },
        writeDisposition: "WRITE_TRUNCATE",
        autodetect: true,
        sourceFormat: "NEWLINE_DELIMITED_JSON",
        jsonExtension: "GEOJSON",
    },
}, {
    dependsOn: [object],
});
import pulumi
import pulumi_gcp as gcp

project = "my-project-name"
bucket = gcp.storage.Bucket("bucket",
    name=f"{project}-bq-geojson",
    location="US",
    uniform_bucket_level_access=True)
object = gcp.storage.BucketObject("object",
    name="geojson-data.jsonl",
    bucket=bucket.name,
    content="""{"type":"Feature","properties":{"continent":"Europe","region":"Scandinavia"},"geometry":{"type":"Polygon","coordinates":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}
{"type":"Feature","properties":{"continent":"Africa","region":"West Africa"},"geometry":{"type":"Polygon","coordinates":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}
""")
bar = gcp.bigquery.Dataset("bar",
    dataset_id="job_load_dataset",
    friendly_name="test",
    description="This is a test description",
    location="US")
foo = gcp.bigquery.Table("foo",
    deletion_protection=False,
    dataset_id=bar.dataset_id,
    table_id="job_load_table")
job = gcp.bigquery.Job("job",
    job_id="job_load",
    labels={
        "my_job": "load",
    },
    load={
        "source_uris": [pulumi.Output.all(
            bucket=object.bucket,
            name=object.name
).apply(lambda resolved_outputs: f"gs://{resolved_outputs['bucket']}/{resolved_outputs['name']}")
],
        "destination_table": {
            "project_id": foo.project,
            "dataset_id": foo.dataset_id,
            "table_id": foo.table_id,
        },
        "write_disposition": "WRITE_TRUNCATE",
        "autodetect": True,
        "source_format": "NEWLINE_DELIMITED_JSON",
        "json_extension": "GEOJSON",
    },
    opts = pulumi.ResourceOptions(depends_on=[object]))
package main

import (
	"fmt"

	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		project := "my-project-name"
		bucket, err := storage.NewBucket(ctx, "bucket", &storage.BucketArgs{
			Name:                     pulumi.Sprintf("%v-bq-geojson", project),
			Location:                 pulumi.String("US"),
			UniformBucketLevelAccess: pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		object, err := storage.NewBucketObject(ctx, "object", &storage.BucketObjectArgs{
			Name:    pulumi.String("geojson-data.jsonl"),
			Bucket:  bucket.Name,
			Content: pulumi.String("{\"type\":\"Feature\",\"properties\":{\"continent\":\"Europe\",\"region\":\"Scandinavia\"},\"geometry\":{\"type\":\"Polygon\",\"coordinates\":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}\n{\"type\":\"Feature\",\"properties\":{\"continent\":\"Africa\",\"region\":\"West Africa\"},\"geometry\":{\"type\":\"Polygon\",\"coordinates\":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}\n"),
		})
		if err != nil {
			return err
		}
		bar, err := bigquery.NewDataset(ctx, "bar", &bigquery.DatasetArgs{
			DatasetId:    pulumi.String("job_load_dataset"),
			FriendlyName: pulumi.String("test"),
			Description:  pulumi.String("This is a test description"),
			Location:     pulumi.String("US"),
		})
		if err != nil {
			return err
		}
		foo, err := bigquery.NewTable(ctx, "foo", &bigquery.TableArgs{
			DeletionProtection: pulumi.Bool(false),
			DatasetId:          bar.DatasetId,
			TableId:            pulumi.String("job_load_table"),
		})
		if err != nil {
			return err
		}
		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
			JobId: pulumi.String("job_load"),
			Labels: pulumi.StringMap{
				"my_job": pulumi.String("load"),
			},
			Load: &bigquery.JobLoadArgs{
				SourceUris: pulumi.StringArray{
					pulumi.All(object.Bucket, object.Name).ApplyT(func(_args []interface{}) (string, error) {
						bucket := _args[0].(string)
						name := _args[1].(string)
						return fmt.Sprintf("gs://%v/%v", bucket, name), nil
					}).(pulumi.StringOutput),
				},
				DestinationTable: &bigquery.JobLoadDestinationTableArgs{
					ProjectId: foo.Project,
					DatasetId: foo.DatasetId,
					TableId:   foo.TableId,
				},
				WriteDisposition: pulumi.String("WRITE_TRUNCATE"),
				Autodetect:       pulumi.Bool(true),
				SourceFormat:     pulumi.String("NEWLINE_DELIMITED_JSON"),
				JsonExtension:    pulumi.String("GEOJSON"),
			},
		}, pulumi.DependsOn([]pulumi.Resource{
			object,
		}))
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var project = "my-project-name";

    var bucket = new Gcp.Storage.Bucket("bucket", new()
    {
        Name = $"{project}-bq-geojson",
        Location = "US",
        UniformBucketLevelAccess = true,
    });

    var @object = new Gcp.Storage.BucketObject("object", new()
    {
        Name = "geojson-data.jsonl",
        Bucket = bucket.Name,
        Content = @"{""type"":""Feature"",""properties"":{""continent"":""Europe"",""region"":""Scandinavia""},""geometry"":{""type"":""Polygon"",""coordinates"":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}
{""type"":""Feature"",""properties"":{""continent"":""Africa"",""region"":""West Africa""},""geometry"":{""type"":""Polygon"",""coordinates"":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}
",
    });

    var bar = new Gcp.BigQuery.Dataset("bar", new()
    {
        DatasetId = "job_load_dataset",
        FriendlyName = "test",
        Description = "This is a test description",
        Location = "US",
    });

    var foo = new Gcp.BigQuery.Table("foo", new()
    {
        DeletionProtection = false,
        DatasetId = bar.DatasetId,
        TableId = "job_load_table",
    });

    var job = new Gcp.BigQuery.Job("job", new()
    {
        JobId = "job_load",
        Labels = 
        {
            { "my_job", "load" },
        },
        Load = new Gcp.BigQuery.Inputs.JobLoadArgs
        {
            SourceUris = new[]
            {
                Output.Tuple(@object.Bucket, @object.Name).Apply(values =>
                {
                    var bucket = values.Item1;
                    var name = values.Item2;
                    return $"gs://{bucket}/{name}";
                }),
            },
            DestinationTable = new Gcp.BigQuery.Inputs.JobLoadDestinationTableArgs
            {
                ProjectId = foo.Project,
                DatasetId = foo.DatasetId,
                TableId = foo.TableId,
            },
            WriteDisposition = "WRITE_TRUNCATE",
            Autodetect = true,
            SourceFormat = "NEWLINE_DELIMITED_JSON",
            JsonExtension = "GEOJSON",
        },
    }, new CustomResourceOptions
    {
        DependsOn =
        {
            @object,
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.storage.Bucket;
import com.pulumi.gcp.storage.BucketArgs;
import com.pulumi.gcp.storage.BucketObject;
import com.pulumi.gcp.storage.BucketObjectArgs;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadDestinationTableArgs;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        final var project = "my-project-name";

        var bucket = new Bucket("bucket", BucketArgs.builder()
            .name(String.format("%s-bq-geojson", project))
            .location("US")
            .uniformBucketLevelAccess(true)
            .build());

        var object = new BucketObject("object", BucketObjectArgs.builder()
            .name("geojson-data.jsonl")
            .bucket(bucket.name())
            .content("""
{"type":"Feature","properties":{"continent":"Europe","region":"Scandinavia"},"geometry":{"type":"Polygon","coordinates":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}
{"type":"Feature","properties":{"continent":"Africa","region":"West Africa"},"geometry":{"type":"Polygon","coordinates":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}
            """)
            .build());

        var bar = new Dataset("bar", DatasetArgs.builder()
            .datasetId("job_load_dataset")
            .friendlyName("test")
            .description("This is a test description")
            .location("US")
            .build());

        var foo = new Table("foo", TableArgs.builder()
            .deletionProtection(false)
            .datasetId(bar.datasetId())
            .tableId("job_load_table")
            .build());

        var job = new Job("job", JobArgs.builder()
            .jobId("job_load")
            .labels(Map.of("my_job", "load"))
            .load(JobLoadArgs.builder()
                .sourceUris(Output.tuple(object.bucket(), object.name()).applyValue(values -> {
                    var bucket = values.t1;
                    var name = values.t2;
                    return String.format("gs://%s/%s", bucket,name);
                }))
                .destinationTable(JobLoadDestinationTableArgs.builder()
                    .projectId(foo.project())
                    .datasetId(foo.datasetId())
                    .tableId(foo.tableId())
                    .build())
                .writeDisposition("WRITE_TRUNCATE")
                .autodetect(true)
                .sourceFormat("NEWLINE_DELIMITED_JSON")
                .jsonExtension("GEOJSON")
                .build())
            .build(), CustomResourceOptions.builder()
                .dependsOn(object)
                .build());

    }
}
resources:
  bucket:
    type: gcp:storage:Bucket
    properties:
      name: ${project}-bq-geojson
      location: US
      uniformBucketLevelAccess: true
  object:
    type: gcp:storage:BucketObject
    properties:
      name: geojson-data.jsonl
      bucket: ${bucket.name}
      content: |
        {"type":"Feature","properties":{"continent":"Europe","region":"Scandinavia"},"geometry":{"type":"Polygon","coordinates":[[[-30.94,53.33],[33.05,53.33],[33.05,71.86],[-30.94,71.86],[-30.94,53.33]]]}}
        {"type":"Feature","properties":{"continent":"Africa","region":"West Africa"},"geometry":{"type":"Polygon","coordinates":[[[-23.91,0],[11.95,0],[11.95,18.98],[-23.91,18.98],[-23.91,0]]]}}        
  foo:
    type: gcp:bigquery:Table
    properties:
      deletionProtection: false
      datasetId: ${bar.datasetId}
      tableId: job_load_table
  bar:
    type: gcp:bigquery:Dataset
    properties:
      datasetId: job_load_dataset
      friendlyName: test
      description: This is a test description
      location: US
  job:
    type: gcp:bigquery:Job
    properties:
      jobId: job_load
      labels:
        my_job: load
      load:
        sourceUris:
          - gs://${object.bucket}/${object.name}
        destinationTable:
          projectId: ${foo.project}
          datasetId: ${foo.datasetId}
          tableId: ${foo.tableId}
        writeDisposition: WRITE_TRUNCATE
        autodetect: true
        sourceFormat: NEWLINE_DELIMITED_JSON
        jsonExtension: GEOJSON
    options:
      dependsOn:
        - ${object}
variables:
  project: my-project-name

Bigquery Job Load Parquet

This example loads a gzip-compressed Parquet file from a Cloud Storage bucket, with Parquet-specific options for enum handling and list inference.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const test = new gcp.storage.Bucket("test", {
    name: "job_load_bucket",
    location: "US",
    uniformBucketLevelAccess: true,
});
const testBucketObject = new gcp.storage.BucketObject("test", {
    name: "job_load_bucket_object",
    source: new pulumi.asset.FileAsset("./test-fixtures/test.parquet.gzip"),
    bucket: test.name,
});
const testDataset = new gcp.bigquery.Dataset("test", {
    datasetId: "job_load_dataset",
    friendlyName: "test",
    description: "This is a test description",
    location: "US",
});
const testTable = new gcp.bigquery.Table("test", {
    deletionProtection: false,
    tableId: "job_load_table",
    datasetId: testDataset.datasetId,
});
const job = new gcp.bigquery.Job("job", {
    jobId: "job_load",
    labels: {
        my_job: "load",
    },
    load: {
        sourceUris: [pulumi.interpolate`gs://${testBucketObject.bucket}/${testBucketObject.name}`],
        destinationTable: {
            projectId: testTable.project,
            datasetId: testTable.datasetId,
            tableId: testTable.tableId,
        },
        schemaUpdateOptions: [
            "ALLOW_FIELD_RELAXATION",
            "ALLOW_FIELD_ADDITION",
        ],
        writeDisposition: "WRITE_APPEND",
        sourceFormat: "PARQUET",
        autodetect: true,
        parquetOptions: {
            enumAsString: true,
            enableListInference: true,
        },
    },
});
import pulumi
import pulumi_gcp as gcp

test = gcp.storage.Bucket("test",
    name="job_load_bucket",
    location="US",
    uniform_bucket_level_access=True)
test_bucket_object = gcp.storage.BucketObject("test",
    name="job_load_bucket_object",
    source=pulumi.FileAsset("./test-fixtures/test.parquet.gzip"),
    bucket=test.name)
test_dataset = gcp.bigquery.Dataset("test",
    dataset_id="job_load_dataset",
    friendly_name="test",
    description="This is a test description",
    location="US")
test_table = gcp.bigquery.Table("test",
    deletion_protection=False,
    table_id="job_load_table",
    dataset_id=test_dataset.dataset_id)
job = gcp.bigquery.Job("job",
    job_id="job_load",
    labels={
        "my_job": "load",
    },
    load={
        "source_uris": [pulumi.Output.all(
            bucket=test_bucket_object.bucket,
            name=test_bucket_object.name
).apply(lambda resolved_outputs: f"gs://{resolved_outputs['bucket']}/{resolved_outputs['name']}")
],
        "destination_table": {
            "project_id": test_table.project,
            "dataset_id": test_table.dataset_id,
            "table_id": test_table.table_id,
        },
        "schema_update_options": [
            "ALLOW_FIELD_RELAXATION",
            "ALLOW_FIELD_ADDITION",
        ],
        "write_disposition": "WRITE_APPEND",
        "source_format": "PARQUET",
        "autodetect": True,
        "parquet_options": {
            "enum_as_string": True,
            "enable_list_inference": True,
        },
    })
package main

import (
	"fmt"

	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		test, err := storage.NewBucket(ctx, "test", &storage.BucketArgs{
			Name:                     pulumi.String("job_load_bucket"),
			Location:                 pulumi.String("US"),
			UniformBucketLevelAccess: pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		testBucketObject, err := storage.NewBucketObject(ctx, "test", &storage.BucketObjectArgs{
			Name:   pulumi.String("job_load_bucket_object"),
			Source: pulumi.NewFileAsset("./test-fixtures/test.parquet.gzip"),
			Bucket: test.Name,
		})
		if err != nil {
			return err
		}
		testDataset, err := bigquery.NewDataset(ctx, "test", &bigquery.DatasetArgs{
			DatasetId:    pulumi.String("job_load_dataset"),
			FriendlyName: pulumi.String("test"),
			Description:  pulumi.String("This is a test description"),
			Location:     pulumi.String("US"),
		})
		if err != nil {
			return err
		}
		testTable, err := bigquery.NewTable(ctx, "test", &bigquery.TableArgs{
			DeletionProtection: pulumi.Bool(false),
			TableId:            pulumi.String("job_load_table"),
			DatasetId:          testDataset.DatasetId,
		})
		if err != nil {
			return err
		}
		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
			JobId: pulumi.String("job_load"),
			Labels: pulumi.StringMap{
				"my_job": pulumi.String("load"),
			},
			Load: &bigquery.JobLoadArgs{
				SourceUris: pulumi.StringArray{
					pulumi.All(testBucketObject.Bucket, testBucketObject.Name).ApplyT(func(_args []interface{}) (string, error) {
						bucket := _args[0].(string)
						name := _args[1].(string)
						return fmt.Sprintf("gs://%v/%v", bucket, name), nil
					}).(pulumi.StringOutput),
				},
				DestinationTable: &bigquery.JobLoadDestinationTableArgs{
					ProjectId: testTable.Project,
					DatasetId: testTable.DatasetId,
					TableId:   testTable.TableId,
				},
				SchemaUpdateOptions: pulumi.StringArray{
					pulumi.String("ALLOW_FIELD_RELAXATION"),
					pulumi.String("ALLOW_FIELD_ADDITION"),
				},
				WriteDisposition: pulumi.String("WRITE_APPEND"),
				SourceFormat:     pulumi.String("PARQUET"),
				Autodetect:       pulumi.Bool(true),
				ParquetOptions: &bigquery.JobLoadParquetOptionsArgs{
					EnumAsString:        pulumi.Bool(true),
					EnableListInference: pulumi.Bool(true),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var test = new Gcp.Storage.Bucket("test", new()
    {
        Name = "job_load_bucket",
        Location = "US",
        UniformBucketLevelAccess = true,
    });

    var testBucketObject = new Gcp.Storage.BucketObject("test", new()
    {
        Name = "job_load_bucket_object",
        Source = new FileAsset("./test-fixtures/test.parquet.gzip"),
        Bucket = test.Name,
    });

    var testDataset = new Gcp.BigQuery.Dataset("test", new()
    {
        DatasetId = "job_load_dataset",
        FriendlyName = "test",
        Description = "This is a test description",
        Location = "US",
    });

    var testTable = new Gcp.BigQuery.Table("test", new()
    {
        DeletionProtection = false,
        TableId = "job_load_table",
        DatasetId = testDataset.DatasetId,
    });

    var job = new Gcp.BigQuery.Job("job", new()
    {
        JobId = "job_load",
        Labels = 
        {
            { "my_job", "load" },
        },
        Load = new Gcp.BigQuery.Inputs.JobLoadArgs
        {
            SourceUris = new[]
            {
                Output.Tuple(testBucketObject.Bucket, testBucketObject.Name).Apply(values =>
                {
                    var bucket = values.Item1;
                    var name = values.Item2;
                    return $"gs://{bucket}/{name}";
                }),
            },
            DestinationTable = new Gcp.BigQuery.Inputs.JobLoadDestinationTableArgs
            {
                ProjectId = testTable.Project,
                DatasetId = testTable.DatasetId,
                TableId = testTable.TableId,
            },
            SchemaUpdateOptions = new[]
            {
                "ALLOW_FIELD_RELAXATION",
                "ALLOW_FIELD_ADDITION",
            },
            WriteDisposition = "WRITE_APPEND",
            SourceFormat = "PARQUET",
            Autodetect = true,
            ParquetOptions = new Gcp.BigQuery.Inputs.JobLoadParquetOptionsArgs
            {
                EnumAsString = true,
                EnableListInference = true,
            },
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.storage.Bucket;
import com.pulumi.gcp.storage.BucketArgs;
import com.pulumi.gcp.storage.BucketObject;
import com.pulumi.gcp.storage.BucketObjectArgs;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadDestinationTableArgs;
import com.pulumi.gcp.bigquery.inputs.JobLoadParquetOptionsArgs;
import com.pulumi.asset.FileAsset;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var test = new Bucket("test", BucketArgs.builder()
            .name("job_load_bucket")
            .location("US")
            .uniformBucketLevelAccess(true)
            .build());

        var testBucketObject = new BucketObject("testBucketObject", BucketObjectArgs.builder()
            .name("job_load_bucket_object")
            .source(new FileAsset("./test-fixtures/test.parquet.gzip"))
            .bucket(test.name())
            .build());

        var testDataset = new Dataset("testDataset", DatasetArgs.builder()
            .datasetId("job_load_dataset")
            .friendlyName("test")
            .description("This is a test description")
            .location("US")
            .build());

        var testTable = new Table("testTable", TableArgs.builder()
            .deletionProtection(false)
            .tableId("job_load_table")
            .datasetId(testDataset.datasetId())
            .build());

        var job = new Job("job", JobArgs.builder()
            .jobId("job_load")
            .labels(Map.of("my_job", "load"))
            .load(JobLoadArgs.builder()
                .sourceUris(Output.tuple(testBucketObject.bucket(), testBucketObject.name()).applyValue(values -> {
                    var bucket = values.t1;
                    var name = values.t2;
                    return String.format("gs://%s/%s", bucket,name);
                }))
                .destinationTable(JobLoadDestinationTableArgs.builder()
                    .projectId(testTable.project())
                    .datasetId(testTable.datasetId())
                    .tableId(testTable.tableId())
                    .build())
                .schemaUpdateOptions(                
                    "ALLOW_FIELD_RELAXATION",
                    "ALLOW_FIELD_ADDITION")
                .writeDisposition("WRITE_APPEND")
                .sourceFormat("PARQUET")
                .autodetect(true)
                .parquetOptions(JobLoadParquetOptionsArgs.builder()
                    .enumAsString(true)
                    .enableListInference(true)
                    .build())
                .build())
            .build());

    }
}
resources:
  test:
    type: gcp:storage:Bucket
    properties:
      name: job_load_bucket
      location: US
      uniformBucketLevelAccess: true
  testBucketObject:
    type: gcp:storage:BucketObject
    name: test
    properties:
      name: job_load_bucket_object
      source:
        fn::FileAsset: ./test-fixtures/test.parquet.gzip
      bucket: ${test.name}
  testDataset:
    type: gcp:bigquery:Dataset
    name: test
    properties:
      datasetId: job_load_dataset
      friendlyName: test
      description: This is a test description
      location: US
  testTable:
    type: gcp:bigquery:Table
    name: test
    properties:
      deletionProtection: false
      tableId: job_load_table
      datasetId: ${testDataset.datasetId}
  job:
    type: gcp:bigquery:Job
    properties:
      jobId: job_load
      labels:
        my_job: load
      load:
        sourceUris:
          - gs://${testBucketObject.bucket}/${testBucketObject.name}
        destinationTable:
          projectId: ${testTable.project}
          datasetId: ${testTable.datasetId}
          tableId: ${testTable.tableId}
        schemaUpdateOptions:
          - ALLOW_FIELD_RELAXATION
          - ALLOW_FIELD_ADDITION
        writeDisposition: WRITE_APPEND
        sourceFormat: PARQUET
        autodetect: true
        parquetOptions:
          enumAsString: true
          enableListInference: true

Bigquery Job Copy

This example copies two source tables into a destination table encrypted with a customer-managed Cloud KMS key, after granting the BigQuery encryption service account permission to use that key.

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const count = 2;
const sourceDataset: gcp.bigquery.Dataset[] = [];
for (const range = {value: 0}; range.value < count; range.value++) {
    sourceDataset.push(new gcp.bigquery.Dataset(`source-${range.value}`, {
        datasetId: `job_copy_${range.value}_dataset`,
        friendlyName: "test",
        description: "This is a test description",
        location: "US",
    }));
}
const source: gcp.bigquery.Table[] = [];
for (const range = {value: 0}; range.value < count; range.value++) {
    source.push(new gcp.bigquery.Table(`source-${range.value}`, {
        datasetId: sourceDataset[range.value].datasetId,
        tableId: `job_copy_${range.value}_table`,
        deletionProtection: false,
        schema: `[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
`,
    }));
}
const destDataset = new gcp.bigquery.Dataset("dest", {
    datasetId: "job_copy_dest_dataset",
    friendlyName: "test",
    description: "This is a test description",
    location: "US",
});
const project = gcp.organizations.getProject({
    projectId: "my-project-name",
});
const encryptRole = new gcp.kms.CryptoKeyIAMMember("encrypt_role", {
    cryptoKeyId: "example-key",
    role: "roles/cloudkms.cryptoKeyEncrypterDecrypter",
    member: project.then(project => `serviceAccount:bq-${project.number}@bigquery-encryption.iam.gserviceaccount.com`),
});
const dest = new gcp.bigquery.Table("dest", {
    deletionProtection: false,
    datasetId: destDataset.datasetId,
    tableId: "job_copy_dest_table",
    schema: `[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
`,
    encryptionConfiguration: {
        kmsKeyName: "example-key",
    },
}, {
    dependsOn: [encryptRole],
});
const job = new gcp.bigquery.Job("job", {
    jobId: "job_copy",
    copy: {
        sourceTables: [
            {
                projectId: source[0].project,
                datasetId: source[0].datasetId,
                tableId: source[0].tableId,
            },
            {
                projectId: source[1].project,
                datasetId: source[1].datasetId,
                tableId: source[1].tableId,
            },
        ],
        destinationTable: {
            projectId: dest.project,
            datasetId: dest.datasetId,
            tableId: dest.tableId,
        },
        destinationEncryptionConfiguration: {
            kmsKeyName: "example-key",
        },
    },
}, {
    dependsOn: [encryptRole],
});
import pulumi
import pulumi_gcp as gcp

count = 2
source_dataset = []
for range in [{"value": i} for i in range(0, count)]:
    source_dataset.append(gcp.bigquery.Dataset(f"source-{range['value']}",
        dataset_id=f"job_copy_{range['value']}_dataset",
        friendly_name="test",
        description="This is a test description",
        location="US"))
source = []
for range in [{"value": i} for i in range(0, count)]:
    source.append(gcp.bigquery.Table(f"source-{range['value']}",
        dataset_id=source_dataset[range["value"]].dataset_id,
        table_id=f"job_copy_{range['value']}_table",
        deletion_protection=False,
        schema="""[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
"""))
dest_dataset = gcp.bigquery.Dataset("dest",
    dataset_id="job_copy_dest_dataset",
    friendly_name="test",
    description="This is a test description",
    location="US")
project = gcp.organizations.get_project(project_id="my-project-name")
encrypt_role = gcp.kms.CryptoKeyIAMMember("encrypt_role",
    crypto_key_id="example-key",
    role="roles/cloudkms.cryptoKeyEncrypterDecrypter",
    member=f"serviceAccount:bq-{project.number}@bigquery-encryption.iam.gserviceaccount.com")
dest = gcp.bigquery.Table("dest",
    deletion_protection=False,
    dataset_id=dest_dataset.dataset_id,
    table_id="job_copy_dest_table",
    schema="""[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
""",
    encryption_configuration={
        "kms_key_name": "example-key",
    },
    opts = pulumi.ResourceOptions(depends_on=[encrypt_role]))
job = gcp.bigquery.Job("job",
    job_id="job_copy",
    copy={
        "source_tables": [
            {
                "project_id": source[0].project,
                "dataset_id": source[0].dataset_id,
                "table_id": source[0].table_id,
            },
            {
                "project_id": source[1].project,
                "dataset_id": source[1].dataset_id,
                "table_id": source[1].table_id,
            },
        ],
        "destination_table": {
            "project_id": dest.project,
            "dataset_id": dest.dataset_id,
            "table_id": dest.table_id,
        },
        "destination_encryption_configuration": {
            "kms_key_name": "example-key",
        },
    },
    opts = pulumi.ResourceOptions(depends_on=[encrypt_role]))
package main

import (
	"fmt"

	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/kms"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		count := 2
		var sourceDataset []*bigquery.Dataset
		for index := 0; index < count; index++ {
			key0 := index
			val0 := index
			__res, err := bigquery.NewDataset(ctx, fmt.Sprintf("source-%v", key0), &bigquery.DatasetArgs{
				DatasetId:    pulumi.Sprintf("job_copy_%v_dataset", val0),
				FriendlyName: pulumi.String("test"),
				Description:  pulumi.String("This is a test description"),
				Location:     pulumi.String("US"),
			})
			if err != nil {
				return err
			}
			sourceDataset = append(sourceDataset, __res)
		}
		var source []*bigquery.Table
		for index := 0; index < count; index++ {
			key0 := index
			val0 := index
			__res, err := bigquery.NewTable(ctx, fmt.Sprintf("source-%v", key0), &bigquery.TableArgs{
				DatasetId:          sourceDataset[val0].DatasetId,
				TableId:            pulumi.Sprintf("job_copy_%v_table", val0),
				DeletionProtection: pulumi.Bool(false),
				Schema: pulumi.String(`[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
`),
			})
			if err != nil {
				return err
			}
			source = append(source, __res)
		}
		destDataset, err := bigquery.NewDataset(ctx, "dest", &bigquery.DatasetArgs{
			DatasetId:    pulumi.String("job_copy_dest_dataset"),
			FriendlyName: pulumi.String("test"),
			Description:  pulumi.String("This is a test description"),
			Location:     pulumi.String("US"),
		})
		if err != nil {
			return err
		}
		project, err := organizations.LookupProject(ctx, &organizations.LookupProjectArgs{
			ProjectId: pulumi.StringRef("my-project-name"),
		}, nil)
		if err != nil {
			return err
		}
		encryptRole, err := kms.NewCryptoKeyIAMMember(ctx, "encrypt_role", &kms.CryptoKeyIAMMemberArgs{
			CryptoKeyId: pulumi.String("example-key"),
			Role:        pulumi.String("roles/cloudkms.cryptoKeyEncrypterDecrypter"),
			Member:      pulumi.Sprintf("serviceAccount:bq-%v@bigquery-encryption.iam.gserviceaccount.com", project.Number),
		})
		if err != nil {
			return err
		}
		dest, err := bigquery.NewTable(ctx, "dest", &bigquery.TableArgs{
			DeletionProtection: pulumi.Bool(false),
			DatasetId:          destDataset.DatasetId,
			TableId:            pulumi.String("job_copy_dest_table"),
			Schema: pulumi.String(`[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
`),
			EncryptionConfiguration: &bigquery.TableEncryptionConfigurationArgs{
				KmsKeyName: pulumi.String("example-key"),
			},
		}, pulumi.DependsOn([]pulumi.Resource{
			encryptRole,
		}))
		if err != nil {
			return err
		}
		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
			JobId: pulumi.String("job_copy"),
			Copy: &bigquery.JobCopyArgs{
				SourceTables: bigquery.JobCopySourceTableArray{
					&bigquery.JobCopySourceTableArgs{
						ProjectId: source[0].Project,
						DatasetId: source[0].DatasetId,
						TableId:   source[0].TableId,
					},
					&bigquery.JobCopySourceTableArgs{
						ProjectId: source[1].Project,
						DatasetId: source[1].DatasetId,
						TableId:   source[1].TableId,
					},
				},
				DestinationTable: &bigquery.JobCopyDestinationTableArgs{
					ProjectId: dest.Project,
					DatasetId: dest.DatasetId,
					TableId:   dest.TableId,
				},
				DestinationEncryptionConfiguration: &bigquery.JobCopyDestinationEncryptionConfigurationArgs{
					KmsKeyName: pulumi.String("example-key"),
				},
			},
		}, pulumi.DependsOn([]pulumi.Resource{
			encryptRole,
		}))
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var count = 2;

    var sourceDataset = new List<Gcp.BigQuery.Dataset>();
    for (var rangeIndex = 0; rangeIndex < count; rangeIndex++)
    {
        var range = new { Value = rangeIndex };
        sourceDataset.Add(new Gcp.BigQuery.Dataset($"source-{range.Value}", new()
        {
            DatasetId = $"job_copy_{range.Value}_dataset",
            FriendlyName = "test",
            Description = "This is a test description",
            Location = "US",
        }));
    }
    var source = new List<Gcp.BigQuery.Table>();
    for (var rangeIndex = 0; rangeIndex < count; rangeIndex++)
    {
        var range = new { Value = rangeIndex };
        source.Add(new Gcp.BigQuery.Table($"source-{range.Value}", new()
        {
            DatasetId = sourceDataset[range.Value].DatasetId,
            TableId = $"job_copy_{range.Value}_table",
            DeletionProtection = false,
            Schema = @"[
  {
    ""name"": ""name"",
    ""type"": ""STRING"",
    ""mode"": ""NULLABLE""
  },
  {
    ""name"": ""post_abbr"",
    ""type"": ""STRING"",
    ""mode"": ""NULLABLE""
  },
  {
    ""name"": ""date"",
    ""type"": ""DATE"",
    ""mode"": ""NULLABLE""
  }
]
",
        }));
    }
    var destDataset = new Gcp.BigQuery.Dataset("dest", new()
    {
        DatasetId = "job_copy_dest_dataset",
        FriendlyName = "test",
        Description = "This is a test description",
        Location = "US",
    });

    var project = Gcp.Organizations.GetProject.Invoke(new()
    {
        ProjectId = "my-project-name",
    });

    var encryptRole = new Gcp.Kms.CryptoKeyIAMMember("encrypt_role", new()
    {
        CryptoKeyId = "example-key",
        Role = "roles/cloudkms.cryptoKeyEncrypterDecrypter",
        Member = $"serviceAccount:bq-{project.Apply(getProjectResult => getProjectResult.Number)}@bigquery-encryption.iam.gserviceaccount.com",
    });

    var dest = new Gcp.BigQuery.Table("dest", new()
    {
        DeletionProtection = false,
        DatasetId = destDataset.DatasetId,
        TableId = "job_copy_dest_table",
        Schema = @"[
  {
    ""name"": ""name"",
    ""type"": ""STRING"",
    ""mode"": ""NULLABLE""
  },
  {
    ""name"": ""post_abbr"",
    ""type"": ""STRING"",
    ""mode"": ""NULLABLE""
  },
  {
    ""name"": ""date"",
    ""type"": ""DATE"",
    ""mode"": ""NULLABLE""
  }
]
",
        EncryptionConfiguration = new Gcp.BigQuery.Inputs.TableEncryptionConfigurationArgs
        {
            KmsKeyName = "example-key",
        },
    }, new CustomResourceOptions
    {
        DependsOn =
        {
            encryptRole,
        },
    });

    var job = new Gcp.BigQuery.Job("job", new()
    {
        JobId = "job_copy",
        Copy = new Gcp.BigQuery.Inputs.JobCopyArgs
        {
            SourceTables = new[]
            {
                new Gcp.BigQuery.Inputs.JobCopySourceTableArgs
                {
                    ProjectId = source[0].Project,
                    DatasetId = source[0].DatasetId,
                    TableId = source[0].TableId,
                },
                new Gcp.BigQuery.Inputs.JobCopySourceTableArgs
                {
                    ProjectId = source[1].Project,
                    DatasetId = source[1].DatasetId,
                    TableId = source[1].TableId,
                },
            },
            DestinationTable = new Gcp.BigQuery.Inputs.JobCopyDestinationTableArgs
            {
                ProjectId = dest.Project,
                DatasetId = dest.DatasetId,
                TableId = dest.TableId,
            },
            DestinationEncryptionConfiguration = new Gcp.BigQuery.Inputs.JobCopyDestinationEncryptionConfigurationArgs
            {
                KmsKeyName = "example-key",
            },
        },
    }, new CustomResourceOptions
    {
        DependsOn =
        {
            encryptRole,
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.organizations.OrganizationsFunctions;
import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
import com.pulumi.gcp.kms.CryptoKeyIAMMember;
import com.pulumi.gcp.kms.CryptoKeyIAMMemberArgs;
import com.pulumi.gcp.bigquery.inputs.TableEncryptionConfigurationArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobCopyArgs;
import com.pulumi.gcp.bigquery.inputs.JobCopyDestinationTableArgs;
import com.pulumi.gcp.bigquery.inputs.JobCopySourceTableArgs;
import com.pulumi.gcp.bigquery.inputs.JobCopyDestinationEncryptionConfigurationArgs;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        final var count = 2;

        final var sourceDataset = new ArrayList<Dataset>();
        for (var i = 0; i < count; i++) {
            sourceDataset.add(new Dataset("sourceDataset-" + i, DatasetArgs.builder()
                .datasetId(String.format("job_copy_%s_dataset", i))
                .friendlyName("test")
                .description("This is a test description")
                .location("US")
                .build()));
        }
        final var source = new ArrayList<Table>();
        for (var i = 0; i < count; i++) {
            source.add(new Table("source-" + i, TableArgs.builder()
                .datasetId(sourceDataset.get(i).datasetId())
                .tableId(String.format("job_copy_%s_table", i))
                .deletionProtection(false)
                .schema("""
[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
                """)
                .build()));
        }
        var destDataset = new Dataset("destDataset", DatasetArgs.builder()
            .datasetId("job_copy_dest_dataset")
            .friendlyName("test")
            .description("This is a test description")
            .location("US")
            .build());

        final var project = OrganizationsFunctions.getProject(GetProjectArgs.builder()
            .projectId("my-project-name")
            .build());

        var encryptRole = new CryptoKeyIAMMember("encryptRole", CryptoKeyIAMMemberArgs.builder()
            .cryptoKeyId("example-key")
            .role("roles/cloudkms.cryptoKeyEncrypterDecrypter")
            .member(project.applyValue(p -> String.format("serviceAccount:bq-%s@bigquery-encryption.iam.gserviceaccount.com", p.number())))
            .build());

        var dest = new Table("dest", TableArgs.builder()
            .deletionProtection(false)
            .datasetId(destDataset.datasetId())
            .tableId("job_copy_dest_table")
            .schema("""
[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
            """)
            .encryptionConfiguration(TableEncryptionConfigurationArgs.builder()
                .kmsKeyName("example-key")
                .build())
            .build(), CustomResourceOptions.builder()
                .dependsOn(encryptRole)
                .build());

        var job = new Job("job", JobArgs.builder()
            .jobId("job_copy")
            .copy(JobCopyArgs.builder()
                .sourceTables(                
                    JobCopySourceTableArgs.builder()
                        .projectId(source.get(0).project())
                        .datasetId(source.get(0).datasetId())
                        .tableId(source.get(0).tableId())
                        .build(),
                    JobCopySourceTableArgs.builder()
                        .projectId(source.get(1).project())
                        .datasetId(source.get(1).datasetId())
                        .tableId(source.get(1).tableId())
                        .build())
                .destinationTable(JobCopyDestinationTableArgs.builder()
                    .projectId(dest.project())
                    .datasetId(dest.datasetId())
                    .tableId(dest.tableId())
                    .build())
                .destinationEncryptionConfiguration(JobCopyDestinationEncryptionConfigurationArgs.builder()
                    .kmsKeyName("example-key")
                    .build())
                .build())
            .build(), CustomResourceOptions.builder()
                .dependsOn(encryptRole)
                .build());

    }
}
Coming soon!

Bigquery Job Extract

import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const source_oneDataset = new gcp.bigquery.Dataset("source-one", {
    datasetId: "job_extract_dataset",
    friendlyName: "test",
    description: "This is a test description",
    location: "US",
});
const source_one = new gcp.bigquery.Table("source-one", {
    deletionProtection: false,
    datasetId: source_oneDataset.datasetId,
    tableId: "job_extract_table",
    schema: `[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
`,
});
const dest = new gcp.storage.Bucket("dest", {
    name: "job_extract_bucket",
    location: "US",
    forceDestroy: true,
});
const job = new gcp.bigquery.Job("job", {
    jobId: "job_extract",
    extract: {
        destinationUris: [pulumi.interpolate`${dest.url}/extract`],
        sourceTable: {
            projectId: source_one.project,
            datasetId: source_one.datasetId,
            tableId: source_one.tableId,
        },
        destinationFormat: "NEWLINE_DELIMITED_JSON",
        compression: "GZIP",
    },
});
import pulumi
import pulumi_gcp as gcp

source_one_dataset = gcp.bigquery.Dataset("source-one",
    dataset_id="job_extract_dataset",
    friendly_name="test",
    description="This is a test description",
    location="US")
source_one = gcp.bigquery.Table("source-one",
    deletion_protection=False,
    dataset_id=source_one_dataset.dataset_id,
    table_id="job_extract_table",
    schema="""[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
""")
dest = gcp.storage.Bucket("dest",
    name="job_extract_bucket",
    location="US",
    force_destroy=True)
job = gcp.bigquery.Job("job",
    job_id="job_extract",
    extract={
        "destination_uris": [dest.url.apply(lambda url: f"{url}/extract")],
        "source_table": {
            "project_id": source_one.project,
            "dataset_id": source_one.dataset_id,
            "table_id": source_one.table_id,
        },
        "destination_format": "NEWLINE_DELIMITED_JSON",
        "compression": "GZIP",
    })
package main

import (
	"fmt"

	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		source_oneDataset, err := bigquery.NewDataset(ctx, "source-one", &bigquery.DatasetArgs{
			DatasetId:    pulumi.String("job_extract_dataset"),
			FriendlyName: pulumi.String("test"),
			Description:  pulumi.String("This is a test description"),
			Location:     pulumi.String("US"),
		})
		if err != nil {
			return err
		}
		source_one, err := bigquery.NewTable(ctx, "source-one", &bigquery.TableArgs{
			DeletionProtection: pulumi.Bool(false),
			DatasetId:          source_oneDataset.DatasetId,
			TableId:            pulumi.String("job_extract_table"),
			Schema: pulumi.String(`[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
`),
		})
		if err != nil {
			return err
		}
		dest, err := storage.NewBucket(ctx, "dest", &storage.BucketArgs{
			Name:         pulumi.String("job_extract_bucket"),
			Location:     pulumi.String("US"),
			ForceDestroy: pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		_, err = bigquery.NewJob(ctx, "job", &bigquery.JobArgs{
			JobId: pulumi.String("job_extract"),
			Extract: &bigquery.JobExtractArgs{
				DestinationUris: pulumi.StringArray{
					dest.Url.ApplyT(func(url string) (string, error) {
						return fmt.Sprintf("%v/extract", url), nil
					}).(pulumi.StringOutput),
				},
				SourceTable: &bigquery.JobExtractSourceTableArgs{
					ProjectId: source_one.Project,
					DatasetId: source_one.DatasetId,
					TableId:   source_one.TableId,
				},
				DestinationFormat: pulumi.String("NEWLINE_DELIMITED_JSON"),
				Compression:       pulumi.String("GZIP"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    var source_oneDataset = new Gcp.BigQuery.Dataset("source-one", new()
    {
        DatasetId = "job_extract_dataset",
        FriendlyName = "test",
        Description = "This is a test description",
        Location = "US",
    });

    var source_one = new Gcp.BigQuery.Table("source-one", new()
    {
        DeletionProtection = false,
        DatasetId = source_oneDataset.DatasetId,
        TableId = "job_extract_table",
        Schema = @"[
  {
    ""name"": ""name"",
    ""type"": ""STRING"",
    ""mode"": ""NULLABLE""
  },
  {
    ""name"": ""post_abbr"",
    ""type"": ""STRING"",
    ""mode"": ""NULLABLE""
  },
  {
    ""name"": ""date"",
    ""type"": ""DATE"",
    ""mode"": ""NULLABLE""
  }
]
",
    });

    var dest = new Gcp.Storage.Bucket("dest", new()
    {
        Name = "job_extract_bucket",
        Location = "US",
        ForceDestroy = true,
    });

    var job = new Gcp.BigQuery.Job("job", new()
    {
        JobId = "job_extract",
        Extract = new Gcp.BigQuery.Inputs.JobExtractArgs
        {
            DestinationUris = new[]
            {
                dest.Url.Apply(url => $"{url}/extract"),
            },
            SourceTable = new Gcp.BigQuery.Inputs.JobExtractSourceTableArgs
            {
                ProjectId = source_one.Project,
                DatasetId = source_one.DatasetId,
                TableId = source_one.TableId,
            },
            DestinationFormat = "NEWLINE_DELIMITED_JSON",
            Compression = "GZIP",
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Table;
import com.pulumi.gcp.bigquery.TableArgs;
import com.pulumi.gcp.storage.Bucket;
import com.pulumi.gcp.storage.BucketArgs;
import com.pulumi.gcp.bigquery.Job;
import com.pulumi.gcp.bigquery.JobArgs;
import com.pulumi.gcp.bigquery.inputs.JobExtractArgs;
import com.pulumi.gcp.bigquery.inputs.JobExtractSourceTableArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var source_oneDataset = new Dataset("source-oneDataset", DatasetArgs.builder()
            .datasetId("job_extract_dataset")
            .friendlyName("test")
            .description("This is a test description")
            .location("US")
            .build());

        var source_one = new Table("source-one", TableArgs.builder()
            .deletionProtection(false)
            .datasetId(source_oneDataset.datasetId())
            .tableId("job_extract_table")
            .schema("""
[
  {
    "name": "name",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "post_abbr",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "date",
    "type": "DATE",
    "mode": "NULLABLE"
  }
]
            """)
            .build());

        var dest = new Bucket("dest", BucketArgs.builder()
            .name("job_extract_bucket")
            .location("US")
            .forceDestroy(true)
            .build());

        var job = new Job("job", JobArgs.builder()
            .jobId("job_extract")
            .extract(JobExtractArgs.builder()
                .destinationUris(dest.url().applyValue(_url -> String.format("%s/extract", _url)))
                .sourceTable(JobExtractSourceTableArgs.builder()
                    .projectId(source_one.project())
                    .datasetId(source_one.datasetId())
                    .tableId(source_one.tableId())
                    .build())
                .destinationFormat("NEWLINE_DELIMITED_JSON")
                .compression("GZIP")
                .build())
            .build());

    }
}
resources:
  source-one:
    type: gcp:bigquery:Table
    properties:
      deletionProtection: false
      datasetId: ${["source-oneDataset"].datasetId}
      tableId: job_extract_table
      schema: |
        [
          {
            "name": "name",
            "type": "STRING",
            "mode": "NULLABLE"
          },
          {
            "name": "post_abbr",
            "type": "STRING",
            "mode": "NULLABLE"
          },
          {
            "name": "date",
            "type": "DATE",
            "mode": "NULLABLE"
          }
        ]        
  source-oneDataset:
    type: gcp:bigquery:Dataset
    name: source-one
    properties:
      datasetId: job_extract_dataset
      friendlyName: test
      description: This is a test description
      location: US
  dest:
    type: gcp:storage:Bucket
    properties:
      name: job_extract_bucket
      location: US
      forceDestroy: true
  job:
    type: gcp:bigquery:Job
    properties:
      jobId: job_extract
      extract:
        destinationUris:
          - ${dest.url}/extract
        sourceTable:
          projectId: ${["source-one"].project}
          datasetId: ${["source-one"].datasetId}
          tableId: ${["source-one"].tableId}
        destinationFormat: NEWLINE_DELIMITED_JSON
        compression: GZIP

Create Job Resource

Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

Constructor syntax

new Job(name: string, args: JobArgs, opts?: CustomResourceOptions);
@overload
def Job(resource_name: str,
        args: JobArgs,
        opts: Optional[ResourceOptions] = None)

@overload
def Job(resource_name: str,
        opts: Optional[ResourceOptions] = None,
        job_id: Optional[str] = None,
        copy: Optional[JobCopyArgs] = None,
        extract: Optional[JobExtractArgs] = None,
        job_timeout_ms: Optional[str] = None,
        labels: Optional[Mapping[str, str]] = None,
        load: Optional[JobLoadArgs] = None,
        location: Optional[str] = None,
        project: Optional[str] = None,
        query: Optional[JobQueryArgs] = None)
func NewJob(ctx *Context, name string, args JobArgs, opts ...ResourceOption) (*Job, error)
public Job(string name, JobArgs args, CustomResourceOptions? opts = null)
public Job(String name, JobArgs args)
public Job(String name, JobArgs args, CustomResourceOptions options)
type: gcp:bigquery:Job
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.

Parameters

name This property is required. string
The unique name of the resource.
args This property is required. JobArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
resource_name This property is required. str
The unique name of the resource.
args This property is required. JobArgs
The arguments to resource properties.
opts ResourceOptions
Bag of options to control resource's behavior.
ctx Context
Context object for the current deployment.
name This property is required. string
The unique name of the resource.
args This property is required. JobArgs
The arguments to resource properties.
opts ResourceOption
Bag of options to control resource's behavior.
name This property is required. string
The unique name of the resource.
args This property is required. JobArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
name This property is required. String
The unique name of the resource.
args This property is required. JobArgs
The arguments to resource properties.
options CustomResourceOptions
Bag of options to control resource's behavior.

Constructor example

The following reference example uses placeholder values for all input properties.

var jobResource = new Gcp.BigQuery.Job("jobResource", new()
{
    JobId = "string",
    Copy = new Gcp.BigQuery.Inputs.JobCopyArgs
    {
        SourceTables = new[]
        {
            new Gcp.BigQuery.Inputs.JobCopySourceTableArgs
            {
                TableId = "string",
                DatasetId = "string",
                ProjectId = "string",
            },
        },
        CreateDisposition = "string",
        DestinationEncryptionConfiguration = new Gcp.BigQuery.Inputs.JobCopyDestinationEncryptionConfigurationArgs
        {
            KmsKeyName = "string",
            KmsKeyVersion = "string",
        },
        DestinationTable = new Gcp.BigQuery.Inputs.JobCopyDestinationTableArgs
        {
            TableId = "string",
            DatasetId = "string",
            ProjectId = "string",
        },
        WriteDisposition = "string",
    },
    Extract = new Gcp.BigQuery.Inputs.JobExtractArgs
    {
        DestinationUris = new[]
        {
            "string",
        },
        Compression = "string",
        DestinationFormat = "string",
        FieldDelimiter = "string",
        PrintHeader = false,
        SourceModel = new Gcp.BigQuery.Inputs.JobExtractSourceModelArgs
        {
            DatasetId = "string",
            ModelId = "string",
            ProjectId = "string",
        },
        SourceTable = new Gcp.BigQuery.Inputs.JobExtractSourceTableArgs
        {
            TableId = "string",
            DatasetId = "string",
            ProjectId = "string",
        },
        UseAvroLogicalTypes = false,
    },
    JobTimeoutMs = "string",
    Labels = 
    {
        { "string", "string" },
    },
    Load = new Gcp.BigQuery.Inputs.JobLoadArgs
    {
        DestinationTable = new Gcp.BigQuery.Inputs.JobLoadDestinationTableArgs
        {
            TableId = "string",
            DatasetId = "string",
            ProjectId = "string",
        },
        SourceUris = new[]
        {
            "string",
        },
        MaxBadRecords = 0,
        NullMarker = "string",
        DestinationEncryptionConfiguration = new Gcp.BigQuery.Inputs.JobLoadDestinationEncryptionConfigurationArgs
        {
            KmsKeyName = "string",
            KmsKeyVersion = "string",
        },
        Autodetect = false,
        Encoding = "string",
        FieldDelimiter = "string",
        IgnoreUnknownValues = false,
        JsonExtension = "string",
        AllowJaggedRows = false,
        CreateDisposition = "string",
        ParquetOptions = new Gcp.BigQuery.Inputs.JobLoadParquetOptionsArgs
        {
            EnableListInference = false,
            EnumAsString = false,
        },
        ProjectionFields = new[]
        {
            "string",
        },
        Quote = "string",
        SchemaUpdateOptions = new[]
        {
            "string",
        },
        SkipLeadingRows = 0,
        SourceFormat = "string",
        AllowQuotedNewlines = false,
        TimePartitioning = new Gcp.BigQuery.Inputs.JobLoadTimePartitioningArgs
        {
            Type = "string",
            ExpirationMs = "string",
            Field = "string",
        },
        WriteDisposition = "string",
    },
    Location = "string",
    Project = "string",
    Query = new Gcp.BigQuery.Inputs.JobQueryArgs
    {
        Query = "string",
        MaximumBytesBilled = "string",
        Priority = "string",
        DefaultDataset = new Gcp.BigQuery.Inputs.JobQueryDefaultDatasetArgs
        {
            DatasetId = "string",
            ProjectId = "string",
        },
        DestinationEncryptionConfiguration = new Gcp.BigQuery.Inputs.JobQueryDestinationEncryptionConfigurationArgs
        {
            KmsKeyName = "string",
            KmsKeyVersion = "string",
        },
        DestinationTable = new Gcp.BigQuery.Inputs.JobQueryDestinationTableArgs
        {
            TableId = "string",
            DatasetId = "string",
            ProjectId = "string",
        },
        FlattenResults = false,
        MaximumBillingTier = 0,
        AllowLargeResults = false,
        CreateDisposition = "string",
        Continuous = false,
        ParameterMode = "string",
        SchemaUpdateOptions = new[]
        {
            "string",
        },
        ScriptOptions = new Gcp.BigQuery.Inputs.JobQueryScriptOptionsArgs
        {
            KeyResultStatement = "string",
            StatementByteBudget = "string",
            StatementTimeoutMs = "string",
        },
        UseLegacySql = false,
        UseQueryCache = false,
        UserDefinedFunctionResources = new[]
        {
            new Gcp.BigQuery.Inputs.JobQueryUserDefinedFunctionResourceArgs
            {
                InlineCode = "string",
                ResourceUri = "string",
            },
        },
        WriteDisposition = "string",
    },
});
example, err := bigquery.NewJob(ctx, "jobResource", &bigquery.JobArgs{
	JobId: pulumi.String("string"),
	Copy: &bigquery.JobCopyArgs{
		SourceTables: bigquery.JobCopySourceTableArray{
			&bigquery.JobCopySourceTableArgs{
				TableId:   pulumi.String("string"),
				DatasetId: pulumi.String("string"),
				ProjectId: pulumi.String("string"),
			},
		},
		CreateDisposition: pulumi.String("string"),
		DestinationEncryptionConfiguration: &bigquery.JobCopyDestinationEncryptionConfigurationArgs{
			KmsKeyName:    pulumi.String("string"),
			KmsKeyVersion: pulumi.String("string"),
		},
		DestinationTable: &bigquery.JobCopyDestinationTableArgs{
			TableId:   pulumi.String("string"),
			DatasetId: pulumi.String("string"),
			ProjectId: pulumi.String("string"),
		},
		WriteDisposition: pulumi.String("string"),
	},
	Extract: &bigquery.JobExtractArgs{
		DestinationUris: pulumi.StringArray{
			pulumi.String("string"),
		},
		Compression:       pulumi.String("string"),
		DestinationFormat: pulumi.String("string"),
		FieldDelimiter:    pulumi.String("string"),
		PrintHeader:       pulumi.Bool(false),
		SourceModel: &bigquery.JobExtractSourceModelArgs{
			DatasetId: pulumi.String("string"),
			ModelId:   pulumi.String("string"),
			ProjectId: pulumi.String("string"),
		},
		SourceTable: &bigquery.JobExtractSourceTableArgs{
			TableId:   pulumi.String("string"),
			DatasetId: pulumi.String("string"),
			ProjectId: pulumi.String("string"),
		},
		UseAvroLogicalTypes: pulumi.Bool(false),
	},
	JobTimeoutMs: pulumi.String("string"),
	Labels: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
	Load: &bigquery.JobLoadArgs{
		DestinationTable: &bigquery.JobLoadDestinationTableArgs{
			TableId:   pulumi.String("string"),
			DatasetId: pulumi.String("string"),
			ProjectId: pulumi.String("string"),
		},
		SourceUris: pulumi.StringArray{
			pulumi.String("string"),
		},
		MaxBadRecords: pulumi.Int(0),
		NullMarker:    pulumi.String("string"),
		DestinationEncryptionConfiguration: &bigquery.JobLoadDestinationEncryptionConfigurationArgs{
			KmsKeyName:    pulumi.String("string"),
			KmsKeyVersion: pulumi.String("string"),
		},
		Autodetect:          pulumi.Bool(false),
		Encoding:            pulumi.String("string"),
		FieldDelimiter:      pulumi.String("string"),
		IgnoreUnknownValues: pulumi.Bool(false),
		JsonExtension:       pulumi.String("string"),
		AllowJaggedRows:     pulumi.Bool(false),
		CreateDisposition:   pulumi.String("string"),
		ParquetOptions: &bigquery.JobLoadParquetOptionsArgs{
			EnableListInference: pulumi.Bool(false),
			EnumAsString:        pulumi.Bool(false),
		},
		ProjectionFields: pulumi.StringArray{
			pulumi.String("string"),
		},
		Quote: pulumi.String("string"),
		SchemaUpdateOptions: pulumi.StringArray{
			pulumi.String("string"),
		},
		SkipLeadingRows:     pulumi.Int(0),
		SourceFormat:        pulumi.String("string"),
		AllowQuotedNewlines: pulumi.Bool(false),
		TimePartitioning: &bigquery.JobLoadTimePartitioningArgs{
			Type:         pulumi.String("string"),
			ExpirationMs: pulumi.String("string"),
			Field:        pulumi.String("string"),
		},
		WriteDisposition: pulumi.String("string"),
	},
	Location: pulumi.String("string"),
	Project:  pulumi.String("string"),
	Query: &bigquery.JobQueryArgs{
		Query:              pulumi.String("string"),
		MaximumBytesBilled: pulumi.String("string"),
		Priority:           pulumi.String("string"),
		DefaultDataset: &bigquery.JobQueryDefaultDatasetArgs{
			DatasetId: pulumi.String("string"),
			ProjectId: pulumi.String("string"),
		},
		DestinationEncryptionConfiguration: &bigquery.JobQueryDestinationEncryptionConfigurationArgs{
			KmsKeyName:    pulumi.String("string"),
			KmsKeyVersion: pulumi.String("string"),
		},
		DestinationTable: &bigquery.JobQueryDestinationTableArgs{
			TableId:   pulumi.String("string"),
			DatasetId: pulumi.String("string"),
			ProjectId: pulumi.String("string"),
		},
		FlattenResults:     pulumi.Bool(false),
		MaximumBillingTier: pulumi.Int(0),
		AllowLargeResults:  pulumi.Bool(false),
		CreateDisposition:  pulumi.String("string"),
		Continuous:         pulumi.Bool(false),
		ParameterMode:      pulumi.String("string"),
		SchemaUpdateOptions: pulumi.StringArray{
			pulumi.String("string"),
		},
		ScriptOptions: &bigquery.JobQueryScriptOptionsArgs{
			KeyResultStatement:  pulumi.String("string"),
			StatementByteBudget: pulumi.String("string"),
			StatementTimeoutMs:  pulumi.String("string"),
		},
		UseLegacySql:  pulumi.Bool(false),
		UseQueryCache: pulumi.Bool(false),
		UserDefinedFunctionResources: bigquery.JobQueryUserDefinedFunctionResourceArray{
			&bigquery.JobQueryUserDefinedFunctionResourceArgs{
				InlineCode:  pulumi.String("string"),
				ResourceUri: pulumi.String("string"),
			},
		},
		WriteDisposition: pulumi.String("string"),
	},
})
var jobResource = new com.pulumi.gcp.bigquery.Job("jobResource", com.pulumi.gcp.bigquery.JobArgs.builder()
    .jobId("string")
    .copy(JobCopyArgs.builder()
        .sourceTables(JobCopySourceTableArgs.builder()
            .tableId("string")
            .datasetId("string")
            .projectId("string")
            .build())
        .createDisposition("string")
        .destinationEncryptionConfiguration(JobCopyDestinationEncryptionConfigurationArgs.builder()
            .kmsKeyName("string")
            .kmsKeyVersion("string")
            .build())
        .destinationTable(JobCopyDestinationTableArgs.builder()
            .tableId("string")
            .datasetId("string")
            .projectId("string")
            .build())
        .writeDisposition("string")
        .build())
    .extract(JobExtractArgs.builder()
        .destinationUris("string")
        .compression("string")
        .destinationFormat("string")
        .fieldDelimiter("string")
        .printHeader(false)
        .sourceModel(JobExtractSourceModelArgs.builder()
            .datasetId("string")
            .modelId("string")
            .projectId("string")
            .build())
        .sourceTable(JobExtractSourceTableArgs.builder()
            .tableId("string")
            .datasetId("string")
            .projectId("string")
            .build())
        .useAvroLogicalTypes(false)
        .build())
    .jobTimeoutMs("string")
    .labels(Map.of("string", "string"))
    .load(JobLoadArgs.builder()
        .destinationTable(JobLoadDestinationTableArgs.builder()
            .tableId("string")
            .datasetId("string")
            .projectId("string")
            .build())
        .sourceUris("string")
        .maxBadRecords(0)
        .nullMarker("string")
        .destinationEncryptionConfiguration(JobLoadDestinationEncryptionConfigurationArgs.builder()
            .kmsKeyName("string")
            .kmsKeyVersion("string")
            .build())
        .autodetect(false)
        .encoding("string")
        .fieldDelimiter("string")
        .ignoreUnknownValues(false)
        .jsonExtension("string")
        .allowJaggedRows(false)
        .createDisposition("string")
        .parquetOptions(JobLoadParquetOptionsArgs.builder()
            .enableListInference(false)
            .enumAsString(false)
            .build())
        .projectionFields("string")
        .quote("string")
        .schemaUpdateOptions("string")
        .skipLeadingRows(0)
        .sourceFormat("string")
        .allowQuotedNewlines(false)
        .timePartitioning(JobLoadTimePartitioningArgs.builder()
            .type("string")
            .expirationMs("string")
            .field("string")
            .build())
        .writeDisposition("string")
        .build())
    .location("string")
    .project("string")
    .query(JobQueryArgs.builder()
        .query("string")
        .maximumBytesBilled("string")
        .priority("string")
        .defaultDataset(JobQueryDefaultDatasetArgs.builder()
            .datasetId("string")
            .projectId("string")
            .build())
        .destinationEncryptionConfiguration(JobQueryDestinationEncryptionConfigurationArgs.builder()
            .kmsKeyName("string")
            .kmsKeyVersion("string")
            .build())
        .destinationTable(JobQueryDestinationTableArgs.builder()
            .tableId("string")
            .datasetId("string")
            .projectId("string")
            .build())
        .flattenResults(false)
        .maximumBillingTier(0)
        .allowLargeResults(false)
        .createDisposition("string")
        .continuous(false)
        .parameterMode("string")
        .schemaUpdateOptions("string")
        .scriptOptions(JobQueryScriptOptionsArgs.builder()
            .keyResultStatement("string")
            .statementByteBudget("string")
            .statementTimeoutMs("string")
            .build())
        .useLegacySql(false)
        .useQueryCache(false)
        .userDefinedFunctionResources(JobQueryUserDefinedFunctionResourceArgs.builder()
            .inlineCode("string")
            .resourceUri("string")
            .build())
        .writeDisposition("string")
        .build())
    .build());
job_resource = gcp.bigquery.Job("jobResource",
    job_id="string",
    copy={
        "source_tables": [{
            "table_id": "string",
            "dataset_id": "string",
            "project_id": "string",
        }],
        "create_disposition": "string",
        "destination_encryption_configuration": {
            "kms_key_name": "string",
            "kms_key_version": "string",
        },
        "destination_table": {
            "table_id": "string",
            "dataset_id": "string",
            "project_id": "string",
        },
        "write_disposition": "string",
    },
    extract={
        "destination_uris": ["string"],
        "compression": "string",
        "destination_format": "string",
        "field_delimiter": "string",
        "print_header": False,
        "source_model": {
            "dataset_id": "string",
            "model_id": "string",
            "project_id": "string",
        },
        "source_table": {
            "table_id": "string",
            "dataset_id": "string",
            "project_id": "string",
        },
        "use_avro_logical_types": False,
    },
    job_timeout_ms="string",
    labels={
        "string": "string",
    },
    load={
        "destination_table": {
            "table_id": "string",
            "dataset_id": "string",
            "project_id": "string",
        },
        "source_uris": ["string"],
        "max_bad_records": 0,
        "null_marker": "string",
        "destination_encryption_configuration": {
            "kms_key_name": "string",
            "kms_key_version": "string",
        },
        "autodetect": False,
        "encoding": "string",
        "field_delimiter": "string",
        "ignore_unknown_values": False,
        "json_extension": "string",
        "allow_jagged_rows": False,
        "create_disposition": "string",
        "parquet_options": {
            "enable_list_inference": False,
            "enum_as_string": False,
        },
        "projection_fields": ["string"],
        "quote": "string",
        "schema_update_options": ["string"],
        "skip_leading_rows": 0,
        "source_format": "string",
        "allow_quoted_newlines": False,
        "time_partitioning": {
            "type": "string",
            "expiration_ms": "string",
            "field": "string",
        },
        "write_disposition": "string",
    },
    location="string",
    project="string",
    query={
        "query": "string",
        "maximum_bytes_billed": "string",
        "priority": "string",
        "default_dataset": {
            "dataset_id": "string",
            "project_id": "string",
        },
        "destination_encryption_configuration": {
            "kms_key_name": "string",
            "kms_key_version": "string",
        },
        "destination_table": {
            "table_id": "string",
            "dataset_id": "string",
            "project_id": "string",
        },
        "flatten_results": False,
        "maximum_billing_tier": 0,
        "allow_large_results": False,
        "create_disposition": "string",
        "continuous": False,
        "parameter_mode": "string",
        "schema_update_options": ["string"],
        "script_options": {
            "key_result_statement": "string",
            "statement_byte_budget": "string",
            "statement_timeout_ms": "string",
        },
        "use_legacy_sql": False,
        "use_query_cache": False,
        "user_defined_function_resources": [{
            "inline_code": "string",
            "resource_uri": "string",
        }],
        "write_disposition": "string",
    })
const jobResource = new gcp.bigquery.Job("jobResource", {
    jobId: "string",
    copy: {
        sourceTables: [{
            tableId: "string",
            datasetId: "string",
            projectId: "string",
        }],
        createDisposition: "string",
        destinationEncryptionConfiguration: {
            kmsKeyName: "string",
            kmsKeyVersion: "string",
        },
        destinationTable: {
            tableId: "string",
            datasetId: "string",
            projectId: "string",
        },
        writeDisposition: "string",
    },
    extract: {
        destinationUris: ["string"],
        compression: "string",
        destinationFormat: "string",
        fieldDelimiter: "string",
        printHeader: false,
        sourceModel: {
            datasetId: "string",
            modelId: "string",
            projectId: "string",
        },
        sourceTable: {
            tableId: "string",
            datasetId: "string",
            projectId: "string",
        },
        useAvroLogicalTypes: false,
    },
    jobTimeoutMs: "string",
    labels: {
        string: "string",
    },
    load: {
        destinationTable: {
            tableId: "string",
            datasetId: "string",
            projectId: "string",
        },
        sourceUris: ["string"],
        maxBadRecords: 0,
        nullMarker: "string",
        destinationEncryptionConfiguration: {
            kmsKeyName: "string",
            kmsKeyVersion: "string",
        },
        autodetect: false,
        encoding: "string",
        fieldDelimiter: "string",
        ignoreUnknownValues: false,
        jsonExtension: "string",
        allowJaggedRows: false,
        createDisposition: "string",
        parquetOptions: {
            enableListInference: false,
            enumAsString: false,
        },
        projectionFields: ["string"],
        quote: "string",
        schemaUpdateOptions: ["string"],
        skipLeadingRows: 0,
        sourceFormat: "string",
        allowQuotedNewlines: false,
        timePartitioning: {
            type: "string",
            expirationMs: "string",
            field: "string",
        },
        writeDisposition: "string",
    },
    location: "string",
    project: "string",
    query: {
        query: "string",
        maximumBytesBilled: "string",
        priority: "string",
        defaultDataset: {
            datasetId: "string",
            projectId: "string",
        },
        destinationEncryptionConfiguration: {
            kmsKeyName: "string",
            kmsKeyVersion: "string",
        },
        destinationTable: {
            tableId: "string",
            datasetId: "string",
            projectId: "string",
        },
        flattenResults: false,
        maximumBillingTier: 0,
        allowLargeResults: false,
        createDisposition: "string",
        continuous: false,
        parameterMode: "string",
        schemaUpdateOptions: ["string"],
        scriptOptions: {
            keyResultStatement: "string",
            statementByteBudget: "string",
            statementTimeoutMs: "string",
        },
        useLegacySql: false,
        useQueryCache: false,
        userDefinedFunctionResources: [{
            inlineCode: "string",
            resourceUri: "string",
        }],
        writeDisposition: "string",
    },
});
type: gcp:bigquery:Job
properties:
    copy:
        createDisposition: string
        destinationEncryptionConfiguration:
            kmsKeyName: string
            kmsKeyVersion: string
        destinationTable:
            datasetId: string
            projectId: string
            tableId: string
        sourceTables:
            - datasetId: string
              projectId: string
              tableId: string
        writeDisposition: string
    extract:
        compression: string
        destinationFormat: string
        destinationUris:
            - string
        fieldDelimiter: string
        printHeader: false
        sourceModel:
            datasetId: string
            modelId: string
            projectId: string
        sourceTable:
            datasetId: string
            projectId: string
            tableId: string
        useAvroLogicalTypes: false
    jobId: string
    jobTimeoutMs: string
    labels:
        string: string
    load:
        allowJaggedRows: false
        allowQuotedNewlines: false
        autodetect: false
        createDisposition: string
        destinationEncryptionConfiguration:
            kmsKeyName: string
            kmsKeyVersion: string
        destinationTable:
            datasetId: string
            projectId: string
            tableId: string
        encoding: string
        fieldDelimiter: string
        ignoreUnknownValues: false
        jsonExtension: string
        maxBadRecords: 0
        nullMarker: string
        parquetOptions:
            enableListInference: false
            enumAsString: false
        projectionFields:
            - string
        quote: string
        schemaUpdateOptions:
            - string
        skipLeadingRows: 0
        sourceFormat: string
        sourceUris:
            - string
        timePartitioning:
            expirationMs: string
            field: string
            type: string
        writeDisposition: string
    location: string
    project: string
    query:
        allowLargeResults: false
        continuous: false
        createDisposition: string
        defaultDataset:
            datasetId: string
            projectId: string
        destinationEncryptionConfiguration:
            kmsKeyName: string
            kmsKeyVersion: string
        destinationTable:
            datasetId: string
            projectId: string
            tableId: string
        flattenResults: false
        maximumBillingTier: 0
        maximumBytesBilled: string
        parameterMode: string
        priority: string
        query: string
        schemaUpdateOptions:
            - string
        scriptOptions:
            keyResultStatement: string
            statementByteBudget: string
            statementTimeoutMs: string
        useLegacySql: false
        useQueryCache: false
        userDefinedFunctionResources:
            - inlineCode: string
              resourceUri: string
        writeDisposition: string

Job Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

Inputs

In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
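For instance, a minimal sketch of the same query job written both ways; the resource names, job IDs, and the SELECT statement below are placeholder values:

import pulumi_gcp as gcp

# Nested inputs as typed argument classes...
job_a = gcp.bigquery.Job("job-a",
    job_id="job_query_args_class",
    query=gcp.bigquery.JobQueryArgs(
        query="SELECT 1",
        use_legacy_sql=False,
    ))

# ...or as plain dictionaries with snake_case keys.
job_b = gcp.bigquery.Job("job-b",
    job_id="job_query_dict",
    query={
        "query": "SELECT 1",
        "use_legacy_sql": False,
    })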

The Job resource accepts the following input properties:

JobId
This property is required.
Changes to this property will trigger replacement.
string
The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
Copy Changes to this property will trigger replacement. JobCopy
Copies a table.
Extract Changes to this property will trigger replacement. JobExtract
Configures an extract job.
JobTimeoutMs Changes to this property will trigger replacement. string
Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
Labels Dictionary<string, string>
The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
Load Changes to this property will trigger replacement. JobLoad
Configures a load job.
Location Changes to this property will trigger replacement. string
The geographic location of the job. The default value is US.
Project Changes to this property will trigger replacement. string
Query Changes to this property will trigger replacement. JobQuery
Configures a query job.
JobId
This property is required.
Changes to this property will trigger replacement.
string
The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
Copy Changes to this property will trigger replacement. JobCopyArgs
Copies a table.
Extract Changes to this property will trigger replacement. JobExtractArgs
Configures an extract job.
JobTimeoutMs Changes to this property will trigger replacement. string
Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
Labels map[string]string
The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
Load Changes to this property will trigger replacement. JobLoadArgs
Configures a load job.
Location Changes to this property will trigger replacement. string
The geographic location of the job. The default value is US.
Project Changes to this property will trigger replacement. string
Query Changes to this property will trigger replacement. JobQueryArgs
Configures a query job.
jobId
This property is required.
Changes to this property will trigger replacement.
String
The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
copy Changes to this property will trigger replacement. JobCopy
Copies a table.
extract Changes to this property will trigger replacement. JobExtract
Configures an extract job.
jobTimeoutMs Changes to this property will trigger replacement. String
Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
labels Map<String,String>
The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
load Changes to this property will trigger replacement. JobLoad
Configures a load job.
location Changes to this property will trigger replacement. String
The geographic location of the job. The default value is US.
project Changes to this property will trigger replacement. String
query Changes to this property will trigger replacement. JobQuery
Configures a query job.
jobId
This property is required.
Changes to this property will trigger replacement.
string
The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
copy Changes to this property will trigger replacement. JobCopy
Copies a table.
extract Changes to this property will trigger replacement. JobExtract
Configures an extract job.
jobTimeoutMs Changes to this property will trigger replacement. string
Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
labels {[key: string]: string}
The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
load Changes to this property will trigger replacement. JobLoad
Configures a load job.
location Changes to this property will trigger replacement. string
The geographic location of the job. The default value is US.
project Changes to this property will trigger replacement. string
query Changes to this property will trigger replacement. JobQuery
Configures a query job.
job_id
This property is required.
Changes to this property will trigger replacement.
str
The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
copy Changes to this property will trigger replacement. JobCopyArgs
Copies a table.
extract Changes to this property will trigger replacement. JobExtractArgs
Configures an extract job.
job_timeout_ms Changes to this property will trigger replacement. str
Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
labels Mapping[str, str]
The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
load Changes to this property will trigger replacement. JobLoadArgs
Configures a load job.
location Changes to this property will trigger replacement. str
The geographic location of the job. The default value is US.
project Changes to this property will trigger replacement. str
query Changes to this property will trigger replacement. JobQueryArgs
Configures a query job.
jobId
This property is required.
Changes to this property will trigger replacement.
String
The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
copy Changes to this property will trigger replacement. Property Map
Copies a table.
extract Changes to this property will trigger replacement. Property Map
Configures an extract job.
jobTimeoutMs Changes to this property will trigger replacement. String
Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
labels Map<String>
The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
load Changes to this property will trigger replacement. Property Map
Configures a load job.
location Changes to this property will trigger replacement. String
The geographic location of the job. The default value is US.
project Changes to this property will trigger replacement. String
query Changes to this property will trigger replacement. Property Map
Configures a query job.

Outputs

All input properties are implicitly available as output properties. Additionally, the Job resource produces the following output properties:

EffectiveLabels Dictionary<string, string>
(Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
Id string
The provider-assigned unique ID for this managed resource.
JobType string
(Output) The type of the job.
PulumiLabels Dictionary<string, string>
(Output) The combination of labels configured directly on the resource and default labels configured on the provider.
Statuses List<JobStatus>
The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
UserEmail string
Email address of the user who ran the job.
EffectiveLabels map[string]string
(Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
Id string
The provider-assigned unique ID for this managed resource.
JobType string
(Output) The type of the job.
PulumiLabels map[string]string
(Output) The combination of labels configured directly on the resource and default labels configured on the provider.
Statuses []JobStatus
The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
UserEmail string
Email address of the user who ran the job.
effectiveLabels Map<String,String>
(Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
id String
The provider-assigned unique ID for this managed resource.
jobType String
(Output) The type of the job.
pulumiLabels Map<String,String>
(Output) The combination of labels configured directly on the resource and default labels configured on the provider.
statuses List<JobStatus>
The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
userEmail String
Email address of the user who ran the job.
effectiveLabels {[key: string]: string}
(Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
id string
The provider-assigned unique ID for this managed resource.
jobType string
(Output) The type of the job.
pulumiLabels {[key: string]: string}
(Output) The combination of labels configured directly on the resource and default labels configured on the provider.
statuses JobStatus[]
The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
userEmail string
Email address of the user who ran the job.
effective_labels Mapping[str, str]
(Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
id str
The provider-assigned unique ID for this managed resource.
job_type str
(Output) The type of the job.
pulumi_labels Mapping[str, str]
(Output) The combination of labels configured directly on the resource and default labels configured on the provider.
statuses Sequence[JobStatus]
The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
user_email str
Email address of the user who ran the job.
effectiveLabels Map<String>
(Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
id String
The provider-assigned unique ID for this managed resource.
jobType String
(Output) The type of the job.
pulumiLabels Map<String>
(Output) The combination of labels configured directly on the resource and default labels configured on the provider.
statuses List<Property Map>
The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
userEmail String
Email address of the user who ran the job.
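
As a hedged illustration of consuming these outputs in TypeScript, the sketch below (a minimal variant of the query-job pattern shown earlier; the job ID and query are placeholders) exports the job type, the submitting user, and the state reported in the first statuses entry:

import * as gcp from "@pulumi/gcp";

// A minimal sketch, not the canonical example: a trivial query job whose
// output-only properties are surfaced as stack outputs.
const outputDemo = new gcp.bigquery.Job("output-demo", {
    jobId: "job_output_demo", // placeholder job ID
    query: {
        query: "SELECT 1",
    },
});

// Output-only properties produced by the Job resource.
export const jobType = outputDemo.jobType;
export const userEmail = outputDemo.userEmail;
// Each statuses entry describes the job's running state; inspect it when
// polling an asynchronous job for completion.
export const jobState = outputDemo.statuses.apply(s => s[0]?.state);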

Look up Existing Job Resource

Get an existing Job resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: JobState, opts?: CustomResourceOptions): Job
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        copy: Optional[JobCopyArgs] = None,
        effective_labels: Optional[Mapping[str, str]] = None,
        extract: Optional[JobExtractArgs] = None,
        job_id: Optional[str] = None,
        job_timeout_ms: Optional[str] = None,
        job_type: Optional[str] = None,
        labels: Optional[Mapping[str, str]] = None,
        load: Optional[JobLoadArgs] = None,
        location: Optional[str] = None,
        project: Optional[str] = None,
        pulumi_labels: Optional[Mapping[str, str]] = None,
        query: Optional[JobQueryArgs] = None,
        statuses: Optional[Sequence[JobStatusArgs]] = None,
        user_email: Optional[str] = None) -> Job
func GetJob(ctx *Context, name string, id IDInput, state *JobState, opts ...ResourceOption) (*Job, error)
public static Job Get(string name, Input<string> id, JobState? state, CustomResourceOptions? opts = null)
public static Job get(String name, Output<String> id, JobState state, CustomResourceOptions options)
resources:
  _:
    type: gcp:bigquery:Job
    get:
      id: ${id}
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to look up.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to look up.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to look up.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to look up.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to look up.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
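
For example, a minimal TypeScript sketch of such a lookup; the resource ID string below is only an illustrative placeholder, not a guaranteed format, so substitute the ID reported by your stack or by pulumi import:

import * as gcp from "@pulumi/gcp";

// A hedged sketch: reference an existing Job by name and provider ID.
// "projects/my-project/jobs/job_query/location/US" is a placeholder ID;
// use the ID your provider reports for the job you want to reference.
const existing = gcp.bigquery.Job.get("existing-job",
    "projects/my-project/jobs/job_query/location/US");

// The looked-up resource exposes the same output properties as a managed one.
export const existingJobType = existing.jobType;
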
The following state arguments are supported:
Copy Changes to this property will trigger replacement. JobCopy
Copies a table.
EffectiveLabels Changes to this property will trigger replacement. Dictionary<string, string>
(Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
Extract Changes to this property will trigger replacement. JobExtract
Configures an extract job.
JobId Changes to this property will trigger replacement. string
The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
JobTimeoutMs Changes to this property will trigger replacement. string
Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
JobType string
(Output) The type of the job.
Labels Dictionary<string, string>
The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
Load Changes to this property will trigger replacement. JobLoad
Configures a load job.
Location Changes to this property will trigger replacement. string
The geographic location of the job.
Project Changes to this property will trigger replacement. string
PulumiLabels Dictionary<string, string>
(Output) The combination of labels configured directly on the resource and default labels configured on the provider.
Query Changes to this property will trigger replacement. JobQuery
Configures a query job.
Statuses List<JobStatus>
The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
UserEmail string
Email address of the user who ran the job.
Copy Changes to this property will trigger replacement. JobCopyArgs
Copies a table.
EffectiveLabels Changes to this property will trigger replacement. map[string]string
(Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
Extract Changes to this property will trigger replacement. JobExtractArgs
Configures an extract job.
JobId Changes to this property will trigger replacement. string
The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
JobTimeoutMs Changes to this property will trigger replacement. string
Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
JobType string
(Output) The type of the job.
Labels map[string]string
The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
Load Changes to this property will trigger replacement. JobLoadArgs
Configures a load job.
Location Changes to this property will trigger replacement. string
The geographic location of the job.
Project Changes to this property will trigger replacement. string
PulumiLabels map[string]string
(Output) The combination of labels configured directly on the resource and default labels configured on the provider.
Query Changes to this property will trigger replacement. JobQueryArgs
Configures a query job.
Statuses []JobStatusArgs
The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
UserEmail string
Email address of the user who ran the job.
copy Changes to this property will trigger replacement. JobCopy
Copies a table.
effectiveLabels Changes to this property will trigger replacement. Map<String,String>
(Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
extract Changes to this property will trigger replacement. JobExtract
Configures an extract job.
jobId Changes to this property will trigger replacement. String
The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
jobTimeoutMs Changes to this property will trigger replacement. String
Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
jobType String
(Output) The type of the job.
labels Map<String,String>
The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
load Changes to this property will trigger replacement. JobLoad
Configures a load job.
location Changes to this property will trigger replacement. String
The geographic location of the job.
project Changes to this property will trigger replacement. String
pulumiLabels Map<String,String>
(Output) The combination of labels configured directly on the resource and default labels configured on the provider.
query Changes to this property will trigger replacement. JobQuery
Configures a query job.
statuses List<JobStatus>
The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
userEmail String
Email address of the user who ran the job.
copy Changes to this property will trigger replacement. JobCopy
Copies a table.
effectiveLabels Changes to this property will trigger replacement. {[key: string]: string}
(Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
extract Changes to this property will trigger replacement. JobExtract
Configures an extract job.
jobId Changes to this property will trigger replacement. string
The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
jobTimeoutMs Changes to this property will trigger replacement. string
Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
jobType string
(Output) The type of the job.
labels {[key: string]: string}
The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
load Changes to this property will trigger replacement. JobLoad
Configures a load job.
location Changes to this property will trigger replacement. string
The geographic location of the job.
project Changes to this property will trigger replacement. string
pulumiLabels {[key: string]: string}
(Output) The combination of labels configured directly on the resource and default labels configured on the provider.
query Changes to this property will trigger replacement. JobQuery
Configures a query job.
statuses JobStatus[]
The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
userEmail string
Email address of the user who ran the job.
copy Changes to this property will trigger replacement. JobCopyArgs
Copies a table.
effective_labels Changes to this property will trigger replacement. Mapping[str, str]
(Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
extract Changes to this property will trigger replacement. JobExtractArgs
Configures an extract job.
job_id Changes to this property will trigger replacement. str
The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
job_timeout_ms Changes to this property will trigger replacement. str
Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
job_type str
(Output) The type of the job.
labels Mapping[str, str]
The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
load Changes to this property will trigger replacement. JobLoadArgs
Configures a load job.
location Changes to this property will trigger replacement. str
The geographic location of the job.
project Changes to this property will trigger replacement. str
pulumi_labels Mapping[str, str]
(Output) The combination of labels configured directly on the resource and default labels configured on the provider.
query Changes to this property will trigger replacement. JobQueryArgs
Configures a query job.
statuses Sequence[JobStatusArgs]
The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
user_email str
Email address of the user who ran the job.
copy Changes to this property will trigger replacement. Property Map
Copies a table.
effectiveLabels Changes to this property will trigger replacement. Map<String>
(Output) All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
extract Changes to this property will trigger replacement. Property Map
Configures an extract job.
jobId Changes to this property will trigger replacement. String
The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.
jobTimeoutMs Changes to this property will trigger replacement. String
Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.
jobType String
(Output) The type of the job.
labels Map<String>
The labels associated with this job. You can use these to organize and group your jobs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
load Changes to this property will trigger replacement. Property Map
Configures a load job.
location Changes to this property will trigger replacement. String
The geographic location of the job.
project Changes to this property will trigger replacement. String
pulumiLabels Map<String>
(Output) The combination of labels configured directly on the resource and default labels configured on the provider.
query Changes to this property will trigger replacement. Property Map
Configures a query job.
statuses List<Property Map>
The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. Structure is documented below.
userEmail String
Email address of the user who ran the job.

Supporting Types

JobCopy
, JobCopyArgs

SourceTables
This property is required.
Changes to this property will trigger replacement.
List<JobCopySourceTable>
Source tables to copy. Structure is documented below.
CreateDisposition Changes to this property will trigger replacement. string
Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
DestinationEncryptionConfiguration Changes to this property will trigger replacement. JobCopyDestinationEncryptionConfiguration
Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
DestinationTable Changes to this property will trigger replacement. JobCopyDestinationTable
The destination table. Structure is documented below.
WriteDisposition Changes to this property will trigger replacement. string
Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
SourceTables
This property is required.
Changes to this property will trigger replacement.
[]JobCopySourceTable
Source tables to copy. Structure is documented below.
CreateDisposition Changes to this property will trigger replacement. string
Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
DestinationEncryptionConfiguration Changes to this property will trigger replacement. JobCopyDestinationEncryptionConfiguration
Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
DestinationTable Changes to this property will trigger replacement. JobCopyDestinationTable
The destination table. Structure is documented below.
WriteDisposition Changes to this property will trigger replacement. string
Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
sourceTables
This property is required.
Changes to this property will trigger replacement.
List<JobCopySourceTable>
Source tables to copy. Structure is documented below.
createDisposition Changes to this property will trigger replacement. String
Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
destinationEncryptionConfiguration Changes to this property will trigger replacement. JobCopyDestinationEncryptionConfiguration
Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
destinationTable Changes to this property will trigger replacement. JobCopyDestinationTable
The destination table. Structure is documented below.
writeDisposition Changes to this property will trigger replacement. String
Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
sourceTables
This property is required.
Changes to this property will trigger replacement.
JobCopySourceTable[]
Source tables to copy. Structure is documented below.
createDisposition Changes to this property will trigger replacement. string
Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
destinationEncryptionConfiguration Changes to this property will trigger replacement. JobCopyDestinationEncryptionConfiguration
Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
destinationTable Changes to this property will trigger replacement. JobCopyDestinationTable
The destination table. Structure is documented below.
writeDisposition Changes to this property will trigger replacement. string
Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
source_tables
This property is required.
Changes to this property will trigger replacement.
Sequence[JobCopySourceTable]
Source tables to copy. Structure is documented below.
create_disposition Changes to this property will trigger replacement. str
Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
destination_encryption_configuration Changes to this property will trigger replacement. JobCopyDestinationEncryptionConfiguration
Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
destination_table Changes to this property will trigger replacement. JobCopyDestinationTable
The destination table. Structure is documented below.
write_disposition Changes to this property will trigger replacement. str
Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
sourceTables
This property is required.
Changes to this property will trigger replacement.
List<Property Map>
Source tables to copy. Structure is documented below.
createDisposition Changes to this property will trigger replacement. String
Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
destinationEncryptionConfiguration Changes to this property will trigger replacement. Property Map
Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
destinationTable Changes to this property will trigger replacement. Property Map
The destination table. Structure is documented below.
writeDisposition Changes to this property will trigger replacement. String
Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
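
As a hedged TypeScript sketch of these options (the project, dataset, and table IDs below are placeholders for tables that already exist), a copy job wiring sourceTables, destinationTable, and the two dispositions might look like:

import * as gcp from "@pulumi/gcp";

// A minimal sketch of a copy job; IDs are placeholders.
const copyJob = new gcp.bigquery.Job("copy", {
    jobId: "job_copy",
    copy: {
        sourceTables: [{
            projectId: "my-project",
            datasetId: "source_dataset",
            tableId: "source_table",
        }],
        destinationTable: {
            projectId: "my-project",
            datasetId: "dest_dataset",
            tableId: "dest_table",
        },
        // Create the destination table if needed and replace any existing rows.
        createDisposition: "CREATE_IF_NEEDED",
        writeDisposition: "WRITE_TRUNCATE",
    },
});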

JobCopyDestinationEncryptionConfiguration
, JobCopyDestinationEncryptionConfigurationArgs

KmsKeyName
This property is required.
Changes to this property will trigger replacement.
string
Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
KmsKeyVersion string
(Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
KmsKeyName
This property is required.
Changes to this property will trigger replacement.
string
Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
KmsKeyVersion string
(Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
kmsKeyName
This property is required.
Changes to this property will trigger replacement.
String
Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
kmsKeyVersion String
(Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
kmsKeyName
This property is required.
Changes to this property will trigger replacement.
string
Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
kmsKeyVersion string
(Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
kms_key_name
This property is required.
Changes to this property will trigger replacement.
str
Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
kms_key_version str
(Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
kmsKeyName
This property is required.
Changes to this property will trigger replacement.
String
Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
kmsKeyVersion String
(Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
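
As a hedged sketch, the fragment below attaches a customer-managed key to a copy job's destination table; the Cloud KMS key resource name and table IDs are placeholders, and the key must already be accessible to the BigQuery service account:

import * as gcp from "@pulumi/gcp";

// A minimal sketch; the Cloud KMS key name and table IDs are placeholders.
const encryptedCopy = new gcp.bigquery.Job("encrypted-copy", {
    jobId: "job_copy_encrypted",
    copy: {
        sourceTables: [{
            tableId: "projects/my-project/datasets/source_dataset/tables/source_table",
        }],
        destinationTable: {
            tableId: "projects/my-project/datasets/dest_dataset/tables/dest_table",
        },
        destinationEncryptionConfiguration: {
            kmsKeyName: "projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key",
        },
    },
});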

JobCopyDestinationTable
, JobCopyDestinationTableArgs

TableId
This property is required.
Changes to this property will trigger replacement.
string
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or as projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
DatasetId Changes to this property will trigger replacement. string
The ID of the dataset containing this table.
ProjectId Changes to this property will trigger replacement. string
The ID of the project containing this table.
TableId
This property is required.
Changes to this property will trigger replacement.
string
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or as projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
DatasetId Changes to this property will trigger replacement. string
The ID of the dataset containing this table.
ProjectId Changes to this property will trigger replacement. string
The ID of the project containing this table.
tableId
This property is required.
Changes to this property will trigger replacement.
String
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or as projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
datasetId Changes to this property will trigger replacement. String
The ID of the dataset containing this table.
projectId Changes to this property will trigger replacement. String
The ID of the project containing this table.
tableId
This property is required.
Changes to this property will trigger replacement.
string
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or as projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
datasetId Changes to this property will trigger replacement. string
The ID of the dataset containing this table.
projectId Changes to this property will trigger replacement. string
The ID of the project containing this table.
table_id
This property is required.
Changes to this property will trigger replacement.
str
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or as projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
dataset_id Changes to this property will trigger replacement. str
The ID of the dataset containing this table.
project_id Changes to this property will trigger replacement. str
The ID of the project containing this table.
tableId
This property is required.
Changes to this property will trigger replacement.
String
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or as projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
datasetId Changes to this property will trigger replacement. String
The ID of the dataset containing this table.
projectId Changes to this property will trigger replacement. String
The ID of the project containing this table.

JobCopySourceTable
, JobCopySourceTableArgs

TableId
This property is required.
Changes to this property will trigger replacement.
string
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or as projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
DatasetId Changes to this property will trigger replacement. string
The ID of the dataset containing this table.
ProjectId Changes to this property will trigger replacement. string
The ID of the project containing this table.
TableId
This property is required.
Changes to this property will trigger replacement.
string
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or as projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
DatasetId Changes to this property will trigger replacement. string
The ID of the dataset containing this table.
ProjectId Changes to this property will trigger replacement. string
The ID of the project containing this table.
tableId
This property is required.
Changes to this property will trigger replacement.
String
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or as projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
datasetId Changes to this property will trigger replacement. String
The ID of the dataset containing this table.
projectId Changes to this property will trigger replacement. String
The ID of the project containing this table.
tableId
This property is required.
Changes to this property will trigger replacement.
string
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or as projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
datasetId Changes to this property will trigger replacement. string
The ID of the dataset containing this table.
projectId Changes to this property will trigger replacement. string
The ID of the project containing this table.
table_id
This property is required.
Changes to this property will trigger replacement.
str
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or as projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
dataset_id Changes to this property will trigger replacement. str
The ID of the dataset containing this table.
project_id Changes to this property will trigger replacement. str
The ID of the project containing this table.
tableId
This property is required.
Changes to this property will trigger replacement.
String
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or as projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
datasetId Changes to this property will trigger replacement. String
The ID of the dataset containing this table.
projectId Changes to this property will trigger replacement. String
The ID of the project containing this table.

JobExtract
, JobExtractArgs

DestinationUris
This property is required.
Changes to this property will trigger replacement.
List<string>
A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.
Compression Changes to this property will trigger replacement. string
The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.
DestinationFormat Changes to this property will trigger replacement. string
The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.
FieldDelimiter Changes to this property will trigger replacement. string
When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','.
PrintHeader Changes to this property will trigger replacement. bool
Whether to print out a header row in the results. Default is true.
SourceModel Changes to this property will trigger replacement. JobExtractSourceModel
A reference to the model being exported. Structure is documented below.
SourceTable Changes to this property will trigger replacement. JobExtractSourceTable
A reference to the table being exported. Structure is documented below.
UseAvroLogicalTypes Changes to this property will trigger replacement. bool
Whether to use logical types when extracting to AVRO format.
DestinationUris
This property is required.
Changes to this property will trigger replacement.
[]string
A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.
Compression Changes to this property will trigger replacement. string
The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.
DestinationFormat Changes to this property will trigger replacement. string
The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.
FieldDelimiter Changes to this property will trigger replacement. string
When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','.
PrintHeader Changes to this property will trigger replacement. bool
Whether to print out a header row in the results. Default is true.
SourceModel Changes to this property will trigger replacement. JobExtractSourceModel
A reference to the model being exported. Structure is documented below.
SourceTable Changes to this property will trigger replacement. JobExtractSourceTable
A reference to the table being exported. Structure is documented below.
UseAvroLogicalTypes Changes to this property will trigger replacement. bool
Whether to use logical types when extracting to AVRO format.
destinationUris
This property is required.
Changes to this property will trigger replacement.
List<String>
A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.
compression Changes to this property will trigger replacement. String
The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.
destinationFormat Changes to this property will trigger replacement. String
The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.
fieldDelimiter Changes to this property will trigger replacement. String
When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','.
printHeader Changes to this property will trigger replacement. Boolean
Whether to print out a header row in the results. Default is true.
sourceModel Changes to this property will trigger replacement. JobExtractSourceModel
A reference to the model being exported. Structure is documented below.
sourceTable Changes to this property will trigger replacement. JobExtractSourceTable
A reference to the table being exported. Structure is documented below.
useAvroLogicalTypes Changes to this property will trigger replacement. Boolean
Whether to use logical types when extracting to AVRO format.
destinationUris
This property is required.
Changes to this property will trigger replacement.
string[]
A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.
compression Changes to this property will trigger replacement. string
The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.
destinationFormat Changes to this property will trigger replacement. string
The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.
fieldDelimiter Changes to this property will trigger replacement. string
When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','.
printHeader Changes to this property will trigger replacement. boolean
Whether to print out a header row in the results. Default is true.
sourceModel Changes to this property will trigger replacement. JobExtractSourceModel
A reference to the model being exported. Structure is documented below.
sourceTable Changes to this property will trigger replacement. JobExtractSourceTable
A reference to the table being exported. Structure is documented below.
useAvroLogicalTypes Changes to this property will trigger replacement. boolean
Whether to use logical types when extracting to AVRO format.
destination_uris
This property is required.
Changes to this property will trigger replacement.
Sequence[str]
A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.
compression Changes to this property will trigger replacement. str
The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.
destination_format Changes to this property will trigger replacement. str
The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.
field_delimiter Changes to this property will trigger replacement. str
When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','.
print_header Changes to this property will trigger replacement. bool
Whether to print out a header row in the results. Default is true.
source_model Changes to this property will trigger replacement. JobExtractSourceModel
A reference to the model being exported. Structure is documented below.
source_table Changes to this property will trigger replacement. JobExtractSourceTable
A reference to the table being exported. Structure is documented below.
use_avro_logical_types Changes to this property will trigger replacement. bool
Whether to use logical types when extracting to AVRO format.
destinationUris
This property is required.
Changes to this property will trigger replacement.
List<String>
A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.
compression Changes to this property will trigger replacement. String
The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.
destinationFormat Changes to this property will trigger replacement. String
The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. The default value for models is SAVED_MODEL.
fieldDelimiter Changes to this property will trigger replacement. String
When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. Default is ','.
printHeader Changes to this property will trigger replacement. Boolean
Whether to print out a header row in the results. Default is true.
sourceModel Changes to this property will trigger replacement. Property Map
A reference to the model being exported. Structure is documented below.
sourceTable Changes to this property will trigger replacement. Property Map
A reference to the table being exported. Structure is documented below.
useAvroLogicalTypes Changes to this property will trigger replacement. Boolean
Whether to use logical types when extracting to AVRO format.
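
A hedged TypeScript sketch of an extract job using these fields (the bucket, table, and job IDs are placeholders) that writes GZIP-compressed newline-delimited JSON to Cloud Storage:

import * as gcp from "@pulumi/gcp";

// A minimal sketch of an extract job; names below are placeholders.
const extractJob = new gcp.bigquery.Job("extract", {
    jobId: "job_extract",
    extract: {
        sourceTable: {
            projectId: "my-project",
            datasetId: "source_dataset",
            tableId: "source_table",
        },
        // One wildcard URI so large tables can be sharded across objects.
        destinationUris: ["gs://my-bucket/exports/source_table-*.json.gz"],
        destinationFormat: "NEWLINE_DELIMITED_JSON",
        compression: "GZIP",
    },
});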

JobExtractSourceModel
, JobExtractSourceModelArgs

DatasetId
This property is required.
Changes to this property will trigger replacement.
string
The ID of the dataset containing this model.
ModelId
This property is required.
Changes to this property will trigger replacement.
string
The ID of the model.


ProjectId
This property is required.
Changes to this property will trigger replacement.
string
The ID of the project containing this model.
DatasetId
This property is required.
Changes to this property will trigger replacement.
string
The ID of the dataset containing this model.
ModelId
This property is required.
Changes to this property will trigger replacement.
string
The ID of the model.


ProjectId
This property is required.
Changes to this property will trigger replacement.
string
The ID of the project containing this model.
datasetId
This property is required.
Changes to this property will trigger replacement.
String
The ID of the dataset containing this model.
modelId
This property is required.
Changes to this property will trigger replacement.
String
The ID of the model.


projectId
This property is required.
Changes to this property will trigger replacement.
String
The ID of the project containing this model.
datasetId
This property is required.
Changes to this property will trigger replacement.
string
The ID of the dataset containing this model.
modelId
This property is required.
Changes to this property will trigger replacement.
string
The ID of the model.


projectId
This property is required.
Changes to this property will trigger replacement.
string
The ID of the project containing this model.
dataset_id
This property is required.
Changes to this property will trigger replacement.
str
The ID of the dataset containing this model.
model_id
This property is required.
Changes to this property will trigger replacement.
str
The ID of the model.


project_id
This property is required.
Changes to this property will trigger replacement.
str
The ID of the project containing this model.
datasetId
This property is required.
Changes to this property will trigger replacement.
String
The ID of the dataset containing this model.
modelId
This property is required.
Changes to this property will trigger replacement.
String
The ID of the model.


projectId
This property is required.
Changes to this property will trigger replacement.
String
The ID of the project containing this model.
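
For completeness, a hedged sketch of exporting a BigQuery ML model rather than a table: the extract block references sourceModel instead of sourceTable (the project, dataset, model, and bucket names are placeholders):

import * as gcp from "@pulumi/gcp";

// A minimal sketch; the model and bucket names are placeholders.
const modelExtract = new gcp.bigquery.Job("model-extract", {
    jobId: "job_extract_model",
    extract: {
        sourceModel: {
            projectId: "my-project",
            datasetId: "ml_dataset",
            modelId: "my_model",
        },
        destinationUris: ["gs://my-bucket/models/my_model/"],
    },
});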

JobExtractSourceTable
, JobExtractSourceTableArgs

TableId
This property is required.
Changes to this property will trigger replacement.
string
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or as projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
DatasetId Changes to this property will trigger replacement. string
The ID of the dataset containing this table.
ProjectId Changes to this property will trigger replacement. string
The ID of the project containing this table.
TableId
This property is required.
Changes to this property will trigger replacement.
string
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or as projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
DatasetId Changes to this property will trigger replacement. string
The ID of the dataset containing this table.
ProjectId Changes to this property will trigger replacement. string
The ID of the project containing this table.
tableId
This property is required.
Changes to this property will trigger replacement.
String
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or as projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
datasetId Changes to this property will trigger replacement. String
The ID of the dataset containing this table.
projectId Changes to this property will trigger replacement. String
The ID of the project containing this table.
tableId
This property is required.
Changes to this property will trigger replacement.
string
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or as projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
datasetId Changes to this property will trigger replacement. string
The ID of the dataset containing this table.
projectId Changes to this property will trigger replacement. string
The ID of the project containing this table.
table_id
This property is required.
Changes to this property will trigger replacement.
str
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or as projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
dataset_id Changes to this property will trigger replacement. str
The ID of the dataset containing this table.
project_id Changes to this property will trigger replacement. str
The ID of the project containing this table.
tableId
This property is required.
Changes to this property will trigger replacement.
String
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or as projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
datasetId Changes to this property will trigger replacement. String
The ID of the dataset containing this table.
projectId Changes to this property will trigger replacement. String
The ID of the project containing this table.

JobLoad
, JobLoadArgs

DestinationTable
This property is required.
Changes to this property will trigger replacement.
JobLoadDestinationTable
The destination table to load the data into. Structure is documented below.
SourceUris
This property is required.
Changes to this property will trigger replacement.
List<string>
The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
AllowJaggedRows Changes to this property will trigger replacement. bool
Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.
AllowQuotedNewlines Changes to this property will trigger replacement. bool
Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
Autodetect Changes to this property will trigger replacement. bool
Indicates if we should automatically infer the options and schema for CSV and JSON sources.
CreateDisposition Changes to this property will trigger replacement. string
Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
DestinationEncryptionConfiguration Changes to this property will trigger replacement. JobLoadDestinationEncryptionConfiguration
Custom encryption configuration (e.g., Cloud KMS keys). Structure is documented below.
Encoding Changes to this property will trigger replacement. string
The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
FieldDelimiter Changes to this property will trigger replacement. string
The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
IgnoreUnknownValues Changes to this property will trigger replacement. bool
Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: trailing columns; JSON: named values that don't match any column names.
JsonExtension Changes to this property will trigger replacement. string
If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited GeoJSON: set to GEOJSON.
MaxBadRecords Changes to this property will trigger replacement. int
The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
NullMarker Changes to this property will trigger replacement. string
Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.
ParquetOptions Changes to this property will trigger replacement. JobLoadParquetOptions
Parquet Options for load and make external tables. Structure is documented below.
ProjectionFields Changes to this property will trigger replacement. List<string>
If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.
Quote Changes to this property will trigger replacement. string
The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
SchemaUpdateOptions Changes to this property will trigger replacement. List<string>
Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
SkipLeadingRows Changes to this property will trigger replacement. int
The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.
SourceFormat Changes to this property will trigger replacement. string
The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.
TimePartitioning Changes to this property will trigger replacement. JobLoadTimePartitioning
Time-based partitioning specification for the destination table. Structure is documented below.
WriteDisposition Changes to this property will trigger replacement. string
Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
DestinationTable
This property is required.
Changes to this property will trigger replacement.
JobLoadDestinationTable
The destination table to load the data into. Structure is documented below.
SourceUris
This property is required.
Changes to this property will trigger replacement.
[]string
The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
AllowJaggedRows Changes to this property will trigger replacement. bool
Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.
AllowQuotedNewlines Changes to this property will trigger replacement. bool
Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
Autodetect Changes to this property will trigger replacement. bool
Indicates if we should automatically infer the options and schema for CSV and JSON sources.
CreateDisposition Changes to this property will trigger replacement. string
Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
DestinationEncryptionConfiguration Changes to this property will trigger replacement. JobLoadDestinationEncryptionConfiguration
Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
Encoding Changes to this property will trigger replacement. string
The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
FieldDelimiter Changes to this property will trigger replacement. string
The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
IgnoreUnknownValues Changes to this property will trigger replacement. bool
Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: for CSV, trailing columns; for JSON, named values that don't match any column names.
JsonExtension Changes to this property will trigger replacement. string
If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON, set this to GEOJSON for newline-delimited GeoJSON.
MaxBadRecords Changes to this property will trigger replacement. int
The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
NullMarker Changes to this property will trigger replacement. string
Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.
ParquetOptions Changes to this property will trigger replacement. JobLoadParquetOptions
Parquet options for load jobs and for creating external tables. Structure is documented below.
ProjectionFields Changes to this property will trigger replacement. []string
If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.
Quote Changes to this property will trigger replacement. string
The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
SchemaUpdateOptions Changes to this property will trigger replacement. []string
Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
SkipLeadingRows Changes to this property will trigger replacement. int
The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.
SourceFormat Changes to this property will trigger replacement. string
The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.
TimePartitioning Changes to this property will trigger replacement. JobLoadTimePartitioning
Time-based partitioning specification for the destination table. Structure is documented below.
WriteDisposition Changes to this property will trigger replacement. string
Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
destinationTable
This property is required.
Changes to this property will trigger replacement.
JobLoadDestinationTable
The destination table to load the data into. Structure is documented below.
sourceUris
This property is required.
Changes to this property will trigger replacement.
List<String>
The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
allowJaggedRows Changes to this property will trigger replacement. Boolean
Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.
allowQuotedNewlines Changes to this property will trigger replacement. Boolean
Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
autodetect Changes to this property will trigger replacement. Boolean
Indicates if we should automatically infer the options and schema for CSV and JSON sources.
createDisposition Changes to this property will trigger replacement. String
Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
destinationEncryptionConfiguration Changes to this property will trigger replacement. JobLoadDestinationEncryptionConfiguration
Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
encoding Changes to this property will trigger replacement. String
The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
fieldDelimiter Changes to this property will trigger replacement. String
The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
ignoreUnknownValues Changes to this property will trigger replacement. Boolean
Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: for CSV, trailing columns; for JSON, named values that don't match any column names.
jsonExtension Changes to this property will trigger replacement. String
If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON, set this to GEOJSON for newline-delimited GeoJSON.
maxBadRecords Changes to this property will trigger replacement. Integer
The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
nullMarker Changes to this property will trigger replacement. String
Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.
parquetOptions Changes to this property will trigger replacement. JobLoadParquetOptions
Parquet options for load jobs and for creating external tables. Structure is documented below.
projectionFields Changes to this property will trigger replacement. List<String>
If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.
quote Changes to this property will trigger replacement. String
The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
schemaUpdateOptions Changes to this property will trigger replacement. List<String>
Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
skipLeadingRows Changes to this property will trigger replacement. Integer
The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.
sourceFormat Changes to this property will trigger replacement. String
The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.
timePartitioning Changes to this property will trigger replacement. JobLoadTimePartitioning
Time-based partitioning specification for the destination table. Structure is documented below.
writeDisposition Changes to this property will trigger replacement. String
Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
destinationTable
This property is required.
Changes to this property will trigger replacement.
JobLoadDestinationTable
The destination table to load the data into. Structure is documented below.
sourceUris
This property is required.
Changes to this property will trigger replacement.
string[]
The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
allowJaggedRows Changes to this property will trigger replacement. boolean
Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.
allowQuotedNewlines Changes to this property will trigger replacement. boolean
Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
autodetect Changes to this property will trigger replacement. boolean
Indicates if we should automatically infer the options and schema for CSV and JSON sources.
createDisposition Changes to this property will trigger replacement. string
Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
destinationEncryptionConfiguration Changes to this property will trigger replacement. JobLoadDestinationEncryptionConfiguration
Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
encoding Changes to this property will trigger replacement. string
The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
fieldDelimiter Changes to this property will trigger replacement. string
The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
ignoreUnknownValues Changes to this property will trigger replacement. boolean
Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: for CSV, trailing columns; for JSON, named values that don't match any column names.
jsonExtension Changes to this property will trigger replacement. string
If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON, set this to GEOJSON for newline-delimited GeoJSON.
maxBadRecords Changes to this property will trigger replacement. number
The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
nullMarker Changes to this property will trigger replacement. string
Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.
parquetOptions Changes to this property will trigger replacement. JobLoadParquetOptions
Parquet options for load jobs and for creating external tables. Structure is documented below.
projectionFields Changes to this property will trigger replacement. string[]
If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.
quote Changes to this property will trigger replacement. string
The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
schemaUpdateOptions Changes to this property will trigger replacement. string[]
Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
skipLeadingRows Changes to this property will trigger replacement. number
The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.
sourceFormat Changes to this property will trigger replacement. string
The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.
timePartitioning Changes to this property will trigger replacement. JobLoadTimePartitioning
Time-based partitioning specification for the destination table. Structure is documented below.
writeDisposition Changes to this property will trigger replacement. string
Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
destination_table
This property is required.
Changes to this property will trigger replacement.
JobLoadDestinationTable
The destination table to load the data into. Structure is documented below.
source_uris
This property is required.
Changes to this property will trigger replacement.
Sequence[str]
The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
allow_jagged_rows Changes to this property will trigger replacement. bool
Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.
allow_quoted_newlines Changes to this property will trigger replacement. bool
Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
autodetect Changes to this property will trigger replacement. bool
Indicates if we should automatically infer the options and schema for CSV and JSON sources.
create_disposition Changes to this property will trigger replacement. str
Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
destination_encryption_configuration Changes to this property will trigger replacement. JobLoadDestinationEncryptionConfiguration
Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
encoding Changes to this property will trigger replacement. str
The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
field_delimiter Changes to this property will trigger replacement. str
The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
ignore_unknown_values Changes to this property will trigger replacement. bool
Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: for CSV, trailing columns; for JSON, named values that don't match any column names.
json_extension Changes to this property will trigger replacement. str
If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON, set this to GEOJSON for newline-delimited GeoJSON.
max_bad_records Changes to this property will trigger replacement. int
The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
null_marker Changes to this property will trigger replacement. str
Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.
parquet_options Changes to this property will trigger replacement. JobLoadParquetOptions
Parquet options for load jobs and for creating external tables. Structure is documented below.
projection_fields Changes to this property will trigger replacement. Sequence[str]
If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.
quote Changes to this property will trigger replacement. str
The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
schema_update_options Changes to this property will trigger replacement. Sequence[str]
Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
skip_leading_rows Changes to this property will trigger replacement. int
The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.
source_format Changes to this property will trigger replacement. str
The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.
time_partitioning Changes to this property will trigger replacement. JobLoadTimePartitioning
Time-based partitioning specification for the destination table. Structure is documented below.
write_disposition Changes to this property will trigger replacement. str
Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
destinationTable
This property is required.
Changes to this property will trigger replacement.
Property Map
The destination table to load the data into. Structure is documented below.
sourceUris
This property is required.
Changes to this property will trigger replacement.
List<String>
The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.
allowJaggedRows Changes to this property will trigger replacement. Boolean
Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.
allowQuotedNewlines Changes to this property will trigger replacement. Boolean
Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.
autodetect Changes to this property will trigger replacement. Boolean
Indicates if we should automatically infer the options and schema for CSV and JSON sources.
createDisposition Changes to this property will trigger replacement. String
Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
destinationEncryptionConfiguration Changes to this property will trigger replacement. Property Map
Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
encoding Changes to this property will trigger replacement. String
The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
fieldDelimiter Changes to this property will trigger replacement. String
The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
ignoreUnknownValues Changes to this property will trigger replacement. Boolean
Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: for CSV, trailing columns; for JSON, named values that don't match any column names.
jsonExtension Changes to this property will trigger replacement. String
If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON, set this to GEOJSON for newline-delimited GeoJSON.
maxBadRecords Changes to this property will trigger replacement. Number
The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
nullMarker Changes to this property will trigger replacement. String
Specifies a string that represents a null value in a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value.
parquetOptions Changes to this property will trigger replacement. Property Map
Parquet options for load jobs and for creating external tables. Structure is documented below.
projectionFields Changes to this property will trigger replacement. List<String>
If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.
quote Changes to this property will trigger replacement. String
The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.
schemaUpdateOptions Changes to this property will trigger replacement. List<String>
Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
skipLeadingRows Changes to this property will trigger replacement. Number
The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped. When autodetect is on, the behavior is the following: skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.
sourceFormat Changes to this property will trigger replacement. String
The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". The default value is CSV.
timePartitioning Changes to this property will trigger replacement. Property Map
Time-based partitioning specification for the destination table. Structure is documented below.
writeDisposition Changes to this property will trigger replacement. String
Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
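As a quick illustration of how the load properties above fit together, here is a minimal TypeScript sketch of a CSV load job. The bucket URI, project, dataset, and table identifiers are placeholders for this example only; the property names and allowed values come from the tables above.

import * as gcp from "@pulumi/gcp";

// Minimal sketch: load headered CSV files from Cloud Storage into an existing table.
// "my-project", "example_dataset", "example_table" and the gs:// URI are placeholders.
const csvLoad = new gcp.bigquery.Job("csv-load", {
    jobId: "job_load_csv_example",
    load: {
        sourceUris: ["gs://example-bucket/data/*.csv"], // one '*' wildcard, placed after the bucket name
        destinationTable: {
            projectId: "my-project",
            datasetId: "example_dataset",
            tableId: "example_table",
        },
        sourceFormat: "CSV",
        skipLeadingRows: 1,                // skip the header row
        fieldDelimiter: ",",
        maxBadRecords: 10,                 // tolerate up to 10 bad records
        createDisposition: "CREATE_NEVER", // the destination table must already exist
        writeDisposition: "WRITE_APPEND",
    },
});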

JobLoadDestinationEncryptionConfiguration
, JobLoadDestinationEncryptionConfigurationArgs

KmsKeyName
This property is required.
Changes to this property will trigger replacement.
string
Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
KmsKeyVersion string
(Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
KmsKeyName
This property is required.
Changes to this property will trigger replacement.
string
Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
KmsKeyVersion string
(Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
kmsKeyName
This property is required.
Changes to this property will trigger replacement.
String
Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
kmsKeyVersion String
(Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
kmsKeyName
This property is required.
Changes to this property will trigger replacement.
string
Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
kmsKeyVersion string
(Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
kms_key_name
This property is required.
Changes to this property will trigger replacement.
str
Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
kms_key_version str
(Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
kmsKeyName
This property is required.
Changes to this property will trigger replacement.
String
Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
kmsKeyVersion String
(Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
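To attach a customer-managed key to the destination table of a load job, set kmsKeyName inside destinationEncryptionConfiguration. The sketch below is illustrative only: the Cloud KMS key resource name and table identifiers are placeholders, and the BigQuery service account must already have access to that key.

import * as gcp from "@pulumi/gcp";

// Sketch: load into a table encrypted with a customer-managed Cloud KMS key (placeholder names).
const encryptedLoad = new gcp.bigquery.Job("encrypted-load", {
    jobId: "job_load_cmek_example",
    load: {
        sourceUris: ["gs://example-bucket/data.csv"],
        destinationTable: {
            projectId: "my-project",
            datasetId: "example_dataset",
            tableId: "encrypted_table",
        },
        destinationEncryptionConfiguration: {
            // Full KMS key resource name; the BigQuery service account needs access to it.
            kmsKeyName: "projects/my-project/locations/us/keyRings/example-ring/cryptoKeys/example-key",
        },
        autodetect: true,
    },
});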

JobLoadDestinationTable
, JobLoadDestinationTableArgs

TableId
This property is required.
Changes to this property will trigger replacement.
string
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
DatasetId Changes to this property will trigger replacement. string
The ID of the dataset containing this table.
ProjectId Changes to this property will trigger replacement. string
The ID of the project containing this table.
TableId
This property is required.
Changes to this property will trigger replacement.
string
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
DatasetId Changes to this property will trigger replacement. string
The ID of the dataset containing this table.
ProjectId Changes to this property will trigger replacement. string
The ID of the project containing this table.
tableId
This property is required.
Changes to this property will trigger replacement.
String
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
datasetId Changes to this property will trigger replacement. String
The ID of the dataset containing this table.
projectId Changes to this property will trigger replacement. String
The ID of the project containing this table.
tableId
This property is required.
Changes to this property will trigger replacement.
string
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
datasetId Changes to this property will trigger replacement. string
The ID of the dataset containing this table.
projectId Changes to this property will trigger replacement. string
The ID of the project containing this table.
table_id
This property is required.
Changes to this property will trigger replacement.
str
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
dataset_id Changes to this property will trigger replacement. str
The ID of the dataset containing this table.
project_id Changes to this property will trigger replacement. str
The ID of the project containing this table.
tableId
This property is required.
Changes to this property will trigger replacement.
String
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
datasetId Changes to this property will trigger replacement. String
The ID of the dataset containing this table.
projectId Changes to this property will trigger replacement. String
The ID of the project containing this table.
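As described above, the tableId field accepts either a bare table ID (when projectId and datasetId are set alongside it) or a full resource name. A small sketch of the two equivalent forms that can be passed to load.destinationTable (or query.destinationTable), using placeholder identifiers:

// Form 1: bare table ID plus explicit project and dataset (placeholder values).
const tableByParts = {
    projectId: "my-project",
    datasetId: "example_dataset",
    tableId: "example_table",
};

// Form 2: a single fully qualified resource name carried in tableId.
const tableByResourceName = {
    tableId: "projects/my-project/datasets/example_dataset/tables/example_table",
};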

JobLoadParquetOptions
, JobLoadParquetOptionsArgs

EnableListInference Changes to this property will trigger replacement. bool
If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.
EnumAsString Changes to this property will trigger replacement. bool
If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
EnableListInference Changes to this property will trigger replacement. bool
If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.
EnumAsString Changes to this property will trigger replacement. bool
If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
enableListInference Changes to this property will trigger replacement. Boolean
If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.
enumAsString Changes to this property will trigger replacement. Boolean
If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
enableListInference Changes to this property will trigger replacement. boolean
If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.
enumAsString Changes to this property will trigger replacement. boolean
If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
enable_list_inference Changes to this property will trigger replacement. bool
If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.
enum_as_string Changes to this property will trigger replacement. bool
If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
enableListInference Changes to this property will trigger replacement. Boolean
If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.
enumAsString Changes to this property will trigger replacement. Boolean
If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
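When loading Parquet files, these two flags sit under load.parquetOptions alongside sourceFormat: "PARQUET". A hedged sketch with placeholder bucket and table names:

import * as gcp from "@pulumi/gcp";

// Sketch: load Parquet exports, inferring LIST logical types and reading ENUMs as STRING.
const parquetLoad = new gcp.bigquery.Job("parquet-load", {
    jobId: "job_load_parquet_example",
    load: {
        sourceUris: ["gs://example-bucket/exports/*.parquet"],
        destinationTable: {
            projectId: "my-project",
            datasetId: "example_dataset",
            tableId: "parquet_table",
        },
        sourceFormat: "PARQUET",
        parquetOptions: {
            enableListInference: true, // use schema inference for the Parquet LIST logical type
            enumAsString: true,        // read the ENUM logical type as STRING instead of BYTES
        },
        writeDisposition: "WRITE_TRUNCATE",
    },
});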

JobLoadTimePartitioning
, JobLoadTimePartitioningArgs

Type
This property is required.
Changes to this property will trigger replacement.
string
The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.
ExpirationMs Changes to this property will trigger replacement. string
Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.
Field Changes to this property will trigger replacement. string
If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.
Type
This property is required.
Changes to this property will trigger replacement.
string
The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.
ExpirationMs Changes to this property will trigger replacement. string
Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.
Field Changes to this property will trigger replacement. string
If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.
type
This property is required.
Changes to this property will trigger replacement.
String
The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.
expirationMs Changes to this property will trigger replacement. String
Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.
field Changes to this property will trigger replacement. String
If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.
type
This property is required.
Changes to this property will trigger replacement.
string
The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.
expirationMs Changes to this property will trigger replacement. string
Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.
field Changes to this property will trigger replacement. string
If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.
type
This property is required.
Changes to this property will trigger replacement.
str
The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.
expiration_ms Changes to this property will trigger replacement. str
Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.
field Changes to this property will trigger replacement. str
If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.
type
This property is required.
Changes to this property will trigger replacement.
String
The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, but in OnePlatform the field will be treated as unset.
expirationMs Changes to this property will trigger replacement. String
Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.
field Changes to this property will trigger replacement. String
If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. A wrapper is used here because an empty string is an invalid value.
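Time partitioning is configured inside the load block. A minimal sketch, assuming a CSV export in a placeholder bucket and a destination table with a top-level TIMESTAMP column named event_ts:

import * as gcp from "@pulumi/gcp";

const partitionedLoad = new gcp.bigquery.Job("partitioned_load", {
    jobId: "job_load_partitioned",
    load: {
        sourceUris: ["gs://my-example-bucket/events/*.csv"], // placeholder bucket/prefix
        sourceFormat: "CSV",
        skipLeadingRows: 1,
        autodetect: true,
        destinationTable: {
            projectId: "my-project",     // placeholder project
            datasetId: "events_dataset", // placeholder dataset
            tableId: "events",           // placeholder table
        },
        timePartitioning: {
            type: "DAY",                // the only supported value
            field: "event_ts",          // omit to partition by the _PARTITIONTIME pseudo column
            expirationMs: "7776000000", // keep each partition's storage for 90 days
        },
        writeDisposition: "WRITE_APPEND",
    },
});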

JobQuery
, JobQueryArgs

Query
This property is required.
Changes to this property will trigger replacement.
string
SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
AllowLargeResults Changes to this property will trigger replacement. bool
If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.
Continuous Changes to this property will trigger replacement. bool
Whether to run the query as a continuous query or as a regular query.
CreateDisposition Changes to this property will trigger replacement. string
Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
DefaultDataset Changes to this property will trigger replacement. JobQueryDefaultDataset
Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.
DestinationEncryptionConfiguration Changes to this property will trigger replacement. JobQueryDestinationEncryptionConfiguration
Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
DestinationTable Changes to this property will trigger replacement. JobQueryDestinationTable
Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.
FlattenResults Changes to this property will trigger replacement. bool
If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.
MaximumBillingTier Changes to this property will trigger replacement. int
Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.
MaximumBytesBilled Changes to this property will trigger replacement. string
Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.
ParameterMode Changes to this property will trigger replacement. string
Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.
Priority Changes to this property will trigger replacement. string
Specifies a priority for the query. Default value is INTERACTIVE. Possible values are: INTERACTIVE, BATCH.
SchemaUpdateOptions Changes to this property will trigger replacement. List<string>
Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
ScriptOptions Changes to this property will trigger replacement. JobQueryScriptOptions
Options controlling the execution of scripts. Structure is documented below.
UseLegacySql Changes to this property will trigger replacement. bool
Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.
UseQueryCache Changes to this property will trigger replacement. bool
Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.
UserDefinedFunctionResources Changes to this property will trigger replacement. List<JobQueryUserDefinedFunctionResource>
Describes user-defined function resources used in the query. Structure is documented below.
WriteDisposition Changes to this property will trigger replacement. string
Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
Query
This property is required.
Changes to this property will trigger replacement.
string
SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
AllowLargeResults Changes to this property will trigger replacement. bool
If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.
Continuous Changes to this property will trigger replacement. bool
Whether to run the query as a continuous query or as a regular query.
CreateDisposition Changes to this property will trigger replacement. string
Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
DefaultDataset Changes to this property will trigger replacement. JobQueryDefaultDataset
Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.
DestinationEncryptionConfiguration Changes to this property will trigger replacement. JobQueryDestinationEncryptionConfiguration
Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
DestinationTable Changes to this property will trigger replacement. JobQueryDestinationTable
Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.
FlattenResults Changes to this property will trigger replacement. bool
If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.
MaximumBillingTier Changes to this property will trigger replacement. int
Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.
MaximumBytesBilled Changes to this property will trigger replacement. string
Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.
ParameterMode Changes to this property will trigger replacement. string
Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.
Priority Changes to this property will trigger replacement. string
Specifies a priority for the query. Default value is INTERACTIVE. Possible values are: INTERACTIVE, BATCH.
SchemaUpdateOptions Changes to this property will trigger replacement. []string
Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
ScriptOptions Changes to this property will trigger replacement. JobQueryScriptOptions
Options controlling the execution of scripts. Structure is documented below.
UseLegacySql Changes to this property will trigger replacement. bool
Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.
UseQueryCache Changes to this property will trigger replacement. bool
Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.
UserDefinedFunctionResources Changes to this property will trigger replacement. []JobQueryUserDefinedFunctionResource
Describes user-defined function resources used in the query. Structure is documented below.
WriteDisposition Changes to this property will trigger replacement. string
Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
query
This property is required.
Changes to this property will trigger replacement.
String
SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
allowLargeResults Changes to this property will trigger replacement. Boolean
If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.
continuous Changes to this property will trigger replacement. Boolean
Whether to run the query as a continuous query or as a regular query.
createDisposition Changes to this property will trigger replacement. String
Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
defaultDataset Changes to this property will trigger replacement. JobQueryDefaultDataset
Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.
destinationEncryptionConfiguration Changes to this property will trigger replacement. JobQueryDestinationEncryptionConfiguration
Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
destinationTable Changes to this property will trigger replacement. JobQueryDestinationTable
Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.
flattenResults Changes to this property will trigger replacement. Boolean
If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.
maximumBillingTier Changes to this property will trigger replacement. Integer
Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.
maximumBytesBilled Changes to this property will trigger replacement. String
Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.
parameterMode Changes to this property will trigger replacement. String
Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.
priority Changes to this property will trigger replacement. String
Specifies a priority for the query. Default value is INTERACTIVE. Possible values are: INTERACTIVE, BATCH.
schemaUpdateOptions Changes to this property will trigger replacement. List<String>
Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
scriptOptions Changes to this property will trigger replacement. JobQueryScriptOptions
Options controlling the execution of scripts. Structure is documented below.
useLegacySql Changes to this property will trigger replacement. Boolean
Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.
useQueryCache Changes to this property will trigger replacement. Boolean
Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.
userDefinedFunctionResources Changes to this property will trigger replacement. List<JobQueryUserDefinedFunctionResource>
Describes user-defined function resources used in the query. Structure is documented below.
writeDisposition Changes to this property will trigger replacement. String
Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
query
This property is required.
Changes to this property will trigger replacement.
string
SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
allowLargeResults Changes to this property will trigger replacement. boolean
If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.
continuous Changes to this property will trigger replacement. boolean
Whether to run the query as a continuous query or as a regular query.
createDisposition Changes to this property will trigger replacement. string
Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
defaultDataset Changes to this property will trigger replacement. JobQueryDefaultDataset
Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.
destinationEncryptionConfiguration Changes to this property will trigger replacement. JobQueryDestinationEncryptionConfiguration
Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
destinationTable Changes to this property will trigger replacement. JobQueryDestinationTable
Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.
flattenResults Changes to this property will trigger replacement. boolean
If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.
maximumBillingTier Changes to this property will trigger replacement. number
Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.
maximumBytesBilled Changes to this property will trigger replacement. string
Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.
parameterMode Changes to this property will trigger replacement. string
Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.
priority Changes to this property will trigger replacement. string
Specifies a priority for the query. Default value is INTERACTIVE. Possible values are: INTERACTIVE, BATCH.
schemaUpdateOptions Changes to this property will trigger replacement. string[]
Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
scriptOptions Changes to this property will trigger replacement. JobQueryScriptOptions
Options controlling the execution of scripts. Structure is documented below.
useLegacySql Changes to this property will trigger replacement. boolean
Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.
useQueryCache Changes to this property will trigger replacement. boolean
Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.
userDefinedFunctionResources Changes to this property will trigger replacement. JobQueryUserDefinedFunctionResource[]
Describes user-defined function resources used in the query. Structure is documented below.
writeDisposition Changes to this property will trigger replacement. string
Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
query
This property is required.
Changes to this property will trigger replacement.
str
SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
allow_large_results Changes to this property will trigger replacement. bool
If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.
continuous Changes to this property will trigger replacement. bool
Whether to run the query as a continuous query or as a regular query.
create_disposition Changes to this property will trigger replacement. str
Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
default_dataset Changes to this property will trigger replacement. JobQueryDefaultDataset
Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.
destination_encryption_configuration Changes to this property will trigger replacement. JobQueryDestinationEncryptionConfiguration
Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
destination_table Changes to this property will trigger replacement. JobQueryDestinationTable
Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.
flatten_results Changes to this property will trigger replacement. bool
If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.
maximum_billing_tier Changes to this property will trigger replacement. int
Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.
maximum_bytes_billed Changes to this property will trigger replacement. str
Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.
parameter_mode Changes to this property will trigger replacement. str
Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.
priority Changes to this property will trigger replacement. str
Specifies a priority for the query. Default value is INTERACTIVE. Possible values are: INTERACTIVE, BATCH.
schema_update_options Changes to this property will trigger replacement. Sequence[str]
Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
script_options Changes to this property will trigger replacement. JobQueryScriptOptions
Options controlling the execution of scripts. Structure is documented below.
use_legacy_sql Changes to this property will trigger replacement. bool
Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.
use_query_cache Changes to this property will trigger replacement. bool
Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.
user_defined_function_resources Changes to this property will trigger replacement. Sequence[JobQueryUserDefinedFunctionResource]
Describes user-defined function resources used in the query. Structure is documented below.
write_disposition Changes to this property will trigger replacement. str
Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
query
This property is required.
Changes to this property will trigger replacement.
String
SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. NOTE: queries containing DML language (DELETE, UPDATE, MERGE, INSERT) must specify create_disposition = "" and write_disposition = "".
allowLargeResults Changes to this property will trigger replacement. Boolean
If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. However, you must still set destinationTable when result size exceeds the allowed maximum response size.
continuous Changes to this property will trigger replacement. Boolean
Whether to run the query as a continuous query or as a regular query.
createDisposition Changes to this property will trigger replacement. String
Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is CREATE_IF_NEEDED. Possible values are: CREATE_IF_NEEDED, CREATE_NEVER.
defaultDataset Changes to this property will trigger replacement. Property Map
Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. Structure is documented below.
destinationEncryptionConfiguration Changes to this property will trigger replacement. Property Map
Custom encryption configuration (e.g., Cloud KMS keys) Structure is documented below.
destinationTable Changes to this property will trigger replacement. Property Map
Describes the table where the query results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery. Structure is documented below.
flattenResults Changes to this property will trigger replacement. Boolean
If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.
maximumBillingTier Changes to this property will trigger replacement. Number
Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default.
maximumBytesBilled Changes to this property will trigger replacement. String
Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.
parameterMode Changes to this property will trigger replacement. String
Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query.
priority Changes to this property will trigger replacement. String
Specifies a priority for the query. Default value is INTERACTIVE. Possible values are: INTERACTIVE, BATCH.
schemaUpdateOptions Changes to this property will trigger replacement. List<String>
Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.
scriptOptions Changes to this property will trigger replacement. Property Map
Options controlling the execution of scripts. Structure is documented below.
useLegacySql Changes to this property will trigger replacement. Boolean
Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL.
useQueryCache Changes to this property will trigger replacement. Boolean
Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.
userDefinedFunctionResources Changes to this property will trigger replacement. List<Property Map>
Describes user-defined function resources used in the query. Structure is documented below.
writeDisposition Changes to this property will trigger replacement. String
Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. Default value is WRITE_EMPTY. Possible values are: WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY.
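For contrast with the legacy SQL example at the top of this page, the following sketch runs a GoogleSQL (useLegacySql: false) query at batch priority, appends the result to a destination table, and allows nullable columns to be added to that table's schema. All project, dataset, and table names are placeholders.

import * as gcp from "@pulumi/gcp";

const appendQuery = new gcp.bigquery.Job("append_query", {
    jobId: "job_query_append",
    query: {
        query: "SELECT region, COUNT(*) AS n FROM `my-project.my_dataset.events` GROUP BY region",
        useLegacySql: false, // GoogleSQL (standard SQL) dialect
        priority: "BATCH",
        destinationTable: {
            projectId: "my-project",
            datasetId: "my_dataset",
            tableId: "region_counts",
        },
        createDisposition: "CREATE_IF_NEEDED",
        writeDisposition: "WRITE_APPEND",
        schemaUpdateOptions: ["ALLOW_FIELD_ADDITION"], // valid because writeDisposition is WRITE_APPEND
        maximumBytesBilled: "1000000000", // fail instead of billing more than ~1 GB
    },
});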

JobQueryDefaultDataset
, JobQueryDefaultDatasetArgs

DatasetId
This property is required.
Changes to this property will trigger replacement.
string
The dataset. Can be specified as {{dataset_id}} if project_id is also set, or of the form projects/{{project}}/datasets/{{dataset_id}} if not.
ProjectId Changes to this property will trigger replacement. string
The ID of the project containing this table.
DatasetId
This property is required.
Changes to this property will trigger replacement.
string
The dataset. Can be specified as {{dataset_id}} if project_id is also set, or of the form projects/{{project}}/datasets/{{dataset_id}} if not.
ProjectId Changes to this property will trigger replacement. string
The ID of the project containing this table.
datasetId
This property is required.
Changes to this property will trigger replacement.
String
The dataset. Can be specified as {{dataset_id}} if project_id is also set, or of the form projects/{{project}}/datasets/{{dataset_id}} if not.
projectId Changes to this property will trigger replacement. String
The ID of the project containing this table.
datasetId
This property is required.
Changes to this property will trigger replacement.
string
The dataset. Can be specified as {{dataset_id}} if project_id is also set, or of the form projects/{{project}}/datasets/{{dataset_id}} if not.
projectId Changes to this property will trigger replacement. string
The ID of the project containing this table.
dataset_id
This property is required.
Changes to this property will trigger replacement.
str
The dataset. Can be specified as {{dataset_id}} if project_id is also set, or of the form projects/{{project}}/datasets/{{dataset_id}} if not.
project_id Changes to this property will trigger replacement. str
The ID of the project containing this table.
datasetId
This property is required.
Changes to this property will trigger replacement.
String
The dataset. Can be specified as {{dataset_id}} if project_id is also set, or of the form projects/{{project}}/datasets/{{dataset_id}} if not.
projectId Changes to this property will trigger replacement. String
The ID of the project containing this table.
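A short sketch of defaultDataset: the unqualified table name in the query resolves against the dataset configured below (all identifiers are placeholders).

import * as gcp from "@pulumi/gcp";

const defaultDatasetQuery = new gcp.bigquery.Job("default_dataset_query", {
    jobId: "job_query_default_dataset",
    query: {
        // "events" resolves to my-project.my_dataset.events via the default dataset.
        query: "SELECT COUNT(*) AS n FROM events",
        useLegacySql: false,
        defaultDataset: {
            datasetId: "my_dataset", // or "projects/my-project/datasets/my_dataset"
            projectId: "my-project",
        },
    },
});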

JobQueryDestinationEncryptionConfiguration
, JobQueryDestinationEncryptionConfigurationArgs

KmsKeyName
This property is required.
Changes to this property will trigger replacement.
string
Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
KmsKeyVersion string
(Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
KmsKeyName
This property is required.
Changes to this property will trigger replacement.
string
Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
KmsKeyVersion string
(Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
kmsKeyName
This property is required.
Changes to this property will trigger replacement.
String
Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
kmsKeyVersion String
(Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
kmsKeyName
This property is required.
Changes to this property will trigger replacement.
string
Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
kmsKeyVersion string
(Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
kms_key_name
This property is required.
Changes to this property will trigger replacement.
str
Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
kms_key_version str
(Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
kmsKeyName
This property is required.
Changes to this property will trigger replacement.
String
Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.
kmsKeyVersion String
(Output) Describes the Cloud KMS encryption key version used to protect the destination BigQuery table.
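A sketch of a query job that writes to a CMEK-protected destination table. The KMS key path and table coordinates are placeholders, and (as noted above) the project's BigQuery service account must have access to the key.

import * as gcp from "@pulumi/gcp";

const cmekQuery = new gcp.bigquery.Job("cmek_query", {
    jobId: "job_query_cmek",
    query: {
        query: "SELECT 1 AS x",
        useLegacySql: false,
        destinationTable: {
            projectId: "my-project",
            datasetId: "my_dataset",
            tableId: "cmek_results",
        },
        destinationEncryptionConfiguration: {
            // Placeholder key resource name.
            kmsKeyName: "projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key",
        },
    },
});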

JobQueryDestinationTable
, JobQueryDestinationTableArgs

TableId
This property is required.
Changes to this property will trigger replacement.
string
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
DatasetId Changes to this property will trigger replacement. string
The ID of the dataset containing this table.
ProjectId Changes to this property will trigger replacement. string
The ID of the project containing this table.
TableId
This property is required.
Changes to this property will trigger replacement.
string
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
DatasetId Changes to this property will trigger replacement. string
The ID of the dataset containing this table.
ProjectId Changes to this property will trigger replacement. string
The ID of the project containing this table.
tableId
This property is required.
Changes to this property will trigger replacement.
String
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
datasetId Changes to this property will trigger replacement. String
The ID of the dataset containing this table.
projectId Changes to this property will trigger replacement. String
The ID of the project containing this table.
tableId
This property is required.
Changes to this property will trigger replacement.
string
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
datasetId Changes to this property will trigger replacement. string
The ID of the dataset containing this table.
projectId Changes to this property will trigger replacement. string
The ID of the project containing this table.
table_id
This property is required.
Changes to this property will trigger replacement.
str
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
dataset_id Changes to this property will trigger replacement. str
The ID of the dataset containing this table.
project_id Changes to this property will trigger replacement. str
The ID of the project containing this table.
tableId
This property is required.
Changes to this property will trigger replacement.
String
The table. Can be specified as {{table_id}} if project_id and dataset_id are also set, or of the form projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} if not.
datasetId Changes to this property will trigger replacement. String
The ID of the dataset containing this table.
projectId Changes to this property will trigger replacement. String
The ID of the project containing this table.
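As the description above notes, the table may also be given in its fully qualified form, in which case projectId and datasetId can be left unset. A sketch with placeholder names:

import * as gcp from "@pulumi/gcp";

const qualifiedDestination = new gcp.bigquery.Job("qualified_destination", {
    jobId: "job_query_qualified_destination",
    query: {
        query: "SELECT 1 AS x",
        useLegacySql: false,
        destinationTable: {
            // The fully qualified form carries the project and dataset in tableId itself.
            tableId: "projects/my-project/datasets/my_dataset/tables/query_results",
        },
    },
});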

JobQueryScriptOptions
, JobQueryScriptOptionsArgs

KeyResultStatement Changes to this property will trigger replacement. string
Determines which statement in the script represents the "key result", used to populate the schema and query results of the script job. Possible values are: LAST, FIRST_SELECT.
StatementByteBudget Changes to this property will trigger replacement. string
Limit on the number of bytes billed per statement. Exceeding this budget results in an error.
StatementTimeoutMs Changes to this property will trigger replacement. string
Timeout period for each statement in a script.
KeyResultStatement Changes to this property will trigger replacement. string
Determines which statement in the script represents the "key result", used to populate the schema and query results of the script job. Possible values are: LAST, FIRST_SELECT.
StatementByteBudget Changes to this property will trigger replacement. string
Limit on the number of bytes billed per statement. Exceeding this budget results in an error.
StatementTimeoutMs Changes to this property will trigger replacement. string
Timeout period for each statement in a script.
keyResultStatement Changes to this property will trigger replacement. String
Determines which statement in the script represents the "key result", used to populate the schema and query results of the script job. Possible values are: LAST, FIRST_SELECT.
statementByteBudget Changes to this property will trigger replacement. String
Limit on the number of bytes billed per statement. Exceeding this budget results in an error.
statementTimeoutMs Changes to this property will trigger replacement. String
Timeout period for each statement in a script.
keyResultStatement Changes to this property will trigger replacement. string
Determines which statement in the script represents the "key result", used to populate the schema and query results of the script job. Possible values are: LAST, FIRST_SELECT.
statementByteBudget Changes to this property will trigger replacement. string
Limit on the number of bytes billed per statement. Exceeding this budget results in an error.
statementTimeoutMs Changes to this property will trigger replacement. string
Timeout period for each statement in a script.
key_result_statement Changes to this property will trigger replacement. str
Determines which statement in the script represents the "key result", used to populate the schema and query results of the script job. Possible values are: LAST, FIRST_SELECT.
statement_byte_budget Changes to this property will trigger replacement. str
Limit on the number of bytes billed per statement. Exceeding this budget results in an error.
statement_timeout_ms Changes to this property will trigger replacement. str
Timeout period for each statement in a script.
keyResultStatement Changes to this property will trigger replacement. String
Determines which statement in the script represents the "key result", used to populate the schema and query results of the script job. Possible values are: LAST, FIRST_SELECT.
statementByteBudget Changes to this property will trigger replacement. String
Limit on the number of bytes billed per statement. Exceeding this budget results in an error.
statementTimeoutMs Changes to this property will trigger replacement. String
Timeout period for each statement in a script.
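The example at the top of this page sets keyResultStatement; the sketch below also applies a per-statement byte budget and timeout to a small multi-statement script (all values are illustrative).

import * as gcp from "@pulumi/gcp";

const scriptedQuery = new gcp.bigquery.Job("scripted_query", {
    jobId: "job_query_script",
    query: {
        query: "DECLARE n INT64 DEFAULT 3; SELECT n * 2 AS doubled;",
        useLegacySql: false,
        scriptOptions: {
            keyResultStatement: "LAST",        // the final SELECT supplies the job's schema and results
            statementTimeoutMs: "60000",       // abort any single statement after 60 s
            statementByteBudget: "1000000000", // error if a statement would bill more than ~1 GB
        },
    },
});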

JobQueryUserDefinedFunctionResource
, JobQueryUserDefinedFunctionResourceArgs

InlineCode Changes to this property will trigger replacement. string
An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.
ResourceUri Changes to this property will trigger replacement. string
A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
InlineCode Changes to this property will trigger replacement. string
An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.
ResourceUri Changes to this property will trigger replacement. string
A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
inlineCode Changes to this property will trigger replacement. String
An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.
resourceUri Changes to this property will trigger replacement. String
A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
inlineCode Changes to this property will trigger replacement. string
An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.
resourceUri Changes to this property will trigger replacement. string
A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
inline_code Changes to this property will trigger replacement. str
An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.
resource_uri Changes to this property will trigger replacement. str
A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
inlineCode Changes to this property will trigger replacement. String
An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.
resourceUri Changes to this property will trigger replacement. String
A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
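A field-shape sketch of userDefinedFunctionResources on a legacy SQL query. The inline snippet and the Cloud Storage object below are placeholders, not a working UDF.

import * as gcp from "@pulumi/gcp";

const udfQuery = new gcp.bigquery.Job("udf_query", {
    jobId: "job_query_udf",
    query: {
        query: "SELECT value FROM [my-project:my_dataset.events]", // legacy SQL dialect
        useLegacySql: true,
        destinationTable: {
            projectId: "my-project",
            datasetId: "my_dataset",
            tableId: "udf_results",
        },
        writeDisposition: "WRITE_TRUNCATE",
        userDefinedFunctionResources: [
            { inlineCode: "// inline JavaScript UDF body (placeholder)" },
            { resourceUri: "gs://my-example-bucket/udfs/helpers.js" },
        ],
    },
});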

JobStatus
, JobStatusArgs

ErrorResults List<JobStatusErrorResult>
(Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
Errors List<JobStatusError>
(Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
State string
(Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
ErrorResults []JobStatusErrorResult
(Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
Errors []JobStatusError
(Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
State string
(Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
errorResults List<JobStatusErrorResult>
(Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
errors List<JobStatusError>
(Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
state String
(Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
errorResults JobStatusErrorResult[]
(Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
errors JobStatusError[]
(Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
state string
(Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
error_results Sequence[JobStatusErrorResult]
(Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
errors Sequence[JobStatusError]
(Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
state str
(Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
errorResults List<Property Map>
(Output) Final error result of the job. If present, indicates that the job has completed and was unsuccessful. Structure is documented below.
errors List<Property Map>
(Output) The first errors encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has not completed or was unsuccessful. Structure is documented below.
state String
(Output) Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'.
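These fields are outputs. A sketch of reading them from the resource, assuming the computed status list is exposed as the statuses output property:

import * as gcp from "@pulumi/gcp";

const statusQuery = new gcp.bigquery.Job("status_query", {
    jobId: "job_query_status",
    query: {
        query: "SELECT 1 AS x",
        useLegacySql: false,
    },
});

// Surface the running state ('PENDING', 'RUNNING', or 'DONE') and, if the job
// failed, the short reason code from the final error result.
export const jobState = statusQuery.statuses.apply(s => s[0]?.state);
export const jobErrorReason = statusQuery.statuses.apply(
    s => s[0]?.errorResults?.[0]?.reason ?? "none");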

JobStatusError
, JobStatusErrorArgs

Location string
Specifies where the error occurred, if present.
Message string
A human-readable description of the error.
Reason string
A short error code that summarizes the error.
Location string
Specifies where the error occurred, if present.
Message string
A human-readable description of the error.
Reason string
A short error code that summarizes the error.
location String
Specifies where the error occurred, if present.
message String
A human-readable description of the error.
reason String
A short error code that summarizes the error.
location string
Specifies where the error occurred, if present.
message string
A human-readable description of the error.
reason string
A short error code that summarizes the error.
location str
Specifies where the error occurred, if present.
message str
A human-readable description of the error.
reason str
A short error code that summarizes the error.
location String
Specifies where the error occurred, if present.
message String
A human-readable description of the error.
reason String
A short error code that summarizes the error.

JobStatusErrorResult
, JobStatusErrorResultArgs

Location string
Specifies where the error occurred, if present.
Message string
A human-readable description of the error.
Reason string
A short error code that summarizes the error.
Location string
Specifies where the error occurred, if present.
Message string
A human-readable description of the error.
Reason string
A short error code that summarizes the error.
location String
Specifies where the error occurred, if present.
message String
A human-readable description of the error.
reason String
A short error code that summarizes the error.
location string
Specifies where the error occurred, if present.
message string
A human-readable description of the error.
reason string
A short error code that summarizes the error.
location str
Specifies where the error occurred, if present.
message str
A human-readable description of the error.
reason str
A short error code that summarizes the error.
location String
Specifies where the error occurred, if present.
message String
A human-readable description of the error.
reason String
A short error code that summarizes the error.

Import

Job can be imported using any of these accepted formats:

  • projects/{{project}}/jobs/{{job_id}}/location/{{location}}

  • projects/{{project}}/jobs/{{job_id}}

  • {{project}}/{{job_id}}/{{location}}

  • {{job_id}}/{{location}}

  • {{project}}/{{job_id}}

  • {{job_id}}

When using the pulumi import command, Job can be imported using one of the formats above. For example:

$ pulumi import gcp:bigquery/job:Job default projects/{{project}}/jobs/{{job_id}}/location/{{location}}
$ pulumi import gcp:bigquery/job:Job default projects/{{project}}/jobs/{{job_id}}
$ pulumi import gcp:bigquery/job:Job default {{project}}/{{job_id}}/{{location}}
$ pulumi import gcp:bigquery/job:Job default {{job_id}}/{{location}}
$ pulumi import gcp:bigquery/job:Job default {{project}}/{{job_id}}
$ pulumi import gcp:bigquery/job:Job default {{job_id}}

To learn more about importing existing cloud resources, see Importing resources.

Package Details

Repository
Google Cloud (GCP) Classic pulumi/pulumi-gcp
License
Apache-2.0
Notes
This Pulumi package is based on the google-beta Terraform Provider.