diff --git a/artifact-registry/Cargo.toml b/artifact-registry/Cargo.toml index 716fdb1c..f7c32944 100644 --- a/artifact-registry/Cargo.toml +++ b/artifact-registry/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "google-cloud-artifact-registry" -version = "0.7.1" +version = "0.8.0" edition = "2021" authors = ["yoshidan "] repository = "https://github.com/yoshidan/google-cloud-rust/tree/main/artifact-registry" @@ -13,9 +13,9 @@ documentation = "https://docs.rs/google-cloud-artifact-registry/latest/google_cl [dependencies] google-cloud-token = { version = "0.1.2", path = "../foundation/token" } google-cloud-auth = { optional = true, version = "0.17", path="../foundation/auth", default-features=false } -google-cloud-googleapis = { version="0.15.0", path = "../googleapis", features=["artifact-registry"]} -google-cloud-gax = { version = "0.19.1", path = "../foundation/gax"} -google-cloud-longrunning = { version = "0.20.0", path = "../foundation/longrunning" } +google-cloud-googleapis = { version="0.16.0", path = "../googleapis", features=["artifact-registry"]} +google-cloud-gax = { version = "0.19.2", path = "../foundation/gax"} +google-cloud-longrunning = { version = "0.21.0", path = "../foundation/longrunning" } tracing = "0.1" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/artifact-registry/src/client.rs b/artifact-registry/src/client.rs index 03291f3b..621e23c5 100644 --- a/artifact-registry/src/client.rs +++ b/artifact-registry/src/client.rs @@ -146,7 +146,13 @@ mod tests { create_time: None, update_time: None, kms_key_name: "".to_string(), + mode: 0, + cleanup_policies: Default::default(), + size_bytes: 0, + satisfies_pzs: false, + cleanup_policy_dry_run: false, format_config: None, + mode_config: None, }), }; let mut created_repository = client.create_repository(create_request.clone(), None).await.unwrap(); diff --git a/bigquery/Cargo.toml b/bigquery/Cargo.toml index a5b4fcfd..b351740a 100644 --- a/bigquery/Cargo.toml +++ 
b/bigquery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "google-cloud-bigquery" -version = "0.13.1" +version = "0.14.0" edition = "2021" authors = ["yoshidan "] repository = "https://github.com/yoshidan/google-cloud-rust/tree/main/bigquery" @@ -13,8 +13,8 @@ documentation = "https://docs.rs/google-cloud-bigquery/latest/google_cloud_bigqu [dependencies] async-trait = "0.1" google-cloud-token = { version = "0.1.2", path = "../foundation/token" } -google-cloud-googleapis = { version="0.15.0", path = "../googleapis", features=["bigquery"]} -google-cloud-gax = { version = "0.19.1", path = "../foundation/gax"} +google-cloud-googleapis = { version="0.16.0", path = "../googleapis", features=["bigquery"]} +google-cloud-gax = { version = "0.19.2", path = "../foundation/gax"} thiserror = "1.0" tracing = "0.1" reqwest = { version = "0.12.4", features = ["json", "stream", "multipart", "charset"], default-features = false } diff --git a/deny.toml b/deny.toml index f946bda4..ccde4d85 100644 --- a/deny.toml +++ b/deny.toml @@ -10,24 +10,6 @@ # when any section or field is not specified in your own configuration # Root options - -# If 1 or more target triples (and optionally, target_features) are specified, -# only the specified targets will be checked when running `cargo deny check`. -# This means, if a particular package is only ever used as a target specific -# dependency, such as, for example, the `nix` crate only being used via the -# `target_family = "unix"` configuration, that only having windows targets in -# this list would mean the nix crate, as well as any of its exclusive -# dependencies not shared by any other crates, would be ignored, as the target -# list here is effectively saying which targets you are building for. 
-targets = [ - # The triple can be any string, but only the target triples built in to - # rustc (as of 1.40) can be checked against actual config expressions - #{ triple = "x86_64-unknown-linux-musl" }, - # You can also specify which target_features you promise are enabled for a - # particular target. target_features are currently not validated against - # the actual valid features supported by the target architecture. - #{ triple = "wasm32-unknown-unknown", features = ["atomics"] }, -] # When creating the dependency graph used as the source of truth when checks are # executed, this field can be used to prune crates from the graph, removing them # from the view of cargo-deny. This is an extremely heavy hammer, as if a crate @@ -36,45 +18,25 @@ targets = [ # so it should be used with care. The identifiers are [Package ID Specifications] # (https://doc.rust-lang.org/cargo/reference/pkgid-spec.html) #exclude = [] -# If true, metadata will be collected with `--all-features`. Note that this can't -# be toggled off if true, if you want to conditionally enable `--all-features` it -# is recommended to pass `--all-features` on the cmd line instead -all-features = false -# If true, metadata will be collected with `--no-default-features`. The same -# caveat with `all-features` applies -no-default-features = false # If set, these feature will be enabled when collecting metadata. If `--features` # is specified on the cmd line they will take precedence over this option. #features = [] -# When outputting inclusion graphs in diagnostics that include features, this -# option can be used to specify the depth at which feature edges will be added. -# This option is included since the graphs can be quite large and the addition -# of features from the crate(s) to all of the graph roots can be far too verbose. 
-# This option can be overridden via `--feature-depth` on the cmd line -feature-depth = 1 - # This section is considered when running `cargo deny check advisories` # More documentation for the advisories section can be found here: # https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html [advisories] +version = 2 # The path where the advisory database is cloned/fetched into db-path = "~/.cargo/advisory-db" # The url(s) of the advisory databases to use db-urls = ["https://github.com/rustsec/advisory-db"] -# The lint level for security vulnerabilities -vulnerability = "deny" -# The lint level for unmaintained crates -unmaintained = "warn" # The lint level for crates that have been yanked from their source registry yanked = "warn" -# The lint level for crates with security notices. Note that as of -# 2019-12-17 there are no security notice advisories in -# https://github.com/rustsec/advisory-db -notice = "warn" # A list of advisory IDs to ignore. Note that ignored advisories will still # output a note when they are encountered. ignore = [ - #"RUSTSEC-0000-0000", + # TODO wait for release https://github.com/apache/arrow-rs/blob/f41c258246cd4bd9d89228cded9ed54dbd00faff/arrow-cast/Cargo.toml#L52 + "RUSTSEC-2023-0086", ] # Threshold for security vulnerabilities, any vulnerability with a CVSS score # lower than the range specified will be ignored. Note that ignored advisories @@ -96,8 +58,7 @@ ignore = [ # More documentation for the licenses section can be found here: # https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html [licenses] -# The lint level for crates which do not have a detectable license -unlicensed = "deny" +version = 2 # List of explicitly allowed licenses # See https://spdx.org/licenses/ for list of possible licenses # [possible values: any SPDX 3.11 short identifier (+ optional exception)]. 
@@ -109,29 +70,8 @@ allow = [ "Unicode-DFS-2016", "BSD-2-Clause", "BSD-3-Clause", - "CC0-1.0", - "Unicode-3.0" -] -# List of explicitly disallowed licenses -# See https://spdx.org/licenses/ for list of possible licenses -# [possible values: any SPDX 3.11 short identifier (+ optional exception)]. -deny = [ - #"Nokia", + "CC0-1.0" ] -# Lint level for licenses considered copyleft -copyleft = "warn" -# Blanket approval or denial for OSI-approved or FSF Free/Libre licenses -# * both - The license will be approved if it is both OSI-approved *AND* FSF -# * either - The license will be approved if it is either OSI-approved *OR* FSF -# * osi-only - The license will be approved if is OSI-approved *AND NOT* FSF -# * fsf-only - The license will be approved if is FSF *AND NOT* OSI-approved -# * neither - This predicate is ignored and the default lint level is used -allow-osi-fsf-free = "neither" -# Lint level used when no other predicates are matched -# 1. License isn't in the allow or deny lists -# 2. License isn't copyleft -# 3. License isn't OSI/FSF, or allow-osi-fsf-free = "neither" -default = "deny" # The confidence threshold for detecting a license from license text. # The higher the value, the more closely the license text must be to the # canonical license text of a valid SPDX license file. diff --git a/foundation/gax/Cargo.toml b/foundation/gax/Cargo.toml index 8206148d..bcac5242 100644 --- a/foundation/gax/Cargo.toml +++ b/foundation/gax/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "google-cloud-gax" -version = "0.19.1" +version = "0.19.2" authors = ["yoshidan "] edition = "2018" repository = "https://github.com/yoshidan/google-cloud-rust/tree/main/foundation/gax" @@ -12,7 +12,7 @@ description = "Google Cloud Platform gRPC retry library." 
[dependencies] tracing = "0.1" tokio = { version = "1.32", features = ["macros"] } -tonic = { version = "0.12", features = ["prost", "tls-webpki-roots"] } +tonic = { version = "0.12", default-features = false, features = ["prost", "tls-webpki-roots"] } thiserror = "1.0" tower = { version = "0.4", features = ["filter"] } http = "1.1" diff --git a/foundation/longrunning/Cargo.toml b/foundation/longrunning/Cargo.toml index 82bf60bf..42a34450 100644 --- a/foundation/longrunning/Cargo.toml +++ b/foundation/longrunning/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "google-cloud-longrunning" -version = "0.20.1" +version = "0.21.0" authors = ["yoshidan "] edition = "2021" repository = "https://github.com/yoshidan/google-cloud-rust/tree/main/foundation/longrunning" @@ -10,7 +10,7 @@ readme = "README.md" description = "Google Cloud Platform longrunning library." [dependencies] -google-cloud-googleapis = { version = "0.15.0", path = "../../googleapis" } -google-cloud-gax = { version = "0.19.1", path = "../gax" } -tonic = { version = "0.12", features = ["tls", "prost"] } +google-cloud-googleapis = { version = "0.16.0", path = "../../googleapis" } +google-cloud-gax = { version = "0.19.2", path = "../gax" } +tonic = { version = "0.12", default-features = false } prost = "0.13" diff --git a/googleapis/Cargo.toml b/googleapis/Cargo.toml index 4b16f446..23b24631 100644 --- a/googleapis/Cargo.toml +++ b/googleapis/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "google-cloud-googleapis" -version = "0.15.0" +version = "0.16.0" authors = ["yoshidan "] edition = "2021" repository = "https://github.com/yoshidan/google-cloud-rust/tree/main/googleapis" @@ -12,7 +12,7 @@ description = "Google Cloud Platform rust client." 
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -tonic = { version = "0.12", features = ["tls", "prost", "gzip"] } +tonic = { version = "0.12", default-features = false, features = ["prost", "codegen", "gzip"] } prost = "0.13" prost-types = "0.13" diff --git a/googleapis/googleapis b/googleapis/googleapis index 1dcb40c3..005df468 160000 --- a/googleapis/googleapis +++ b/googleapis/googleapis @@ -1 +1 @@ -Subproject commit 1dcb40c35ab57aa74c14ca4072f27c2df9e5f66c +Subproject commit 005df4681b89bd204a90b76168a6dc9d9e7bf4fe diff --git a/googleapis/src/bytes/google.api.rs b/googleapis/src/bytes/google.api.rs index c96f9f7a..cd38789a 100644 --- a/googleapis/src/bytes/google.api.rs +++ b/googleapis/src/bytes/google.api.rs @@ -19,7 +19,7 @@ pub struct Http { #[prost(bool, tag = "2")] pub fully_decode_reserved_expansion: bool, } -/// # gRPC Transcoding +/// gRPC Transcoding /// /// gRPC Transcoding is a feature for mapping between a gRPC method and one or /// more HTTP REST endpoints. It allows developers to build a single API service @@ -60,9 +60,8 @@ pub struct Http { /// /// This enables an HTTP REST to gRPC mapping as below: /// -/// HTTP | gRPC -/// -----|----- -/// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +/// - HTTP: `GET /v1/messages/123456` +/// - gRPC: `GetMessage(name: "messages/123456")` /// /// Any fields in the request message which are not bound by the path template /// automatically become HTTP query parameters if there is no HTTP request body. 
@@ -86,11 +85,9 @@ pub struct Http { /// /// This enables a HTTP JSON to RPC mapping as below: /// -/// HTTP | gRPC -/// -----|----- -/// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | -/// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: -/// "foo"))` +/// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo` +/// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub: +/// SubMessage(subfield: "foo"))` /// /// Note that fields which are mapped to URL query parameters must have a /// primitive type or a repeated primitive type or a non-repeated message type. @@ -120,10 +117,8 @@ pub struct Http { /// representation of the JSON in the request body is determined by /// protos JSON encoding: /// -/// HTTP | gRPC -/// -----|----- -/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -/// "123456" message { text: "Hi!" })` +/// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +/// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })` /// /// The special name `*` can be used in the body mapping to define that /// every field not bound by the path template should be mapped to the @@ -146,10 +141,8 @@ pub struct Http { /// /// The following HTTP JSON to RPC mapping is enabled: /// -/// HTTP | gRPC -/// -----|----- -/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -/// "123456" text: "Hi!")` +/// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" 
}` +/// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")` /// /// Note that when using `*` in the body mapping, it is not possible to /// have HTTP parameters, as all fields not bound by the path end in @@ -177,13 +170,13 @@ pub struct Http { /// /// This enables the following two alternative HTTP JSON to RPC mappings: /// -/// HTTP | gRPC -/// -----|----- -/// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -/// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: -/// "123456")` +/// - HTTP: `GET /v1/messages/123456` +/// - gRPC: `GetMessage(message_id: "123456")` /// -/// ## Rules for HTTP mapping +/// - HTTP: `GET /v1/users/me/messages/123456` +/// - gRPC: `GetMessage(user_id: "me" message_id: "123456")` +/// +/// Rules for HTTP mapping /// /// 1. Leaf request fields (recursive expansion nested messages in the request /// message) are classified into three categories: @@ -202,7 +195,7 @@ pub struct Http { /// request body, all /// fields are passed via URL path and URL query parameters. /// -/// ### Path template syntax +/// Path template syntax /// /// Template = "/" Segments \[ Verb \] ; /// Segments = Segment { "/" Segment } ; @@ -241,7 +234,7 @@ pub struct Http { /// Document]() as /// `{+var}`. /// -/// ## Using gRPC API Service Configuration +/// Using gRPC API Service Configuration /// /// gRPC API Service Configuration (service config) is a configuration language /// for configuring a gRPC service to become a user-facing product. The @@ -256,15 +249,14 @@ pub struct Http { /// specified in the service config will override any matching transcoding /// configuration in the proto. /// -/// Example: +/// The following example selects a gRPC method and applies an `HttpRule` to it: /// /// http: /// rules: -/// # Selects a gRPC method and applies HttpRule to it. 
/// - selector: example.v1.Messaging.GetMessage /// get: /v1/messages/{message_id}/{sub.subfield} /// -/// ## Special notes +/// Special notes /// /// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the /// proto to JSON conversion must follow the [proto3 @@ -545,6 +537,10 @@ pub struct Publishing { /// #[prost(string, tag = "110")] pub proto_reference_documentation_uri: ::prost::alloc::string::String, + /// Optional link to REST reference documentation. Example: + /// + #[prost(string, tag = "111")] + pub rest_reference_documentation_uri: ::prost::alloc::string::String, } /// Settings for Java client libraries. #[allow(clippy::derive_partial_eq_without_eq)] @@ -609,6 +605,27 @@ pub struct PythonSettings { /// Some settings. #[prost(message, optional, tag = "1")] pub common: ::core::option::Option, + /// Experimental features to be included during client library generation. + #[prost(message, optional, tag = "2")] + pub experimental_features: ::core::option::Option< + python_settings::ExperimentalFeatures, + >, +} +/// Nested message and enum types in `PythonSettings`. +pub mod python_settings { + /// Experimental features to be included during client library generation. + /// These fields will be deprecated once the feature graduates and is enabled + /// by default. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct ExperimentalFeatures { + /// Enables generation of asynchronous REST clients if `rest` transport is + /// enabled. By default, asynchronous REST clients will not be generated. + /// This feature will be enabled by default 1 month after launching the + /// feature in preview packages. + #[prost(bool, tag = "1")] + pub rest_async_io_enabled: bool, + } } /// Settings for Node client libraries. 
#[allow(clippy::derive_partial_eq_without_eq)] @@ -686,6 +703,13 @@ pub struct GoSettings { pub struct MethodSettings { /// The fully qualified name of the method, for which the options below apply. /// This is used to find the method to apply the options. + /// + /// Example: + /// + /// publishing: + /// method_settings: + /// - selector: google.storage.control.v2.StorageControl.CreateFolder + /// # method settings for CreateFolder... #[prost(string, tag = "1")] pub selector: ::prost::alloc::string::String, /// Describes settings to use for long-running operations when generating @@ -694,19 +718,29 @@ pub struct MethodSettings { /// /// Example of a YAML configuration:: /// - /// publishing: - /// method_settings: + /// publishing: + /// method_settings: /// - selector: google.cloud.speech.v2.Speech.BatchRecognize /// long_running: - /// initial_poll_delay: - /// seconds: 60 # 1 minute + /// initial_poll_delay: 60s # 1 minute /// poll_delay_multiplier: 1.5 - /// max_poll_delay: - /// seconds: 360 # 6 minutes - /// total_poll_timeout: - /// seconds: 54000 # 90 minutes + /// max_poll_delay: 360s # 6 minutes + /// total_poll_timeout: 54000s # 90 minutes #[prost(message, optional, tag = "2")] pub long_running: ::core::option::Option, + /// List of top-level fields of the request message, that should be + /// automatically populated by the client libraries based on their + /// (google.api.field_info).format. Currently supported format: UUID4. + /// + /// Example of a YAML configuration: + /// + /// publishing: + /// method_settings: + /// - selector: google.example.v1.ExampleService.CreateExample + /// auto_populated_fields: + /// - request_id + #[prost(string, repeated, tag = "3")] + pub auto_populated_fields: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// Nested message and enum types in `MethodSettings`. pub mod method_settings { @@ -872,6 +906,19 @@ pub enum FieldBehavior { /// a non-empty value will be returned. 
The user will not be aware of what /// non-empty value to expect. NonEmptyDefault = 7, + /// Denotes that the field in a resource (a message annotated with + /// google.api.resource) is used in the resource name to uniquely identify the + /// resource. For AIP-compliant APIs, this should only be applied to the + /// `name` field on the resource. + /// + /// This behavior should not be applied to references to other resources within + /// the message. + /// + /// The identifier field of resources often have different field behavior + /// depending on the request it is embedded in (e.g. for Create methods name + /// is optional and unused, while for Update methods it is required). Instead + /// of method-specific annotations, only `IDENTIFIER` is required. + Identifier = 8, } impl FieldBehavior { /// String value of the enum field names used in the ProtoBuf definition. @@ -888,6 +935,7 @@ impl FieldBehavior { FieldBehavior::Immutable => "IMMUTABLE", FieldBehavior::UnorderedList => "UNORDERED_LIST", FieldBehavior::NonEmptyDefault => "NON_EMPTY_DEFAULT", + FieldBehavior::Identifier => "IDENTIFIER", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -901,6 +949,7 @@ impl FieldBehavior { "IMMUTABLE" => Some(Self::Immutable), "UNORDERED_LIST" => Some(Self::UnorderedList), "NON_EMPTY_DEFAULT" => Some(Self::NonEmptyDefault), + "IDENTIFIER" => Some(Self::Identifier), _ => None, } } @@ -1011,8 +1060,13 @@ pub struct ResourceDescriptor { pub history: i32, /// The plural name used in the resource name and permission names, such as /// 'projects' for the resource name of 'projects/{project}' and the permission - /// name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same - /// concept of the `plural` field in k8s CRD spec + /// name of 'cloudresourcemanager.googleapis.com/projects.get'. 
One exception + /// to this is for Nested Collections that have stuttering names, as defined + /// in [AIP-122](), where the + /// collection ID in the resource name pattern does not necessarily directly + /// match the `plural` value. + /// + /// It is the same concept of the `plural` field in k8s CRD spec /// /// /// Note: The plural form is required even for singleton resources. See diff --git a/googleapis/src/bytes/google.cloud.bigquery.storage.v1.rs b/googleapis/src/bytes/google.cloud.bigquery.storage.v1.rs index a1ef14e4..6c7ff71c 100644 --- a/googleapis/src/bytes/google.cloud.bigquery.storage.v1.rs +++ b/googleapis/src/bytes/google.cloud.bigquery.storage.v1.rs @@ -232,9 +232,25 @@ pub struct TableFieldSchema { /// () for this field. #[prost(string, tag = "10")] pub default_value_expression: ::prost::alloc::string::String, + /// Optional. The subtype of the RANGE, if the type of this field is RANGE. If + /// the type is RANGE, this field is required. Possible values for the field + /// element type of a RANGE include: + /// * DATE + /// * DATETIME + /// * TIMESTAMP + #[prost(message, optional, tag = "11")] + pub range_element_type: ::core::option::Option, } /// Nested message and enum types in `TableFieldSchema`. pub mod table_field_schema { + /// Represents the type of a field element. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct FieldElementType { + /// Required. The type of a field element. + #[prost(enumeration = "Type", tag = "1")] + pub r#type: i32, + } #[derive( Clone, Copy, @@ -280,6 +296,8 @@ pub mod table_field_schema { Interval = 14, /// JSON, String Json = 15, + /// RANGE + Range = 16, } impl Type { /// String value of the enum field names used in the ProtoBuf definition. 
@@ -304,6 +322,7 @@ pub mod table_field_schema { Type::Bignumeric => "BIGNUMERIC", Type::Interval => "INTERVAL", Type::Json => "JSON", + Type::Range => "RANGE", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -325,6 +344,7 @@ pub mod table_field_schema { "BIGNUMERIC" => Some(Self::Bignumeric), "INTERVAL" => Some(Self::Interval), "JSON" => Some(Self::Json), + "RANGE" => Some(Self::Range), _ => None, } } @@ -524,6 +544,14 @@ pub mod read_session { /// ) #[prost(double, optional, tag = "5")] pub sample_percentage: ::core::option::Option, + /// Optional. Set response_compression_codec when creating a read session to + /// enable application-level compression of ReadRows responses. + #[prost( + enumeration = "table_read_options::ResponseCompressionCodec", + optional, + tag = "6" + )] + pub response_compression_codec: ::core::option::Option, #[prost( oneof = "table_read_options::OutputFormatSerializationOptions", tags = "3, 4" @@ -534,6 +562,53 @@ pub mod read_session { } /// Nested message and enum types in `TableReadOptions`. pub mod table_read_options { + /// Specifies which compression codec to attempt on the entire serialized + /// response payload (either Arrow record batch or Avro rows). This is + /// not to be confused with the Apache Arrow native compression codecs + /// specified in ArrowSerializationOptions. For performance reasons, when + /// creating a read session requesting Arrow responses, setting both native + /// Arrow compression and application-level response compression will not be + /// allowed - choose, at most, one kind of compression. + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum ResponseCompressionCodec { + /// Default is no compression. + Unspecified = 0, + /// Use raw LZ4 compression. + Lz4 = 2, + } + impl ResponseCompressionCodec { + /// String value of the enum field names used in the ProtoBuf definition. 
+ /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + ResponseCompressionCodec::Unspecified => { + "RESPONSE_COMPRESSION_CODEC_UNSPECIFIED" + } + ResponseCompressionCodec::Lz4 => "RESPONSE_COMPRESSION_CODEC_LZ4", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "RESPONSE_COMPRESSION_CODEC_UNSPECIFIED" => Some(Self::Unspecified), + "RESPONSE_COMPRESSION_CODEC_LZ4" => Some(Self::Lz4), + _ => None, + } + } + } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum OutputFormatSerializationOptions { @@ -874,6 +949,22 @@ pub struct ReadRowsResponse { /// the current throttling status. #[prost(message, optional, tag = "5")] pub throttle_state: ::core::option::Option, + /// Optional. If the row data in this ReadRowsResponse is compressed, then + /// uncompressed byte size is the original size of the uncompressed row data. + /// If it is set to a value greater than 0, then decompress into a buffer of + /// size uncompressed_byte_size using the compression codec that was requested + /// during session creation time and which is specified in + /// TableReadOptions.response_compression_codec in ReadSession. + /// This value is not set if no response_compression_codec was not requested + /// and it is -1 if the requested compression would not have reduced the size + /// of this ReadRowsResponse's row data. This attempts to match Apache Arrow's + /// behavior described here where + /// the uncompressed length may be set to -1 to indicate that the data that + /// follows is not compressed, which can be useful for cases where compression + /// does not yield appreciable savings. 
When uncompressed_byte_size is not + /// greater than 0, the client should skip decompression. + #[prost(int64, optional, tag = "9")] + pub uncompressed_byte_size: ::core::option::Option, /// Row data is returned in format specified during session creation. #[prost(oneof = "read_rows_response::Rows", tags = "3, 4")] pub rows: ::core::option::Option, @@ -1039,6 +1130,17 @@ pub struct AppendRowsRequest { ::prost::alloc::string::String, i32, >, + /// Optional. Default missing value interpretation for all columns in the + /// table. When a value is specified on an `AppendRowsRequest`, it is applied + /// to all requests on the connection from that point forward, until a + /// subsequent `AppendRowsRequest` sets it to a different value. + /// `missing_value_interpretation` can override + /// `default_missing_value_interpretation`. For example, if you want to write + /// `NULL` instead of using default values for some columns, you can set + /// `default_missing_value_interpretation` to `DEFAULT_VALUE` and at the same + /// time, set `missing_value_interpretations` to `NULL_VALUE` on those columns. + #[prost(enumeration = "append_rows_request::MissingValueInterpretation", tag = "8")] + pub default_missing_value_interpretation: i32, /// Input rows. The `writer_schema` field must be specified at the initial /// request and currently, it will be ignored if specified in following /// requests. Following requests must have data in the same format as the @@ -1336,7 +1438,8 @@ pub mod storage_error { InvalidCmekProvided = 11, /// There is an encryption error while using customer-managed encryption key. CmekEncryptionError = 12, - /// Key Management Service (KMS) service returned an error. + /// Key Management Service (KMS) service returned an error, which can be + /// retried. KmsServiceError = 13, /// Permission denied while using customer-managed encryption key. 
KmsPermissionDenied = 14, diff --git a/googleapis/src/bytes/google.cloud.kms.v1.rs b/googleapis/src/bytes/google.cloud.kms.v1.rs index 90abefb3..fa63efd9 100644 --- a/googleapis/src/bytes/google.cloud.kms.v1.rs +++ b/googleapis/src/bytes/google.cloud.kms.v1.rs @@ -91,7 +91,7 @@ pub struct CryptoKey { /// [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED] /// state before transitioning to /// [DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED]. - /// If not specified at creation time, the default duration is 24 hours. + /// If not specified at creation time, the default duration is 30 days. #[prost(message, optional, tag = "14")] pub destroy_scheduled_duration: ::core::option::Option<::prost_types::Duration>, /// Immutable. The resource name of the backend environment where the key @@ -106,6 +106,18 @@ pub struct CryptoKey { /// [ProtectionLevels][google.cloud.kms.v1.ProtectionLevel] in the future. #[prost(string, tag = "15")] pub crypto_key_backend: ::prost::alloc::string::String, + /// Optional. The policy used for Key Access Justifications Policy Enforcement. + /// If this field is present and this key is enrolled in Key Access + /// Justifications Policy Enforcement, the policy will be evaluated in encrypt, + /// decrypt, and sign operations, and the operation will fail if rejected by + /// the policy. The policy is defined by specifying zero or more allowed + /// justification codes. + /// + /// By default, this field is absent, and all justification codes are allowed. + #[prost(message, optional, tag = "17")] + pub key_access_justifications_policy: ::core::option::Option< + KeyAccessJustificationsPolicy, + >, /// Controls the rate of automatic rotation. 
#[prost(oneof = "crypto_key::RotationSchedule", tags = "8")] pub rotation_schedule: ::core::option::Option, @@ -440,11 +452,11 @@ pub mod crypto_key_version { /// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] /// [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. /// - /// Algorithms beginning with "RSA_SIGN_" are usable with + /// Algorithms beginning with `RSA_SIGN_` are usable with /// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] /// [ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN]. /// - /// The fields in the name after "RSA_SIGN_" correspond to the following + /// The fields in the name after `RSA_SIGN_` correspond to the following /// parameters: padding algorithm, modulus bit length, and digest algorithm. /// /// For PSS, the salt length used is equal to the length of digest @@ -452,25 +464,25 @@ pub mod crypto_key_version { /// [RSA_SIGN_PSS_2048_SHA256][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm.RSA_SIGN_PSS_2048_SHA256] /// will use PSS with a salt length of 256 bits or 32 bytes. /// - /// Algorithms beginning with "RSA_DECRYPT_" are usable with + /// Algorithms beginning with `RSA_DECRYPT_` are usable with /// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] /// [ASYMMETRIC_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_DECRYPT]. /// - /// The fields in the name after "RSA_DECRYPT_" correspond to the following + /// The fields in the name after `RSA_DECRYPT_` correspond to the following /// parameters: padding algorithm, modulus bit length, and digest algorithm. /// - /// Algorithms beginning with "EC_SIGN_" are usable with + /// Algorithms beginning with `EC_SIGN_` are usable with /// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] /// [ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN]. 
/// - /// The fields in the name after "EC_SIGN_" correspond to the following + /// The fields in the name after `EC_SIGN_` correspond to the following /// parameters: elliptic curve, digest algorithm. /// - /// Algorithms beginning with "HMAC_" are usable with + /// Algorithms beginning with `HMAC_` are usable with /// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] /// [MAC][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.MAC]. /// - /// The suffix following "HMAC_" corresponds to the hash algorithm being used + /// The suffix following `HMAC_` corresponds to the hash algorithm being used /// (eg. SHA256). /// /// For more information, see \[Key purposes and algorithms\] @@ -553,6 +565,8 @@ pub mod crypto_key_version { /// Other hash functions can also be used: /// EcSignSecp256k1Sha256 = 31, + /// EdDSA on the Curve25519 in pure mode (taking data as input). + EcSignEd25519 = 40, /// HMAC-SHA256 signing with a 256 bit key. HmacSha256 = 32, /// HMAC-SHA1 signing with a 160 bit key. @@ -644,6 +658,7 @@ pub mod crypto_key_version { CryptoKeyVersionAlgorithm::EcSignSecp256k1Sha256 => { "EC_SIGN_SECP256K1_SHA256" } + CryptoKeyVersionAlgorithm::EcSignEd25519 => "EC_SIGN_ED25519", CryptoKeyVersionAlgorithm::HmacSha256 => "HMAC_SHA256", CryptoKeyVersionAlgorithm::HmacSha1 => "HMAC_SHA1", CryptoKeyVersionAlgorithm::HmacSha384 => "HMAC_SHA384", @@ -686,6 +701,7 @@ pub mod crypto_key_version { "EC_SIGN_P256_SHA256" => Some(Self::EcSignP256Sha256), "EC_SIGN_P384_SHA384" => Some(Self::EcSignP384Sha384), "EC_SIGN_SECP256K1_SHA256" => Some(Self::EcSignSecp256k1Sha256), + "EC_SIGN_ED25519" => Some(Self::EcSignEd25519), "HMAC_SHA256" => Some(Self::HmacSha256), "HMAC_SHA1" => Some(Self::HmacSha1), "HMAC_SHA384" => Some(Self::HmacSha384), @@ -865,7 +881,7 @@ pub mod crypto_key_version { } } } -/// The public key for a given +/// The public keys for a given /// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. 
Obtained via /// [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]. #[allow(clippy::derive_partial_eq_without_eq)] @@ -1179,6 +1195,22 @@ pub struct ExternalProtectionLevelOptions { #[prost(string, tag = "2")] pub ekm_connection_key_path: ::prost::alloc::string::String, } +/// A +/// [KeyAccessJustificationsPolicy][google.cloud.kms.v1.KeyAccessJustificationsPolicy] +/// specifies zero or more allowed +/// [AccessReason][google.cloud.kms.v1.AccessReason] values for encrypt, decrypt, +/// and sign operations on a [CryptoKey][google.cloud.kms.v1.CryptoKey]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct KeyAccessJustificationsPolicy { + /// The list of allowed reasons for access to a + /// [CryptoKey][google.cloud.kms.v1.CryptoKey]. Zero allowed access reasons + /// means all encrypt, decrypt, and sign operations for the + /// [CryptoKey][google.cloud.kms.v1.CryptoKey] associated with this policy will + /// fail. + #[prost(enumeration = "AccessReason", repeated, tag = "1")] + pub allowed_access_reasons: ::prost::alloc::vec::Vec, +} /// [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] specifies how /// cryptographic operations are performed. For more information, see [Protection /// levels] (). @@ -1222,6 +1254,123 @@ impl ProtectionLevel { } } } +/// Describes the reason for a data access. Please refer to +/// +/// for the detailed semantic meaning of justification reason codes. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum AccessReason { + /// Unspecified access reason. + ReasonUnspecified = 0, + /// Customer-initiated support. + CustomerInitiatedSupport = 1, + /// Google-initiated access for system management and troubleshooting. + GoogleInitiatedService = 2, + /// Google-initiated access in response to a legal request or legal process. 
+ ThirdPartyDataRequest = 3, + /// Google-initiated access for security, fraud, abuse, or compliance purposes. + GoogleInitiatedReview = 4, + /// Customer uses their account to perform any access to their own data which + /// their IAM policy authorizes. + CustomerInitiatedAccess = 5, + /// Google systems access customer data to help optimize the structure of the + /// data or quality for future uses by the customer. + GoogleInitiatedSystemOperation = 6, + /// No reason is expected for this key request. + ReasonNotExpected = 7, + /// Customer uses their account to perform any access to their own data which + /// their IAM policy authorizes, and one of the following is true: + /// + /// * A Google administrator has reset the root-access account associated with + /// the user's organization within the past 7 days. + /// * A Google-initiated emergency access operation has interacted with a + /// resource in the same project or folder as the currently accessed resource + /// within the past 7 days. + ModifiedCustomerInitiatedAccess = 8, + /// Google systems access customer data to help optimize the structure of the + /// data or quality for future uses by the customer, and one of the following + /// is true: + /// + /// * A Google administrator has reset the root-access account associated with + /// the user's organization within the past 7 days. + /// * A Google-initiated emergency access operation has interacted with a + /// resource in the same project or folder as the currently accessed resource + /// within the past 7 days. + ModifiedGoogleInitiatedSystemOperation = 9, + /// Google-initiated access to maintain system reliability. 
+ GoogleResponseToProductionAlert = 10, + /// One of the following operations is being executed while simultaneously + /// encountering an internal technical issue which prevented a more precise + /// justification code from being generated: + /// + /// * Your account has been used to perform any access to your own data which + /// your IAM policy authorizes. + /// * An automated Google system operates on encrypted customer data which your + /// IAM policy authorizes. + /// * Customer-initiated Google support access. + /// * Google-initiated support access to protect system reliability. + CustomerAuthorizedWorkflowServicing = 11, +} +impl AccessReason { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + AccessReason::ReasonUnspecified => "REASON_UNSPECIFIED", + AccessReason::CustomerInitiatedSupport => "CUSTOMER_INITIATED_SUPPORT", + AccessReason::GoogleInitiatedService => "GOOGLE_INITIATED_SERVICE", + AccessReason::ThirdPartyDataRequest => "THIRD_PARTY_DATA_REQUEST", + AccessReason::GoogleInitiatedReview => "GOOGLE_INITIATED_REVIEW", + AccessReason::CustomerInitiatedAccess => "CUSTOMER_INITIATED_ACCESS", + AccessReason::GoogleInitiatedSystemOperation => { + "GOOGLE_INITIATED_SYSTEM_OPERATION" + } + AccessReason::ReasonNotExpected => "REASON_NOT_EXPECTED", + AccessReason::ModifiedCustomerInitiatedAccess => { + "MODIFIED_CUSTOMER_INITIATED_ACCESS" + } + AccessReason::ModifiedGoogleInitiatedSystemOperation => { + "MODIFIED_GOOGLE_INITIATED_SYSTEM_OPERATION" + } + AccessReason::GoogleResponseToProductionAlert => { + "GOOGLE_RESPONSE_TO_PRODUCTION_ALERT" + } + AccessReason::CustomerAuthorizedWorkflowServicing => { + "CUSTOMER_AUTHORIZED_WORKFLOW_SERVICING" + } + } + } + /// Creates an enum from field names used in the 
ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "REASON_UNSPECIFIED" => Some(Self::ReasonUnspecified), + "CUSTOMER_INITIATED_SUPPORT" => Some(Self::CustomerInitiatedSupport), + "GOOGLE_INITIATED_SERVICE" => Some(Self::GoogleInitiatedService), + "THIRD_PARTY_DATA_REQUEST" => Some(Self::ThirdPartyDataRequest), + "GOOGLE_INITIATED_REVIEW" => Some(Self::GoogleInitiatedReview), + "CUSTOMER_INITIATED_ACCESS" => Some(Self::CustomerInitiatedAccess), + "GOOGLE_INITIATED_SYSTEM_OPERATION" => { + Some(Self::GoogleInitiatedSystemOperation) + } + "REASON_NOT_EXPECTED" => Some(Self::ReasonNotExpected), + "MODIFIED_CUSTOMER_INITIATED_ACCESS" => { + Some(Self::ModifiedCustomerInitiatedAccess) + } + "MODIFIED_GOOGLE_INITIATED_SYSTEM_OPERATION" => { + Some(Self::ModifiedGoogleInitiatedSystemOperation) + } + "GOOGLE_RESPONSE_TO_PRODUCTION_ALERT" => { + Some(Self::GoogleResponseToProductionAlert) + } + "CUSTOMER_AUTHORIZED_WORKFLOW_SERVICING" => { + Some(Self::CustomerAuthorizedWorkflowServicing) + } + _ => None, + } + } +} /// Request message for /// [KeyManagementService.ListKeyRings][google.cloud.kms.v1.KeyManagementService.ListKeyRings]. #[allow(clippy::derive_partial_eq_without_eq)] @@ -4075,7 +4224,7 @@ pub struct EkmConnection { /// [EkmConnection][google.cloud.kms.v1.EkmConnection] was created. #[prost(message, optional, tag = "2")] pub create_time: ::core::option::Option<::prost_types::Timestamp>, - /// A list of + /// Optional. A list of /// [ServiceResolvers][google.cloud.kms.v1.EkmConnection.ServiceResolver] where /// the EKM can be reached. There should be one ServiceResolver per EKM /// replica. 
Currently, only a single diff --git a/googleapis/src/bytes/google.devtools.artifactregistry.v1.rs b/googleapis/src/bytes/google.devtools.artifactregistry.v1.rs index 88ef37fc..0d61a82c 100644 --- a/googleapis/src/bytes/google.devtools.artifactregistry.v1.rs +++ b/googleapis/src/bytes/google.devtools.artifactregistry.v1.rs @@ -799,7 +799,7 @@ pub struct GetFileRequest { #[derive(Clone, PartialEq, ::prost::Message)] pub struct Package { /// The name of the package, for example: - /// "projects/p1/locations/us-central1/repositories/repo1/packages/pkg1". + /// `projects/p1/locations/us-central1/repositories/repo1/packages/pkg1`. /// If the package ID part contains slashes, the slashes are escaped. #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, @@ -856,15 +856,678 @@ pub struct DeletePackageRequest { #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, } +/// Artifact policy configuration for the repository contents. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UpstreamPolicy { + /// The user-provided ID of the upstream policy. + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// A reference to the repository resource, for example: + /// `projects/p1/locations/us-central1/repositories/repo1`. + #[prost(string, tag = "2")] + pub repository: ::prost::alloc::string::String, + /// Entries with a greater priority value take precedence in the pull order. + #[prost(int32, tag = "3")] + pub priority: i32, +} +/// CleanupPolicyCondition is a set of conditions attached to a CleanupPolicy. +/// If multiple entries are set, all must be satisfied for the condition to be +/// satisfied. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CleanupPolicyCondition { + /// Match versions by tag status. 
+ #[prost(enumeration = "cleanup_policy_condition::TagState", optional, tag = "2")] + pub tag_state: ::core::option::Option, + /// Match versions by tag prefix. Applied on any prefix match. + #[prost(string, repeated, tag = "3")] + pub tag_prefixes: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Match versions by version name prefix. Applied on any prefix match. + #[prost(string, repeated, tag = "4")] + pub version_name_prefixes: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Match versions by package prefix. Applied on any prefix match. + #[prost(string, repeated, tag = "5")] + pub package_name_prefixes: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Match versions older than a duration. + #[prost(message, optional, tag = "6")] + pub older_than: ::core::option::Option<::prost_types::Duration>, + /// Match versions newer than a duration. + #[prost(message, optional, tag = "7")] + pub newer_than: ::core::option::Option<::prost_types::Duration>, +} +/// Nested message and enum types in `CleanupPolicyCondition`. +pub mod cleanup_policy_condition { + /// Statuses applying to versions. + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum TagState { + /// Tag status not specified. + Unspecified = 0, + /// Applies to tagged versions only. + Tagged = 1, + /// Applies to untagged versions only. + Untagged = 2, + /// Applies to all versions. + Any = 3, + } + impl TagState { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + TagState::Unspecified => "TAG_STATE_UNSPECIFIED", + TagState::Tagged => "TAGGED", + TagState::Untagged => "UNTAGGED", + TagState::Any => "ANY", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "TAG_STATE_UNSPECIFIED" => Some(Self::Unspecified), + "TAGGED" => Some(Self::Tagged), + "UNTAGGED" => Some(Self::Untagged), + "ANY" => Some(Self::Any), + _ => None, + } + } + } +} +/// CleanupPolicyMostRecentVersions is an alternate condition of a CleanupPolicy +/// for retaining a minimum number of versions. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CleanupPolicyMostRecentVersions { + /// List of package name prefixes that will apply this rule. + #[prost(string, repeated, tag = "1")] + pub package_name_prefixes: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Minimum number of versions to keep. + #[prost(int32, optional, tag = "2")] + pub keep_count: ::core::option::Option, +} +/// Artifact policy configuration for repository cleanup policies. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CleanupPolicy { + /// The user-provided ID of the cleanup policy. + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Policy action. + #[prost(enumeration = "cleanup_policy::Action", tag = "3")] + pub action: i32, + #[prost(oneof = "cleanup_policy::ConditionType", tags = "2, 4")] + pub condition_type: ::core::option::Option, +} +/// Nested message and enum types in `CleanupPolicy`. +pub mod cleanup_policy { + /// Action type for a cleanup policy. + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum Action { + /// Action not specified. 
+ Unspecified = 0, + /// Delete action. + Delete = 1, + /// Keep action. + Keep = 2, + } + impl Action { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Action::Unspecified => "ACTION_UNSPECIFIED", + Action::Delete => "DELETE", + Action::Keep => "KEEP", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "ACTION_UNSPECIFIED" => Some(Self::Unspecified), + "DELETE" => Some(Self::Delete), + "KEEP" => Some(Self::Keep), + _ => None, + } + } + } + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum ConditionType { + /// Policy condition for matching versions. + #[prost(message, tag = "2")] + Condition(super::CleanupPolicyCondition), + /// Policy condition for retaining a minimum number of versions. May only be + /// specified with a Keep action. + #[prost(message, tag = "4")] + MostRecentVersions(super::CleanupPolicyMostRecentVersions), + } +} +/// Virtual repository configuration. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct VirtualRepositoryConfig { + /// Policies that configure the upstream artifacts distributed by the Virtual + /// Repository. Upstream policies cannot be set on a standard repository. + #[prost(message, repeated, tag = "1")] + pub upstream_policies: ::prost::alloc::vec::Vec, +} +/// Remote repository configuration. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RemoteRepositoryConfig { + /// The description of the remote source. 
+ #[prost(string, tag = "1")] + pub description: ::prost::alloc::string::String, + /// Optional. The credentials used to access the remote repository. + #[prost(message, optional, tag = "9")] + pub upstream_credentials: ::core::option::Option< + remote_repository_config::UpstreamCredentials, + >, + /// Settings specific to the remote repository. + #[prost(oneof = "remote_repository_config::RemoteSource", tags = "2, 3, 4, 5, 6, 7")] + pub remote_source: ::core::option::Option, +} +/// Nested message and enum types in `RemoteRepositoryConfig`. +pub mod remote_repository_config { + /// The credentials to access the remote repository. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct UpstreamCredentials { + #[prost(oneof = "upstream_credentials::Credentials", tags = "1")] + pub credentials: ::core::option::Option, + } + /// Nested message and enum types in `UpstreamCredentials`. + pub mod upstream_credentials { + /// Username and password credentials. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct UsernamePasswordCredentials { + /// The username to access the remote repository. + #[prost(string, tag = "1")] + pub username: ::prost::alloc::string::String, + /// The Secret Manager key version that holds the password to access the + /// remote repository. Must be in the format of + /// `projects/{project}/secrets/{secret}/versions/{version}`. + #[prost(string, tag = "2")] + pub password_secret_version: ::prost::alloc::string::String, + } + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Credentials { + /// Use username and password to access the remote repository. + #[prost(message, tag = "1")] + UsernamePasswordCredentials(UsernamePasswordCredentials), + } + } + /// Configuration for a Docker remote repository. 
+ #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct DockerRepository { + /// Address of the remote repository. + #[prost(oneof = "docker_repository::Upstream", tags = "1")] + pub upstream: ::core::option::Option, + } + /// Nested message and enum types in `DockerRepository`. + pub mod docker_repository { + /// Predefined list of publicly available Docker repositories like Docker + /// Hub. + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum PublicRepository { + /// Unspecified repository. + Unspecified = 0, + /// Docker Hub. + DockerHub = 1, + } + impl PublicRepository { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + PublicRepository::Unspecified => "PUBLIC_REPOSITORY_UNSPECIFIED", + PublicRepository::DockerHub => "DOCKER_HUB", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "PUBLIC_REPOSITORY_UNSPECIFIED" => Some(Self::Unspecified), + "DOCKER_HUB" => Some(Self::DockerHub), + _ => None, + } + } + } + /// Address of the remote repository. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] + pub enum Upstream { + /// One of the publicly available Docker repositories supported by Artifact + /// Registry. + #[prost(enumeration = "PublicRepository", tag = "1")] + PublicRepository(i32), + } + } + /// Configuration for a Maven remote repository. 
+ #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct MavenRepository { + /// Address of the remote repository. + #[prost(oneof = "maven_repository::Upstream", tags = "1")] + pub upstream: ::core::option::Option, + } + /// Nested message and enum types in `MavenRepository`. + pub mod maven_repository { + /// Predefined list of publicly available Maven repositories like Maven + /// Central. + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum PublicRepository { + /// Unspecified repository. + Unspecified = 0, + /// Maven Central. + MavenCentral = 1, + } + impl PublicRepository { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + PublicRepository::Unspecified => "PUBLIC_REPOSITORY_UNSPECIFIED", + PublicRepository::MavenCentral => "MAVEN_CENTRAL", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "PUBLIC_REPOSITORY_UNSPECIFIED" => Some(Self::Unspecified), + "MAVEN_CENTRAL" => Some(Self::MavenCentral), + _ => None, + } + } + } + /// Address of the remote repository. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] + pub enum Upstream { + /// One of the publicly available Maven repositories supported by Artifact + /// Registry. + #[prost(enumeration = "PublicRepository", tag = "1")] + PublicRepository(i32), + } + } + /// Configuration for a Npm remote repository. 
+ #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct NpmRepository { + /// Address of the remote repository + #[prost(oneof = "npm_repository::Upstream", tags = "1")] + pub upstream: ::core::option::Option, + } + /// Nested message and enum types in `NpmRepository`. + pub mod npm_repository { + /// Predefined list of publicly available NPM repositories like npmjs. + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum PublicRepository { + /// Unspecified repository. + Unspecified = 0, + /// npmjs. + Npmjs = 1, + } + impl PublicRepository { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + PublicRepository::Unspecified => "PUBLIC_REPOSITORY_UNSPECIFIED", + PublicRepository::Npmjs => "NPMJS", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "PUBLIC_REPOSITORY_UNSPECIFIED" => Some(Self::Unspecified), + "NPMJS" => Some(Self::Npmjs), + _ => None, + } + } + } + /// Address of the remote repository + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] + pub enum Upstream { + /// One of the publicly available Npm repositories supported by Artifact + /// Registry. + #[prost(enumeration = "PublicRepository", tag = "1")] + PublicRepository(i32), + } + } + /// Configuration for a Python remote repository. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct PythonRepository { + /// Address of the remote repository. 
+ #[prost(oneof = "python_repository::Upstream", tags = "1")] + pub upstream: ::core::option::Option, + } + /// Nested message and enum types in `PythonRepository`. + pub mod python_repository { + /// Predefined list of publicly available Python repositories like PyPI.org. + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum PublicRepository { + /// Unspecified repository. + Unspecified = 0, + /// PyPI. + Pypi = 1, + } + impl PublicRepository { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + PublicRepository::Unspecified => "PUBLIC_REPOSITORY_UNSPECIFIED", + PublicRepository::Pypi => "PYPI", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "PUBLIC_REPOSITORY_UNSPECIFIED" => Some(Self::Unspecified), + "PYPI" => Some(Self::Pypi), + _ => None, + } + } + } + /// Address of the remote repository. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] + pub enum Upstream { + /// One of the publicly available Python repositories supported by Artifact + /// Registry. + #[prost(enumeration = "PublicRepository", tag = "1")] + PublicRepository(i32), + } + } + /// Configuration for an Apt remote repository. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct AptRepository { + /// Address of the remote repository. + #[prost(oneof = "apt_repository::Upstream", tags = "1")] + pub upstream: ::core::option::Option, + } + /// Nested message and enum types in `AptRepository`. 
+ pub mod apt_repository { + /// Publicly available Apt repositories constructed from a common repository + /// base and a custom repository path. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct PublicRepository { + /// A common public repository base for Apt. + #[prost(enumeration = "public_repository::RepositoryBase", tag = "1")] + pub repository_base: i32, + /// A custom field to define a path to a specific repository from the base. + #[prost(string, tag = "2")] + pub repository_path: ::prost::alloc::string::String, + } + /// Nested message and enum types in `PublicRepository`. + pub mod public_repository { + /// Predefined list of publicly available repository bases for Apt. + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum RepositoryBase { + /// Unspecified repository base. + Unspecified = 0, + /// Debian. + Debian = 1, + /// Ubuntu LTS/Pro. + Ubuntu = 2, + } + impl RepositoryBase { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + RepositoryBase::Unspecified => "REPOSITORY_BASE_UNSPECIFIED", + RepositoryBase::Debian => "DEBIAN", + RepositoryBase::Ubuntu => "UBUNTU", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "REPOSITORY_BASE_UNSPECIFIED" => Some(Self::Unspecified), + "DEBIAN" => Some(Self::Debian), + "UBUNTU" => Some(Self::Ubuntu), + _ => None, + } + } + } + } + /// Address of the remote repository. 
+ #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Upstream { + /// One of the publicly available Apt repositories supported by Artifact + /// Registry. + #[prost(message, tag = "1")] + PublicRepository(PublicRepository), + } + } + /// Configuration for a Yum remote repository. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct YumRepository { + /// Address of the remote repository. + #[prost(oneof = "yum_repository::Upstream", tags = "1")] + pub upstream: ::core::option::Option, + } + /// Nested message and enum types in `YumRepository`. + pub mod yum_repository { + /// Publicly available Yum repositories constructed from a common repository + /// base and a custom repository path. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct PublicRepository { + /// A common public repository base for Yum. + #[prost(enumeration = "public_repository::RepositoryBase", tag = "1")] + pub repository_base: i32, + /// A custom field to define a path to a specific repository from the base. + #[prost(string, tag = "2")] + pub repository_path: ::prost::alloc::string::String, + } + /// Nested message and enum types in `PublicRepository`. + pub mod public_repository { + /// Predefined list of publicly available repository bases for Yum. + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum RepositoryBase { + /// Unspecified repository base. + Unspecified = 0, + /// CentOS. + Centos = 1, + /// CentOS Debug. + CentosDebug = 2, + /// CentOS Vault. + CentosVault = 3, + /// CentOS Stream. + CentosStream = 4, + /// Rocky. + Rocky = 5, + /// Fedora Extra Packages for Enterprise Linux (EPEL). + Epel = 6, + } + impl RepositoryBase { + /// String value of the enum field names used in the ProtoBuf definition. 
+ /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + RepositoryBase::Unspecified => "REPOSITORY_BASE_UNSPECIFIED", + RepositoryBase::Centos => "CENTOS", + RepositoryBase::CentosDebug => "CENTOS_DEBUG", + RepositoryBase::CentosVault => "CENTOS_VAULT", + RepositoryBase::CentosStream => "CENTOS_STREAM", + RepositoryBase::Rocky => "ROCKY", + RepositoryBase::Epel => "EPEL", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "REPOSITORY_BASE_UNSPECIFIED" => Some(Self::Unspecified), + "CENTOS" => Some(Self::Centos), + "CENTOS_DEBUG" => Some(Self::CentosDebug), + "CENTOS_VAULT" => Some(Self::CentosVault), + "CENTOS_STREAM" => Some(Self::CentosStream), + "ROCKY" => Some(Self::Rocky), + "EPEL" => Some(Self::Epel), + _ => None, + } + } + } + } + /// Address of the remote repository. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Upstream { + /// One of the publicly available Yum repositories supported by Artifact + /// Registry. + #[prost(message, tag = "1")] + PublicRepository(PublicRepository), + } + } + /// Settings specific to the remote repository. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum RemoteSource { + /// Specific settings for a Docker remote repository. + #[prost(message, tag = "2")] + DockerRepository(DockerRepository), + /// Specific settings for a Maven remote repository. + #[prost(message, tag = "3")] + MavenRepository(MavenRepository), + /// Specific settings for an Npm remote repository. + #[prost(message, tag = "4")] + NpmRepository(NpmRepository), + /// Specific settings for a Python remote repository. 
+ #[prost(message, tag = "5")] + PythonRepository(PythonRepository), + /// Specific settings for an Apt remote repository. + #[prost(message, tag = "6")] + AptRepository(AptRepository), + /// Specific settings for a Yum remote repository. + #[prost(message, tag = "7")] + YumRepository(YumRepository), + } +} /// A Repository for storing artifacts with a specific format. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Repository { /// The name of the repository, for example: - /// "projects/p1/locations/us-central1/repositories/repo1". + /// `projects/p1/locations/us-central1/repositories/repo1`. #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, - /// The format of packages that are stored in the repository. + /// Optional. The format of packages that are stored in the repository. #[prost(enumeration = "repository::Format", tag = "2")] pub format: i32, /// The user-provided description of the repository. @@ -880,10 +1543,10 @@ pub struct Repository { ::prost::alloc::string::String, ::prost::alloc::string::String, >, - /// The time when the repository was created. + /// Output only. The time when the repository was created. #[prost(message, optional, tag = "5")] pub create_time: ::core::option::Option<::prost_types::Timestamp>, - /// The time when the repository was last updated. + /// Output only. The time when the repository was last updated. #[prost(message, optional, tag = "6")] pub update_time: ::core::option::Option<::prost_types::Timestamp>, /// The Cloud KMS resource name of the customer managed encryption key that's @@ -892,9 +1555,37 @@ pub struct Repository { /// This value may not be changed after the Repository has been created. #[prost(string, tag = "8")] pub kms_key_name: ::prost::alloc::string::String, + /// Optional. The mode of the repository. + #[prost(enumeration = "repository::Mode", tag = "10")] + pub mode: i32, + /// Optional. Cleanup policies for this repository. 
Cleanup policies indicate + /// when certain package versions can be automatically deleted. Map keys are + /// policy IDs supplied by users during policy creation. They must unique + /// within a repository and be under 128 characters in length. + #[prost(map = "string, message", tag = "12")] + pub cleanup_policies: ::std::collections::HashMap< + ::prost::alloc::string::String, + CleanupPolicy, + >, + /// Output only. The size, in bytes, of all artifact storage in this + /// repository. Repositories that are generally available or in public preview + /// use this to calculate storage costs. + #[prost(int64, tag = "13")] + pub size_bytes: i64, + /// Output only. If set, the repository satisfies physical zone separation. + #[prost(bool, tag = "16")] + pub satisfies_pzs: bool, + /// Optional. If true, the cleanup pipeline is prevented from deleting versions + /// in this repository. + #[prost(bool, tag = "18")] + pub cleanup_policy_dry_run: bool, /// Repository-specific configurations. - #[prost(oneof = "repository::FormatConfig", tags = "9")] + #[prost(oneof = "repository::FormatConfig", tags = "9, 17")] pub format_config: ::core::option::Option, + /// Repository configuration specific to the Mode value being selected (Remote + /// or Virtual) + #[prost(oneof = "repository::ModeConfig", tags = "14, 15")] + pub mode_config: ::core::option::Option, } /// Nested message and enum types in `Repository`. pub mod repository { @@ -960,6 +1651,18 @@ pub mod repository { } } } + /// DockerRepositoryConfig is docker related repository details. + /// Provides additional configuration details for repositories of the docker + /// format type. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct DockerRepositoryConfig { + /// The repository which enabled this flag prevents all tags from being + /// modified, moved or deleted. This does not prevent tags from being + /// created. 
+ #[prost(bool, tag = "1")] + pub immutable_tags: bool, + } /// A package format. #[derive( Clone, @@ -988,6 +1691,10 @@ pub mod repository { Yum = 6, /// Python package format. Python = 8, + /// Kubeflow Pipelines package format. + Kfp = 9, + /// Go package format. + Go = 10, } impl Format { /// String value of the enum field names used in the ProtoBuf definition. @@ -1003,6 +1710,8 @@ pub mod repository { Format::Apt => "APT", Format::Yum => "YUM", Format::Python => "PYTHON", + Format::Kfp => "KFP", + Format::Go => "GO", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -1015,6 +1724,56 @@ pub mod repository { "APT" => Some(Self::Apt), "YUM" => Some(Self::Yum), "PYTHON" => Some(Self::Python), + "KFP" => Some(Self::Kfp), + "GO" => Some(Self::Go), + _ => None, + } + } + } + /// The mode configures the repository to serve artifacts from different + /// sources. + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum Mode { + /// Unspecified mode. + Unspecified = 0, + /// A standard repository storing artifacts. + StandardRepository = 1, + /// A virtual repository to serve artifacts from one or more sources. + VirtualRepository = 2, + /// A remote repository to serve artifacts from a remote source. + RemoteRepository = 3, + } + impl Mode { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Mode::Unspecified => "MODE_UNSPECIFIED", + Mode::StandardRepository => "STANDARD_REPOSITORY", + Mode::VirtualRepository => "VIRTUAL_REPOSITORY", + Mode::RemoteRepository => "REMOTE_REPOSITORY", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "MODE_UNSPECIFIED" => Some(Self::Unspecified), + "STANDARD_REPOSITORY" => Some(Self::StandardRepository), + "VIRTUAL_REPOSITORY" => Some(Self::VirtualRepository), + "REMOTE_REPOSITORY" => Some(Self::RemoteRepository), _ => None, } } @@ -1027,13 +1786,30 @@ pub mod repository { /// for the repositories of maven type. #[prost(message, tag = "9")] MavenConfig(MavenRepositoryConfig), + /// Docker repository config contains repository level configuration + /// for the repositories of docker type. + #[prost(message, tag = "17")] + DockerConfig(DockerRepositoryConfig), + } + /// Repository configuration specific to the Mode value being selected (Remote + /// or Virtual) + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum ModeConfig { + /// Configuration specific for a Virtual Repository. + #[prost(message, tag = "14")] + VirtualRepositoryConfig(super::VirtualRepositoryConfig), + /// Configuration specific for a Remote Repository. + #[prost(message, tag = "15")] + RemoteRepositoryConfig(super::RemoteRepositoryConfig), } } /// The request to list repositories. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListRepositoriesRequest { - /// Required. The name of the parent resource whose repositories will be listed. + /// Required. The name of the parent resource whose repositories will be + /// listed. #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, /// The maximum number of repositories to return. Maximum page size is 1,000. @@ -1067,13 +1843,14 @@ pub struct GetRepositoryRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CreateRepositoryRequest { - /// Required. The name of the parent resource where the repository will be created. + /// Required. 
The name of the parent resource where the repository will be + /// created. #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, - /// The repository id to use for this repository. + /// Required. The repository id to use for this repository. #[prost(string, tag = "2")] pub repository_id: ::prost::alloc::string::String, - /// The repository to be created. + /// Required. The repository to be created. #[prost(message, optional, tag = "3")] pub repository: ::core::option::Option, } @@ -1219,7 +1996,9 @@ pub struct Tag { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListTagsRequest { - /// The name of the parent resource whose tags will be listed. + /// The name of the parent package whose tags will be listed. + /// For example: + /// `projects/p1/locations/us-central1/repositories/repo1/packages/pkg1`. #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, /// An expression for filtering the results of the request. Filter rules are @@ -1383,6 +2162,21 @@ pub struct DeleteVersionRequest { #[prost(bool, tag = "2")] pub force: bool, } +/// The request to delete multiple versions across a repository. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BatchDeleteVersionsRequest { + /// The name of the repository holding all requested versions. + #[prost(string, tag = "1")] + pub parent: ::prost::alloc::string::String, + /// Required. The names of the versions to delete. + /// A maximum of 10000 versions can be deleted in a batch. + #[prost(string, repeated, tag = "2")] + pub names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// If true, the request is performed without deleting data, following AIP-163. + #[prost(bool, tag = "3")] + pub validate_only: bool, +} /// The metadata of an LRO from deleting multiple versions. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2166,6 +2960,38 @@ pub mod artifact_registry_client { ); self.inner.unary(req, path, codec).await } + /// Deletes multiple versions across a repository. The returned operation will + /// complete once the versions have been deleted. + pub async fn batch_delete_versions( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.devtools.artifactregistry.v1.ArtifactRegistry/BatchDeleteVersions", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.devtools.artifactregistry.v1.ArtifactRegistry", + "BatchDeleteVersions", + ), + ); + self.inner.unary(req, path, codec).await + } /// Lists files. pub async fn list_files( &mut self, diff --git a/googleapis/src/bytes/google.iam.v1.rs b/googleapis/src/bytes/google.iam.v1.rs index f9c60b84..9ac9638e 100644 --- a/googleapis/src/bytes/google.iam.v1.rs +++ b/googleapis/src/bytes/google.iam.v1.rs @@ -44,6 +44,7 @@ pub struct GetPolicyOptions { /// /// **JSON example:** /// +/// ``` /// { /// "bindings": [ /// { @@ -71,9 +72,11 @@ pub struct GetPolicyOptions { /// "etag": "BwWWja0YfJA=", /// "version": 3 /// } +/// ``` /// /// **YAML example:** /// +/// ``` /// bindings: /// - members: /// - user:mike@example.com @@ -90,6 +93,7 @@ pub struct GetPolicyOptions { /// expression: request.time < timestamp('2020-10-01T00:00:00.000Z') /// etag: BwWWja0YfJA= /// version: 3 +/// ``` /// /// For a description of IAM and its features, see the /// [IAM documentation](). 
@@ -161,7 +165,7 @@ pub struct Binding { /// For example, `roles/viewer`, `roles/editor`, or `roles/owner`. #[prost(string, tag = "1")] pub role: ::prost::alloc::string::String, - /// Specifies the principals requesting access for a Cloud Platform resource. + /// Specifies the principals requesting access for a Google Cloud resource. /// `members` can have the following values: /// /// * `allUsers`: A special identifier that represents anyone who is @@ -271,8 +275,8 @@ pub struct Binding { /// } /// /// For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ -/// logging. It also exempts jose@example.com from DATA_READ logging, and -/// aliya@example.com from DATA_WRITE logging. +/// logging. It also exempts `jose@example.com` from DATA_READ logging, and +/// `aliya@example.com` from DATA_WRITE logging. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AuditConfig { @@ -393,7 +397,7 @@ pub struct BindingDelta { /// Required #[prost(string, tag = "2")] pub role: ::prost::alloc::string::String, - /// A single identity requesting access for a Cloud Platform resource. + /// A single identity requesting access for a Google Cloud resource. /// Follows the same format of Binding.members. /// Required #[prost(string, tag = "3")] diff --git a/googleapis/src/bytes/google.pubsub.v1.rs b/googleapis/src/bytes/google.pubsub.v1.rs index 9dda09ba..0059f567 100644 --- a/googleapis/src/bytes/google.pubsub.v1.rs +++ b/googleapis/src/bytes/google.pubsub.v1.rs @@ -76,8 +76,8 @@ pub struct CreateSchemaRequest { /// The ID to use for the schema, which will become the final component of /// the schema's resource name. /// - /// See for resource - /// name constraints. + /// See for + /// resource name constraints. 
#[prost(string, tag = "3")] pub schema_id: ::prost::alloc::string::String, } @@ -569,13 +569,21 @@ pub mod schema_service_client { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MessageStoragePolicy { - /// A list of IDs of GCP regions where messages that are published to the topic - /// may be persisted in storage. Messages published by publishers running in - /// non-allowed GCP regions (or running outside of GCP altogether) will be - /// routed for storage in one of the allowed regions. An empty list means that - /// no regions are allowed, and is not a valid configuration. + /// Optional. A list of IDs of Google Cloud regions where messages that are + /// published to the topic may be persisted in storage. Messages published by + /// publishers running in non-allowed Google Cloud regions (or running outside + /// of Google Cloud altogether) are routed for storage in one of the allowed + /// regions. An empty list means that no regions are allowed, and is not a + /// valid configuration. #[prost(string, repeated, tag = "1")] pub allowed_persistence_regions: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Optional. If true, `allowed_persistence_regions` is also used to enforce + /// in-transit guarantees for messages. That is, Pub/Sub will fail + /// Publish operations on this topic and subscribe operations + /// on any subscription attached to this topic in any region that is + /// not in `allowed_persistence_regions`. + #[prost(bool, tag = "2")] + pub enforce_in_transit: bool, } /// Settings for validating messages published against a schema. #[allow(clippy::derive_partial_eq_without_eq)] @@ -587,20 +595,309 @@ pub struct SchemaSettings { /// deleted. #[prost(string, tag = "1")] pub schema: ::prost::alloc::string::String, - /// The encoding of messages validated against `schema`. + /// Optional. The encoding of messages validated against `schema`. 
#[prost(enumeration = "Encoding", tag = "2")] pub encoding: i32, - /// The minimum (inclusive) revision allowed for validating messages. If empty - /// or not present, allow any revision to be validated against last_revision or - /// any revision created before. + /// Optional. The minimum (inclusive) revision allowed for validating messages. + /// If empty or not present, allow any revision to be validated against + /// last_revision or any revision created before. #[prost(string, tag = "3")] pub first_revision_id: ::prost::alloc::string::String, - /// The maximum (inclusive) revision allowed for validating messages. If empty - /// or not present, allow any revision to be validated against first_revision - /// or any revision created after. + /// Optional. The maximum (inclusive) revision allowed for validating messages. + /// If empty or not present, allow any revision to be validated against + /// first_revision or any revision created after. #[prost(string, tag = "4")] pub last_revision_id: ::prost::alloc::string::String, } +/// Settings for an ingestion data source on a topic. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct IngestionDataSourceSettings { + /// Optional. Platform Logs settings. If unset, no Platform Logs will be + /// generated. + #[prost(message, optional, tag = "4")] + pub platform_logs_settings: ::core::option::Option, + /// Only one source type can have settings set. + #[prost(oneof = "ingestion_data_source_settings::Source", tags = "1, 2")] + pub source: ::core::option::Option, +} +/// Nested message and enum types in `IngestionDataSourceSettings`. +pub mod ingestion_data_source_settings { + /// Ingestion settings for Amazon Kinesis Data Streams. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct AwsKinesis { + /// Output only. An output-only field that indicates the state of the Kinesis + /// ingestion source. 
+ #[prost(enumeration = "aws_kinesis::State", tag = "1")] + pub state: i32, + /// Required. The Kinesis stream ARN to ingest data from. + #[prost(string, tag = "2")] + pub stream_arn: ::prost::alloc::string::String, + /// Required. The Kinesis consumer ARN to used for ingestion in Enhanced + /// Fan-Out mode. The consumer must be already created and ready to be used. + #[prost(string, tag = "3")] + pub consumer_arn: ::prost::alloc::string::String, + /// Required. AWS role ARN to be used for Federated Identity authentication + /// with Kinesis. Check the Pub/Sub docs for how to set up this role and the + /// required permissions that need to be attached to it. + #[prost(string, tag = "4")] + pub aws_role_arn: ::prost::alloc::string::String, + /// Required. The GCP service account to be used for Federated Identity + /// authentication with Kinesis (via a `AssumeRoleWithWebIdentity` call for + /// the provided role). The `aws_role_arn` must be set up with + /// `accounts.google.com:sub` equals to this service account number. + #[prost(string, tag = "5")] + pub gcp_service_account: ::prost::alloc::string::String, + } + /// Nested message and enum types in `AwsKinesis`. + pub mod aws_kinesis { + /// Possible states for ingestion from Amazon Kinesis Data Streams. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum State { + /// Default value. This value is unused. + Unspecified = 0, + /// Ingestion is active. + Active = 1, + /// Permission denied encountered while consuming data from Kinesis. + /// This can happen if: + /// - The provided `aws_role_arn` does not exist or does not have the + /// appropriate permissions attached. + /// - The provided `aws_role_arn` is not set up properly for Identity + /// Federation using `gcp_service_account`. + /// - The Pub/Sub SA is not granted the + /// `iam.serviceAccounts.getOpenIdToken` permission on + /// `gcp_service_account`. 
+ KinesisPermissionDenied = 2, + /// Permission denied encountered while publishing to the topic. This can + /// happen if the Pub/Sub SA has not been granted the [appropriate publish + /// permissions]() + PublishPermissionDenied = 3, + /// The Kinesis stream does not exist. + StreamNotFound = 4, + /// The Kinesis consumer does not exist. + ConsumerNotFound = 5, + } + impl State { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + State::Unspecified => "STATE_UNSPECIFIED", + State::Active => "ACTIVE", + State::KinesisPermissionDenied => "KINESIS_PERMISSION_DENIED", + State::PublishPermissionDenied => "PUBLISH_PERMISSION_DENIED", + State::StreamNotFound => "STREAM_NOT_FOUND", + State::ConsumerNotFound => "CONSUMER_NOT_FOUND", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "STATE_UNSPECIFIED" => Some(Self::Unspecified), + "ACTIVE" => Some(Self::Active), + "KINESIS_PERMISSION_DENIED" => Some(Self::KinesisPermissionDenied), + "PUBLISH_PERMISSION_DENIED" => Some(Self::PublishPermissionDenied), + "STREAM_NOT_FOUND" => Some(Self::StreamNotFound), + "CONSUMER_NOT_FOUND" => Some(Self::ConsumerNotFound), + _ => None, + } + } + } + } + /// Ingestion settings for Cloud Storage. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct CloudStorage { + /// Output only. An output-only field that indicates the state of the Cloud + /// Storage ingestion source. + #[prost(enumeration = "cloud_storage::State", tag = "1")] + pub state: i32, + /// Optional. Cloud Storage bucket. The bucket name must be without any + /// prefix like "gs://". 
See the \[bucket naming requirements\] + /// (). + #[prost(string, tag = "2")] + pub bucket: ::prost::alloc::string::String, + /// Optional. Only objects with a larger or equal creation timestamp will be + /// ingested. + #[prost(message, optional, tag = "6")] + pub minimum_object_create_time: ::core::option::Option<::prost_types::Timestamp>, + /// Optional. Glob pattern used to match objects that will be ingested. If + /// unset, all objects will be ingested. See the [supported + /// patterns](). + #[prost(string, tag = "9")] + pub match_glob: ::prost::alloc::string::String, + /// Defaults to text format. + #[prost(oneof = "cloud_storage::InputFormat", tags = "3, 4, 5")] + pub input_format: ::core::option::Option, + } + /// Nested message and enum types in `CloudStorage`. + pub mod cloud_storage { + /// Configuration for reading Cloud Storage data in text format. Each line of + /// text as specified by the delimiter will be set to the `data` field of a + /// Pub/Sub message. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct TextFormat { + /// Optional. When unset, '\n' is used. + #[prost(string, optional, tag = "1")] + pub delimiter: ::core::option::Option<::prost::alloc::string::String>, + } + /// Configuration for reading Cloud Storage data in Avro binary format. The + /// bytes of each object will be set to the `data` field of a Pub/Sub + /// message. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct AvroFormat {} + /// Configuration for reading Cloud Storage data written via [Cloud Storage + /// subscriptions](). The + /// data and attributes fields of the originally exported Pub/Sub message + /// will be restored when publishing. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct PubSubAvroFormat {} + /// Possible states for ingestion from Cloud Storage. 
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum State { + /// Default value. This value is unused. + Unspecified = 0, + /// Ingestion is active. + Active = 1, + /// Permission denied encountered while calling the Cloud Storage API. This + /// can happen if the Pub/Sub SA has not been granted the + /// [appropriate + /// permissions](): + /// - storage.objects.list: to list the objects in a bucket. + /// - storage.objects.get: to read the objects in a bucket. + /// - storage.buckets.get: to verify the bucket exists. + CloudStoragePermissionDenied = 2, + /// Permission denied encountered while publishing to the topic. This can + /// happen if the Pub/Sub SA has not been granted the [appropriate publish + /// permissions]() + PublishPermissionDenied = 3, + /// The provided Cloud Storage bucket doesn't exist. + BucketNotFound = 4, + /// The Cloud Storage bucket has too many objects, ingestion will be + /// paused. + TooManyObjects = 5, + } + impl State { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + State::Unspecified => "STATE_UNSPECIFIED", + State::Active => "ACTIVE", + State::CloudStoragePermissionDenied => "CLOUD_STORAGE_PERMISSION_DENIED", + State::PublishPermissionDenied => "PUBLISH_PERMISSION_DENIED", + State::BucketNotFound => "BUCKET_NOT_FOUND", + State::TooManyObjects => "TOO_MANY_OBJECTS", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "STATE_UNSPECIFIED" => Some(Self::Unspecified), + "ACTIVE" => Some(Self::Active), + "CLOUD_STORAGE_PERMISSION_DENIED" => Some(Self::CloudStoragePermissionDenied), + "PUBLISH_PERMISSION_DENIED" => Some(Self::PublishPermissionDenied), + "BUCKET_NOT_FOUND" => Some(Self::BucketNotFound), + "TOO_MANY_OBJECTS" => Some(Self::TooManyObjects), + _ => None, + } + } + } + /// Defaults to text format. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum InputFormat { + /// Optional. Data from Cloud Storage will be interpreted as text. + #[prost(message, tag = "3")] + TextFormat(TextFormat), + /// Optional. Data from Cloud Storage will be interpreted in Avro format. + #[prost(message, tag = "4")] + AvroFormat(AvroFormat), + /// Optional. It will be assumed data from Cloud Storage was written via + /// [Cloud Storage + /// subscriptions](). + #[prost(message, tag = "5")] + PubsubAvroFormat(PubSubAvroFormat), + } + } + /// Only one source type can have settings set. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Source { + /// Optional. Amazon Kinesis Data Streams. + #[prost(message, tag = "1")] + AwsKinesis(AwsKinesis), + /// Optional. Cloud Storage. + #[prost(message, tag = "2")] + CloudStorage(CloudStorage), + } +} +/// Settings for Platform Logs produced by Pub/Sub. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct PlatformLogsSettings { + /// Optional. The minimum severity level of Platform Logs that will be written. + #[prost(enumeration = "platform_logs_settings::Severity", tag = "1")] + pub severity: i32, +} +/// Nested message and enum types in `PlatformLogsSettings`. +pub mod platform_logs_settings { + /// Severity levels of Platform Logs. 
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum Severity { + /// Default value. Logs level is unspecified. Logs will be disabled. + Unspecified = 0, + /// Logs will be disabled. + Disabled = 1, + /// Debug logs and higher-severity logs will be written. + Debug = 2, + /// Info logs and higher-severity logs will be written. + Info = 3, + /// Warning logs and higher-severity logs will be written. + Warning = 4, + /// Only error logs will be written. + Error = 5, + } + impl Severity { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Severity::Unspecified => "SEVERITY_UNSPECIFIED", + Severity::Disabled => "DISABLED", + Severity::Debug => "DEBUG", + Severity::Info => "INFO", + Severity::Warning => "WARNING", + Severity::Error => "ERROR", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "SEVERITY_UNSPECIFIED" => Some(Self::Unspecified), + "DISABLED" => Some(Self::Disabled), + "DEBUG" => Some(Self::Debug), + "INFO" => Some(Self::Info), + "WARNING" => Some(Self::Warning), + "ERROR" => Some(Self::Error), + _ => None, + } + } + } +} /// A topic resource. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -613,38 +910,83 @@ pub struct Topic { /// must not start with `"goog"`. #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, - /// See \[Creating and managing labels\] + /// Optional. See \[Creating and managing labels\] /// (). 
#[prost(map = "string, string", tag = "2")] pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, - /// Policy constraining the set of Google Cloud Platform regions where messages - /// published to the topic may be stored. If not present, then no constraints - /// are in effect. + /// Optional. Policy constraining the set of Google Cloud Platform regions + /// where messages published to the topic may be stored. If not present, then + /// no constraints are in effect. #[prost(message, optional, tag = "3")] pub message_storage_policy: ::core::option::Option, - /// The resource name of the Cloud KMS CryptoKey to be used to protect access - /// to messages published on this topic. + /// Optional. The resource name of the Cloud KMS CryptoKey to be used to + /// protect access to messages published on this topic. /// /// The expected format is `projects/*/locations/*/keyRings/*/cryptoKeys/*`. #[prost(string, tag = "5")] pub kms_key_name: ::prost::alloc::string::String, - /// Settings for validating messages published against a schema. + /// Optional. Settings for validating messages published against a schema. #[prost(message, optional, tag = "6")] pub schema_settings: ::core::option::Option, - /// Reserved for future use. This field is set only in responses from the - /// server; it is ignored if it is set in any requests. + /// Optional. Reserved for future use. This field is set only in responses from + /// the server; it is ignored if it is set in any requests. #[prost(bool, tag = "7")] pub satisfies_pzs: bool, - /// Indicates the minimum duration to retain a message after it is published to - /// the topic. If this field is set, messages published to the topic in the - /// last `message_retention_duration` are always available to subscribers. For - /// instance, it allows any attached subscription to [seek to a + /// Optional. 
Indicates the minimum duration to retain a message after it is + /// published to the topic. If this field is set, messages published to the + /// topic in the last `message_retention_duration` are always available to + /// subscribers. For instance, it allows any attached subscription to [seek to + /// a /// timestamp]() /// that is up to `message_retention_duration` in the past. If this field is /// not set, message retention is controlled by settings on individual /// subscriptions. Cannot be more than 31 days or less than 10 minutes. #[prost(message, optional, tag = "8")] pub message_retention_duration: ::core::option::Option<::prost_types::Duration>, + /// Output only. An output-only field indicating the state of the topic. + #[prost(enumeration = "topic::State", tag = "9")] + pub state: i32, + /// Optional. Settings for ingestion from a data source into this topic. + #[prost(message, optional, tag = "10")] + pub ingestion_data_source_settings: ::core::option::Option, +} +/// Nested message and enum types in `Topic`. +pub mod topic { + /// The state of the topic. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum State { + /// Default value. This value is unused. + Unspecified = 0, + /// The topic does not have any persistent errors. + Active = 1, + /// Ingestion from the data source has encountered a permanent error. + /// See the more detailed error state in the corresponding ingestion + /// source configuration. + IngestionResourceError = 2, + } + impl State { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + State::Unspecified => "STATE_UNSPECIFIED", + State::Active => "ACTIVE", + State::IngestionResourceError => "INGESTION_RESOURCE_ERROR", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "STATE_UNSPECIFIED" => Some(Self::Unspecified), + "ACTIVE" => Some(Self::Active), + "INGESTION_RESOURCE_ERROR" => Some(Self::IngestionResourceError), + _ => None, + } + } + } } /// A message that is published by publishers and consumed by subscribers. The /// message must contain either a non-empty data field or at least one attribute. @@ -657,12 +999,12 @@ pub struct Topic { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct PubsubMessage { - /// The message data field. If this field is empty, the message must contain - /// at least one attribute. + /// Optional. The message data field. If this field is empty, the message must + /// contain at least one attribute. #[prost(bytes = "bytes", tag = "1")] pub data: ::prost::bytes::Bytes, - /// Attributes for this message. If this field is empty, the message must - /// contain non-empty data. This can be used to filter messages on the + /// Optional. Attributes for this message. If this field is empty, the message + /// must contain non-empty data. This can be used to filter messages on the /// subscription. #[prost(map = "string, string", tag = "2")] pub attributes: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, @@ -677,13 +1019,13 @@ pub struct PubsubMessage { /// publisher in a `Publish` call. #[prost(message, optional, tag = "4")] pub publish_time: ::core::option::Option<::prost_types::Timestamp>, - /// If non-empty, identifies related messages for which publish order should be - /// respected. 
If a `Subscription` has `enable_message_ordering` set to `true`, - /// messages published with the same non-empty `ordering_key` value will be - /// delivered to subscribers in the order in which they are received by the - /// Pub/Sub system. All `PubsubMessage`s published in a given `PublishRequest` - /// must specify the same `ordering_key` value. - /// For more information, see [ordering + /// Optional. If non-empty, identifies related messages for which publish order + /// should be respected. If a `Subscription` has `enable_message_ordering` set + /// to `true`, messages published with the same non-empty `ordering_key` value + /// will be delivered to subscribers in the order in which they are received by + /// the Pub/Sub system. All `PubsubMessage`s published in a given + /// `PublishRequest` must specify the same `ordering_key` value. For more + /// information, see [ordering /// messages](). #[prost(string, tag = "5")] pub ordering_key: ::prost::alloc::string::String, @@ -728,9 +1070,9 @@ pub struct PublishRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct PublishResponse { - /// The server-assigned ID of each published message, in the same order as - /// the messages in the request. IDs are guaranteed to be unique within - /// the topic. + /// Optional. The server-assigned ID of each published message, in the same + /// order as the messages in the request. IDs are guaranteed to be unique + /// within the topic. #[prost(string, repeated, tag = "1")] pub message_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } @@ -742,12 +1084,12 @@ pub struct ListTopicsRequest { /// Format is `projects/{project-id}`. #[prost(string, tag = "1")] pub project: ::prost::alloc::string::String, - /// Maximum number of topics to return. + /// Optional. Maximum number of topics to return. 
#[prost(int32, tag = "2")] pub page_size: i32, - /// The value returned by the last `ListTopicsResponse`; indicates that this is - /// a continuation of a prior `ListTopics` call, and that the system should - /// return the next page of data. + /// Optional. The value returned by the last `ListTopicsResponse`; indicates + /// that this is a continuation of a prior `ListTopics` call, and that the + /// system should return the next page of data. #[prost(string, tag = "3")] pub page_token: ::prost::alloc::string::String, } @@ -755,11 +1097,11 @@ pub struct ListTopicsRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListTopicsResponse { - /// The resulting topics. + /// Optional. The resulting topics. #[prost(message, repeated, tag = "1")] pub topics: ::prost::alloc::vec::Vec, - /// If not empty, indicates that there may be more topics that match the - /// request; this value should be passed in a new `ListTopicsRequest`. + /// Optional. If not empty, indicates that there may be more topics that match + /// the request; this value should be passed in a new `ListTopicsRequest`. #[prost(string, tag = "2")] pub next_page_token: ::prost::alloc::string::String, } @@ -771,12 +1113,12 @@ pub struct ListTopicSubscriptionsRequest { /// Format is `projects/{project}/topics/{topic}`. #[prost(string, tag = "1")] pub topic: ::prost::alloc::string::String, - /// Maximum number of subscription names to return. + /// Optional. Maximum number of subscription names to return. #[prost(int32, tag = "2")] pub page_size: i32, - /// The value returned by the last `ListTopicSubscriptionsResponse`; indicates - /// that this is a continuation of a prior `ListTopicSubscriptions` call, and - /// that the system should return the next page of data. + /// Optional. 
The value returned by the last `ListTopicSubscriptionsResponse`; + /// indicates that this is a continuation of a prior `ListTopicSubscriptions` + /// call, and that the system should return the next page of data. #[prost(string, tag = "3")] pub page_token: ::prost::alloc::string::String, } @@ -784,11 +1126,12 @@ pub struct ListTopicSubscriptionsRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListTopicSubscriptionsResponse { - /// The names of subscriptions attached to the topic specified in the request. + /// Optional. The names of subscriptions attached to the topic specified in the + /// request. #[prost(string, repeated, tag = "1")] pub subscriptions: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// If not empty, indicates that there may be more subscriptions that match - /// the request; this value should be passed in a new + /// Optional. If not empty, indicates that there may be more subscriptions that + /// match the request; this value should be passed in a new /// `ListTopicSubscriptionsRequest` to get more subscriptions. #[prost(string, tag = "2")] pub next_page_token: ::prost::alloc::string::String, @@ -801,12 +1144,12 @@ pub struct ListTopicSnapshotsRequest { /// Format is `projects/{project}/topics/{topic}`. #[prost(string, tag = "1")] pub topic: ::prost::alloc::string::String, - /// Maximum number of snapshot names to return. + /// Optional. Maximum number of snapshot names to return. #[prost(int32, tag = "2")] pub page_size: i32, - /// The value returned by the last `ListTopicSnapshotsResponse`; indicates - /// that this is a continuation of a prior `ListTopicSnapshots` call, and - /// that the system should return the next page of data. + /// Optional. The value returned by the last `ListTopicSnapshotsResponse`; + /// indicates that this is a continuation of a prior `ListTopicSnapshots` call, + /// and that the system should return the next page of data. 
#[prost(string, tag = "3")] pub page_token: ::prost::alloc::string::String, } @@ -814,11 +1157,11 @@ pub struct ListTopicSnapshotsRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListTopicSnapshotsResponse { - /// The names of the snapshots that match the request. + /// Optional. The names of the snapshots that match the request. #[prost(string, repeated, tag = "1")] pub snapshots: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// If not empty, indicates that there may be more snapshots that match - /// the request; this value should be passed in a new + /// Optional. If not empty, indicates that there may be more snapshots that + /// match the request; this value should be passed in a new /// `ListTopicSnapshotsRequest` to get more snapshots. #[prost(string, tag = "2")] pub next_page_token: ::prost::alloc::string::String, @@ -865,23 +1208,23 @@ pub struct Subscription { /// field will be `_deleted-topic_` if the topic has been deleted. #[prost(string, tag = "2")] pub topic: ::prost::alloc::string::String, - /// If push delivery is used with this subscription, this field is + /// Optional. If push delivery is used with this subscription, this field is /// used to configure it. #[prost(message, optional, tag = "4")] pub push_config: ::core::option::Option, - /// If delivery to BigQuery is used with this subscription, this field is - /// used to configure it. + /// Optional. If delivery to BigQuery is used with this subscription, this + /// field is used to configure it. #[prost(message, optional, tag = "18")] pub bigquery_config: ::core::option::Option, - /// If delivery to Google Cloud Storage is used with this subscription, this - /// field is used to configure it. + /// Optional. If delivery to Google Cloud Storage is used with this + /// subscription, this field is used to configure it. 
#[prost(message, optional, tag = "22")] pub cloud_storage_config: ::core::option::Option, - /// The approximate amount of time (on a best-effort basis) Pub/Sub waits for - /// the subscriber to acknowledge receipt before resending the message. In the - /// interval after the message is delivered and before it is acknowledged, it - /// is considered to be _outstanding_. During that time period, the - /// message will not be redelivered (on a best-effort basis). + /// Optional. The approximate amount of time (on a best-effort basis) Pub/Sub + /// waits for the subscriber to acknowledge receipt before resending the + /// message. In the interval after the message is delivered and before it is + /// acknowledged, it is considered to be _outstanding_. During that time + /// period, the message will not be redelivered (on a best-effort basis). /// /// For pull subscriptions, this value is used as the initial value for the ack /// deadline. To override this value for a given message, call @@ -899,7 +1242,7 @@ pub struct Subscription { /// system will eventually redeliver the message. #[prost(int32, tag = "5")] pub ack_deadline_seconds: i32, - /// Indicates whether to retain acknowledged messages. If true, then + /// Optional. Indicates whether to retain acknowledged messages. If true, then /// messages are not expunged from the subscription's backlog, even if they are /// acknowledged, until they fall out of the `message_retention_duration` /// window. This must be true if you would like to \[`Seek` to a timestamp\] @@ -907,52 +1250,51 @@ pub struct Subscription { /// the past to replay previously-acknowledged messages. #[prost(bool, tag = "7")] pub retain_acked_messages: bool, - /// How long to retain unacknowledged messages in the subscription's backlog, - /// from the moment a message is published. 
- /// If `retain_acked_messages` is true, then this also configures the retention - /// of acknowledged messages, and thus configures how far back in time a `Seek` - /// can be done. Defaults to 7 days. Cannot be more than 7 days or less than 10 - /// minutes. + /// Optional. How long to retain unacknowledged messages in the subscription's + /// backlog, from the moment a message is published. If `retain_acked_messages` + /// is true, then this also configures the retention of acknowledged messages, + /// and thus configures how far back in time a `Seek` can be done. Defaults to + /// 7 days. Cannot be more than 31 days or less than 10 minutes. #[prost(message, optional, tag = "8")] pub message_retention_duration: ::core::option::Option<::prost_types::Duration>, - /// See [Creating and managing + /// Optional. See [Creating and managing /// labels](). #[prost(map = "string, string", tag = "9")] pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, - /// If true, messages published with the same `ordering_key` in `PubsubMessage` - /// will be delivered to the subscribers in the order in which they - /// are received by the Pub/Sub system. Otherwise, they may be delivered in - /// any order. + /// Optional. If true, messages published with the same `ordering_key` in + /// `PubsubMessage` will be delivered to the subscribers in the order in which + /// they are received by the Pub/Sub system. Otherwise, they may be delivered + /// in any order. #[prost(bool, tag = "10")] pub enable_message_ordering: bool, - /// A policy that specifies the conditions for this subscription's expiration. - /// A subscription is considered active as long as any connected subscriber is - /// successfully consuming messages from the subscription or is issuing - /// operations on the subscription. If `expiration_policy` is not set, a - /// *default policy* with `ttl` of 31 days will be used. The minimum allowed + /// Optional. 
A policy that specifies the conditions for this subscription's + /// expiration. A subscription is considered active as long as any connected + /// subscriber is successfully consuming messages from the subscription or is + /// issuing operations on the subscription. If `expiration_policy` is not set, + /// a *default policy* with `ttl` of 31 days will be used. The minimum allowed /// value for `expiration_policy.ttl` is 1 day. If `expiration_policy` is set, /// but `expiration_policy.ttl` is not set, the subscription never expires. #[prost(message, optional, tag = "11")] pub expiration_policy: ::core::option::Option, - /// An expression written in the Pub/Sub [filter + /// Optional. An expression written in the Pub/Sub [filter /// language](). If non-empty, /// then only `PubsubMessage`s whose `attributes` field matches the filter are /// delivered on this subscription. If empty, then no messages are filtered /// out. #[prost(string, tag = "12")] pub filter: ::prost::alloc::string::String, - /// A policy that specifies the conditions for dead lettering messages in - /// this subscription. If dead_letter_policy is not set, dead lettering - /// is disabled. + /// Optional. A policy that specifies the conditions for dead lettering + /// messages in this subscription. If dead_letter_policy is not set, dead + /// lettering is disabled. /// - /// The Cloud Pub/Sub service account associated with this subscriptions's + /// The Pub/Sub service account associated with this subscriptions's /// parent project (i.e., /// service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have /// permission to Acknowledge() messages on this subscription. #[prost(message, optional, tag = "13")] pub dead_letter_policy: ::core::option::Option, - /// A policy that specifies how Pub/Sub retries message delivery for this - /// subscription. + /// Optional. A policy that specifies how Pub/Sub retries message delivery for + /// this subscription. 
/// /// If not set, the default retry policy is applied. This generally implies /// that messages will be retried as soon as possible for healthy subscribers. @@ -960,15 +1302,16 @@ pub struct Subscription { /// exceeded events for a given message. #[prost(message, optional, tag = "14")] pub retry_policy: ::core::option::Option, - /// Indicates whether the subscription is detached from its topic. Detached - /// subscriptions don't receive messages from their topic and don't retain any - /// backlog. `Pull` and `StreamingPull` requests will return + /// Optional. Indicates whether the subscription is detached from its topic. + /// Detached subscriptions don't receive messages from their topic and don't + /// retain any backlog. `Pull` and `StreamingPull` requests will return /// FAILED_PRECONDITION. If the subscription is a push subscription, pushes to /// the endpoint will not be made. #[prost(bool, tag = "15")] pub detached: bool, - /// If true, Pub/Sub provides the following guarantees for the delivery of - /// a message with a given value of `message_id` on this subscription: + /// Optional. If true, Pub/Sub provides the following guarantees for the + /// delivery of a message with a given value of `message_id` on this + /// subscription: /// /// * The message sent to a subscriber is guaranteed not to be resent /// before the message's acknowledgement deadline expires. @@ -992,9 +1335,29 @@ pub struct Subscription { /// subscription can receive messages. #[prost(enumeration = "subscription::State", tag = "19")] pub state: i32, + /// Output only. Information about the associated Analytics Hub subscription. + /// Only set if the subscritpion is created by Analytics Hub. + #[prost(message, optional, tag = "23")] + pub analytics_hub_subscription_info: ::core::option::Option, } /// Nested message and enum types in `Subscription`. pub mod subscription { + /// Information about an associated Analytics Hub subscription + /// (). 
+ #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct AnalyticsHubSubscriptionInfo { + /// Optional. The name of the associated Analytics Hub listing resource. + /// Pattern: + /// "projects/{project}/locations/{location}/dataExchanges/{data_exchange}/listings/{listing}" + #[prost(string, tag = "1")] + pub listing: ::prost::alloc::string::String, + /// Optional. The name of the associated Analytics Hub subscription resource. + /// Pattern: + /// "projects/{project}/locations/{location}/subscriptions/{subscription}" + #[prost(string, tag = "2")] + pub subscription: ::prost::alloc::string::String, + } /// Possible states for a subscription. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] @@ -1031,7 +1394,7 @@ pub mod subscription { } } } -/// A policy that specifies how Cloud Pub/Sub retries message delivery. +/// A policy that specifies how Pub/Sub retries message delivery. /// /// Retry delay will be exponential based on provided minimum and maximum /// backoffs. @@ -1045,12 +1408,13 @@ pub mod subscription { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct RetryPolicy { - /// The minimum delay between consecutive deliveries of a given message. - /// Value should be between 0 and 600 seconds. Defaults to 10 seconds. + /// Optional. The minimum delay between consecutive deliveries of a given + /// message. Value should be between 0 and 600 seconds. Defaults to 10 seconds. #[prost(message, optional, tag = "1")] pub minimum_backoff: ::core::option::Option<::prost_types::Duration>, - /// The maximum delay between consecutive deliveries of a given message. - /// Value should be between 0 and 600 seconds. Defaults to 600 seconds. + /// Optional. The maximum delay between consecutive deliveries of a given + /// message. Value should be between 0 and 600 seconds. Defaults to 600 + /// seconds. 
#[prost(message, optional, tag = "2")] pub maximum_backoff: ::core::option::Option<::prost_types::Duration>, } @@ -1062,19 +1426,19 @@ pub struct RetryPolicy { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeadLetterPolicy { - /// The name of the topic to which dead letter messages should be published. - /// Format is `projects/{project}/topics/{topic}`.The Cloud Pub/Sub service - /// account associated with the enclosing subscription's parent project (i.e., - /// service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have - /// permission to Publish() to this topic. + /// Optional. The name of the topic to which dead letter messages should be + /// published. Format is `projects/{project}/topics/{topic}`.The Pub/Sub + /// service account associated with the enclosing subscription's parent project + /// (i.e., service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must + /// have permission to Publish() to this topic. /// /// The operation will fail if the topic does not exist. /// Users should ensure that there is a subscription attached to this topic /// since messages published to a topic with no subscriptions are lost. #[prost(string, tag = "1")] pub dead_letter_topic: ::prost::alloc::string::String, - /// The maximum number of delivery attempts for any message. The value must be - /// between 5 and 100. + /// Optional. The maximum number of delivery attempts for any message. The + /// value must be between 5 and 100. /// /// The number of delivery attempts is defined as 1 + (the sum of number of /// NACKs and number of times the acknowledgement deadline has been exceeded @@ -1094,12 +1458,12 @@ pub struct DeadLetterPolicy { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ExpirationPolicy { - /// Specifies the "time-to-live" duration for an associated resource. 
The - /// resource expires if it is not active for a period of `ttl`. The definition - /// of "activity" depends on the type of the associated resource. The minimum - /// and maximum allowed values for `ttl` depend on the type of the associated - /// resource, as well. If `ttl` is not set, the associated resource never - /// expires. + /// Optional. Specifies the "time-to-live" duration for an associated resource. + /// The resource expires if it is not active for a period of `ttl`. The + /// definition of "activity" depends on the type of the associated resource. + /// The minimum and maximum allowed values for `ttl` depend on the type of the + /// associated resource, as well. If `ttl` is not set, the associated resource + /// never expires. #[prost(message, optional, tag = "1")] pub ttl: ::core::option::Option<::prost_types::Duration>, } @@ -1107,12 +1471,12 @@ pub struct ExpirationPolicy { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct PushConfig { - /// A URL locating the endpoint to which messages should be pushed. + /// Optional. A URL locating the endpoint to which messages should be pushed. /// For example, a Webhook endpoint might use ` #[prost(string, tag = "1")] pub push_endpoint: ::prost::alloc::string::String, - /// Endpoint configuration attributes that can be used to control different - /// aspects of the message delivery. + /// Optional. Endpoint configuration attributes that can be used to control + /// different aspects of the message delivery. /// /// The only currently supported attribute is `x-goog-version`, which you can /// use to change the format of the pushed message. This attribute @@ -1136,7 +1500,7 @@ pub struct PushConfig { pub attributes: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, /// An authentication method used by push endpoints to verify the source of /// push requests. 
This can be used with push endpoints that are private by - /// default to allow requests only from the Cloud Pub/Sub system, for example. + /// default to allow requests only from the Pub/Sub system, for example. /// This field is optional and should be set only by users interested in /// authenticated push. #[prost(oneof = "push_config::AuthenticationMethod", tags = "3")] @@ -1154,19 +1518,20 @@ pub mod push_config { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct OidcToken { - /// [Service account + /// Optional. [Service account /// email]() /// used for generating the OIDC token. For more information /// on setting up authentication, see /// [Push subscriptions](). #[prost(string, tag = "1")] pub service_account_email: ::prost::alloc::string::String, - /// Audience to be used when generating OIDC token. The audience claim - /// identifies the recipients that the JWT is intended for. The audience - /// value is a single case-sensitive string. Having multiple values (array) - /// for the audience field is not supported. More info about the OIDC JWT - /// token audience here: - /// Note: if not specified, the Push endpoint URL will be used. + /// Optional. Audience to be used when generating OIDC token. The audience + /// claim identifies the recipients that the JWT is intended for. The + /// audience value is a single case-sensitive string. Having multiple values + /// (array) for the audience field is not supported. More info about the OIDC + /// JWT token audience here: + /// Note: if not specified, + /// the Push endpoint URL will be used. #[prost(string, tag = "2")] pub audience: ::prost::alloc::string::String, } @@ -1180,7 +1545,7 @@ pub mod push_config { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct NoWrapper { - /// When true, writes the Pub/Sub message metadata to + /// Optional. 
When true, writes the Pub/Sub message metadata to /// `x-goog-pubsub-:` headers of the HTTP request. Writes the /// Pub/Sub message attributes to `:` headers of the HTTP request. #[prost(bool, tag = "1")] @@ -1188,14 +1553,15 @@ pub mod push_config { } /// An authentication method used by push endpoints to verify the source of /// push requests. This can be used with push endpoints that are private by - /// default to allow requests only from the Cloud Pub/Sub system, for example. + /// default to allow requests only from the Pub/Sub system, for example. /// This field is optional and should be set only by users interested in /// authenticated push. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum AuthenticationMethod { - /// If specified, Pub/Sub will generate and attach an OIDC JWT token as an - /// `Authorization` header in the HTTP request for every pushed message. + /// Optional. If specified, Pub/Sub will generate and attach an OIDC JWT + /// token as an `Authorization` header in the HTTP request for every pushed + /// message. #[prost(message, tag = "3")] OidcToken(OidcToken), } @@ -1204,12 +1570,12 @@ pub mod push_config { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum Wrapper { - /// When set, the payload to the push endpoint is in the form of the JSON - /// representation of a PubsubMessage + /// Optional. When set, the payload to the push endpoint is in the form of + /// the JSON representation of a PubsubMessage /// (). #[prost(message, tag = "4")] PubsubWrapper(PubsubWrapper), - /// When set, the payload to the push endpoint is not wrapped. + /// Optional. When set, the payload to the push endpoint is not wrapped. 
#[prost(message, tag = "5")] NoWrapper(NoWrapper), } @@ -1218,25 +1584,26 @@ pub mod push_config { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BigQueryConfig { - /// The name of the table to which to write data, of the form + /// Optional. The name of the table to which to write data, of the form /// {projectId}.{datasetId}.{tableId} #[prost(string, tag = "1")] pub table: ::prost::alloc::string::String, - /// When true, use the topic's schema as the columns to write to in BigQuery, - /// if it exists. + /// Optional. When true, use the topic's schema as the columns to write to in + /// BigQuery, if it exists. `use_topic_schema` and `use_table_schema` cannot be + /// enabled at the same time. #[prost(bool, tag = "2")] pub use_topic_schema: bool, - /// When true, write the subscription name, message_id, publish_time, + /// Optional. When true, write the subscription name, message_id, publish_time, /// attributes, and ordering_key to additional columns in the table. The /// subscription name, message_id, and publish_time fields are put in their own /// columns while all other message properties (other than data) are written to /// a JSON object in the attributes column. #[prost(bool, tag = "3")] pub write_metadata: bool, - /// When true and use_topic_schema is true, any fields that are a part of the - /// topic schema that are not part of the BigQuery table schema are dropped - /// when writing to BigQuery. Otherwise, the schemas must be kept in sync and - /// any messages with extra fields are not written and remain in the + /// Optional. When true and use_topic_schema is true, any fields that are a + /// part of the topic schema that are not part of the BigQuery table schema are + /// dropped when writing to BigQuery. Otherwise, the schemas must be kept in + /// sync and any messages with extra fields are not written and remain in the /// subscription's backlog. 
#[prost(bool, tag = "4")] pub drop_unknown_fields: bool, @@ -1244,6 +1611,19 @@ pub struct BigQueryConfig { /// subscription can receive messages. #[prost(enumeration = "big_query_config::State", tag = "5")] pub state: i32, + /// Optional. When true, use the BigQuery table's schema as the columns to + /// write to in BigQuery. `use_table_schema` and `use_topic_schema` cannot be + /// enabled at the same time. + #[prost(bool, tag = "6")] + pub use_table_schema: bool, + /// Optional. The service account to use to write to BigQuery. The subscription + /// creator or updater that specifies this field must have + /// `iam.serviceAccounts.actAs` permission on the service account. If not + /// specified, the Pub/Sub [service + /// agent](), + /// service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. + #[prost(string, tag = "7")] + pub service_account_email: ::prost::alloc::string::String, } /// Nested message and enum types in `BigQueryConfig`. pub mod big_query_config { @@ -1266,6 +1646,9 @@ pub mod big_query_config { NotFound = 3, /// Cannot write to the BigQuery table due to a schema mismatch. SchemaMismatch = 4, + /// Cannot write to the destination because enforce_in_transit is set to true + /// and the destination locations are not in the allowed regions. + InTransitLocationRestriction = 5, } impl State { /// String value of the enum field names used in the ProtoBuf definition. @@ -1279,6 +1662,7 @@ pub mod big_query_config { State::PermissionDenied => "PERMISSION_DENIED", State::NotFound => "NOT_FOUND", State::SchemaMismatch => "SCHEMA_MISMATCH", + State::InTransitLocationRestriction => "IN_TRANSIT_LOCATION_RESTRICTION", } } /// Creates an enum from field names used in the ProtoBuf definition. 
@@ -1289,6 +1673,7 @@ pub mod big_query_config { "PERMISSION_DENIED" => Some(Self::PermissionDenied), "NOT_FOUND" => Some(Self::NotFound), "SCHEMA_MISMATCH" => Some(Self::SchemaMismatch), + "IN_TRANSIT_LOCATION_RESTRICTION" => Some(Self::InTransitLocationRestriction), _ => None, } } @@ -1304,29 +1689,46 @@ pub struct CloudStorageConfig { /// requirements] (). #[prost(string, tag = "1")] pub bucket: ::prost::alloc::string::String, - /// User-provided prefix for Cloud Storage filename. See the [object naming - /// requirements](). + /// Optional. User-provided prefix for Cloud Storage filename. See the [object + /// naming requirements](). #[prost(string, tag = "2")] pub filename_prefix: ::prost::alloc::string::String, - /// User-provided suffix for Cloud Storage filename. See the [object naming - /// requirements](). Must - /// not end in "/". + /// Optional. User-provided suffix for Cloud Storage filename. See the [object + /// naming requirements](). + /// Must not end in "/". #[prost(string, tag = "3")] pub filename_suffix: ::prost::alloc::string::String, - /// The maximum duration that can elapse before a new Cloud Storage file is - /// created. Min 1 minute, max 10 minutes, default 5 minutes. May not exceed - /// the subscription's acknowledgement deadline. + /// Optional. User-provided format string specifying how to represent datetimes + /// in Cloud Storage filenames. See the [datetime format + /// guidance](). + #[prost(string, tag = "10")] + pub filename_datetime_format: ::prost::alloc::string::String, + /// Optional. The maximum duration that can elapse before a new Cloud Storage + /// file is created. Min 1 minute, max 10 minutes, default 5 minutes. May not + /// exceed the subscription's acknowledgement deadline. #[prost(message, optional, tag = "6")] pub max_duration: ::core::option::Option<::prost_types::Duration>, - /// The maximum bytes that can be written to a Cloud Storage file before a new - /// file is created. Min 1 KB, max 10 GiB. 
The max_bytes limit may be exceeded - /// in cases where messages are larger than the limit. + /// Optional. The maximum bytes that can be written to a Cloud Storage file + /// before a new file is created. Min 1 KB, max 10 GiB. The max_bytes limit may + /// be exceeded in cases where messages are larger than the limit. #[prost(int64, tag = "7")] pub max_bytes: i64, + /// Optional. The maximum number of messages that can be written to a Cloud + /// Storage file before a new file is created. Min 1000 messages. + #[prost(int64, tag = "8")] + pub max_messages: i64, /// Output only. An output-only field that indicates whether or not the /// subscription can receive messages. #[prost(enumeration = "cloud_storage_config::State", tag = "9")] pub state: i32, + /// Optional. The service account to use to write to Cloud Storage. The + /// subscription creator or updater that specifies this field must have + /// `iam.serviceAccounts.actAs` permission on the service account. If not + /// specified, the Pub/Sub + /// [service agent](), + /// service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. + #[prost(string, tag = "11")] + pub service_account_email: ::prost::alloc::string::String, /// Defaults to text format. #[prost(oneof = "cloud_storage_config::OutputFormat", tags = "4, 5")] pub output_format: ::core::option::Option, @@ -1344,14 +1746,18 @@ pub mod cloud_storage_config { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct AvroConfig { - /// When true, write the subscription name, message_id, publish_time, - /// attributes, and ordering_key as additional fields in the output. The - /// subscription name, message_id, and publish_time fields are put in their - /// own fields while all other message properties other than data (for - /// example, an ordering_key, if present) are added as entries in the - /// attributes map. + /// Optional. 
When true, write the subscription name, message_id, + /// publish_time, attributes, and ordering_key as additional fields in the + /// output. The subscription name, message_id, and publish_time fields are + /// put in their own fields while all other message properties other than + /// data (for example, an ordering_key, if present) are added as entries in + /// the attributes map. #[prost(bool, tag = "1")] pub write_metadata: bool, + /// Optional. When true, the output Cloud Storage file will be serialized + /// using the topic schema, if it exists. + #[prost(bool, tag = "2")] + pub use_topic_schema: bool, } /// Possible states for a Cloud Storage subscription. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] @@ -1366,6 +1772,12 @@ pub mod cloud_storage_config { PermissionDenied = 2, /// Cannot write to the Cloud Storage bucket because it does not exist. NotFound = 3, + /// Cannot write to the destination because enforce_in_transit is set to true + /// and the destination locations are not in the allowed regions. + InTransitLocationRestriction = 4, + /// Cannot write to the Cloud Storage bucket due to an incompatibility + /// between the topic schema and subscription settings. + SchemaMismatch = 5, } impl State { /// String value of the enum field names used in the ProtoBuf definition. @@ -1378,6 +1790,8 @@ pub mod cloud_storage_config { State::Active => "ACTIVE", State::PermissionDenied => "PERMISSION_DENIED", State::NotFound => "NOT_FOUND", + State::InTransitLocationRestriction => "IN_TRANSIT_LOCATION_RESTRICTION", + State::SchemaMismatch => "SCHEMA_MISMATCH", } } /// Creates an enum from field names used in the ProtoBuf definition. 
@@ -1387,6 +1801,8 @@ pub mod cloud_storage_config { "ACTIVE" => Some(Self::Active), "PERMISSION_DENIED" => Some(Self::PermissionDenied), "NOT_FOUND" => Some(Self::NotFound), + "IN_TRANSIT_LOCATION_RESTRICTION" => Some(Self::InTransitLocationRestriction), + "SCHEMA_MISMATCH" => Some(Self::SchemaMismatch), _ => None, } } @@ -1395,10 +1811,12 @@ pub mod cloud_storage_config { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum OutputFormat { - /// If set, message data will be written to Cloud Storage in text format. + /// Optional. If set, message data will be written to Cloud Storage in text + /// format. #[prost(message, tag = "4")] TextConfig(TextConfig), - /// If set, message data will be written to Cloud Storage in Avro format. + /// Optional. If set, message data will be written to Cloud Storage in Avro + /// format. #[prost(message, tag = "5")] AvroConfig(AvroConfig), } @@ -1407,14 +1825,14 @@ pub mod cloud_storage_config { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReceivedMessage { - /// This ID can be used to acknowledge the received message. + /// Optional. This ID can be used to acknowledge the received message. #[prost(string, tag = "1")] pub ack_id: ::prost::alloc::string::String, - /// The message. + /// Optional. The message. #[prost(message, optional, tag = "2")] pub message: ::core::option::Option, - /// The approximate number of times that Cloud Pub/Sub has attempted to deliver - /// the associated message to a subscriber. + /// Optional. The approximate number of times that Pub/Sub has attempted to + /// deliver the associated message to a subscriber. /// /// More precisely, this is 1 + (number of NACKs) + /// (number of ack_deadline exceeds) for this message. @@ -1461,12 +1879,12 @@ pub struct ListSubscriptionsRequest { /// Format is `projects/{project-id}`. 
#[prost(string, tag = "1")] pub project: ::prost::alloc::string::String, - /// Maximum number of subscriptions to return. + /// Optional. Maximum number of subscriptions to return. #[prost(int32, tag = "2")] pub page_size: i32, - /// The value returned by the last `ListSubscriptionsResponse`; indicates that - /// this is a continuation of a prior `ListSubscriptions` call, and that the - /// system should return the next page of data. + /// Optional. The value returned by the last `ListSubscriptionsResponse`; + /// indicates that this is a continuation of a prior `ListSubscriptions` call, + /// and that the system should return the next page of data. #[prost(string, tag = "3")] pub page_token: ::prost::alloc::string::String, } @@ -1474,11 +1892,11 @@ pub struct ListSubscriptionsRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListSubscriptionsResponse { - /// The subscriptions that match the request. + /// Optional. The subscriptions that match the request. #[prost(message, repeated, tag = "1")] pub subscriptions: ::prost::alloc::vec::Vec, - /// If not empty, indicates that there may be more subscriptions that match - /// the request; this value should be passed in a new + /// Optional. If not empty, indicates that there may be more subscriptions that + /// match the request; this value should be passed in a new /// `ListSubscriptionsRequest` to get more subscriptions. #[prost(string, tag = "2")] pub next_page_token: ::prost::alloc::string::String, @@ -1537,8 +1955,8 @@ pub struct PullRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct PullResponse { - /// Received Pub/Sub messages. The list will be empty if there are no more - /// messages available in the backlog, or if no messages could be returned + /// Optional. Received Pub/Sub messages. 
The list will be empty if there are no + /// more messages available in the backlog, or if no messages could be returned /// before the request timeout. For JSON, the response can be entirely /// empty. The Pub/Sub system may return fewer than the `maxMessages` requested /// even if there are more messages available in the backlog. @@ -1563,7 +1981,8 @@ pub struct ModifyAckDeadlineRequest { /// delivery to another subscriber client. This typically results in an /// increase in the rate of message redeliveries (that is, duplicates). /// The minimum deadline you can specify is 0 seconds. - /// The maximum deadline you can specify is 600 seconds (10 minutes). + /// The maximum deadline you can specify in a single request is 600 seconds + /// (10 minutes). #[prost(int32, tag = "3")] pub ack_deadline_seconds: i32, } @@ -1593,14 +2012,15 @@ pub struct StreamingPullRequest { /// Format is `projects/{project}/subscriptions/{sub}`. #[prost(string, tag = "1")] pub subscription: ::prost::alloc::string::String, - /// List of acknowledgement IDs for acknowledging previously received messages - /// (received on this stream or a different stream). If an ack ID has expired, - /// the corresponding message may be redelivered later. Acknowledging a message - /// more than once will not result in an error. If the acknowledgement ID is - /// malformed, the stream will be aborted with status `INVALID_ARGUMENT`. + /// Optional. List of acknowledgement IDs for acknowledging previously received + /// messages (received on this stream or a different stream). If an ack ID has + /// expired, the corresponding message may be redelivered later. Acknowledging + /// a message more than once will not result in an error. If the + /// acknowledgement ID is malformed, the stream will be aborted with status + /// `INVALID_ARGUMENT`. 
#[prost(string, repeated, tag = "2")] pub ack_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// The list of new ack deadlines for the IDs listed in + /// Optional. The list of new ack deadlines for the IDs listed in /// `modify_deadline_ack_ids`. The size of this list must be the same as the /// size of `modify_deadline_ack_ids`. If it differs the stream will be aborted /// with `INVALID_ARGUMENT`. Each element in this list is applied to the @@ -1611,11 +2031,11 @@ pub struct StreamingPullRequest { /// the message is immediately made available for another streaming or /// non-streaming pull request. If the value is < 0 (an error), the stream will /// be aborted with status `INVALID_ARGUMENT`. - #[prost(int32, repeated, tag = "3")] + #[prost(int32, repeated, packed = "false", tag = "3")] pub modify_deadline_seconds: ::prost::alloc::vec::Vec, - /// List of acknowledgement IDs whose deadline will be modified based on the - /// corresponding element in `modify_deadline_seconds`. This field can be used - /// to indicate that more time is needed to process a message by the + /// Optional. List of acknowledgement IDs whose deadline will be modified based + /// on the corresponding element in `modify_deadline_seconds`. This field can + /// be used to indicate that more time is needed to process a message by the /// subscriber, or to make the message available for redelivery if the /// processing was interrupted. #[prost(string, repeated, tag = "4")] @@ -1626,16 +2046,16 @@ pub struct StreamingPullRequest { /// seconds. The maximum deadline you can specify is 600 seconds (10 minutes). #[prost(int32, tag = "5")] pub stream_ack_deadline_seconds: i32, - /// A unique identifier that is used to distinguish client instances from each - /// other. Only needs to be provided on the initial request. 
When a stream - /// disconnects and reconnects for the same stream, the client_id should be set - /// to the same value so that state associated with the old stream can be - /// transferred to the new stream. The same client_id should not be used for + /// Optional. A unique identifier that is used to distinguish client instances + /// from each other. Only needs to be provided on the initial request. When a + /// stream disconnects and reconnects for the same stream, the client_id should + /// be set to the same value so that state associated with the old stream can + /// be transferred to the new stream. The same client_id should not be used for /// different client instances. #[prost(string, tag = "6")] pub client_id: ::prost::alloc::string::String, - /// Flow control settings for the maximum number of outstanding messages. When - /// there are `max_outstanding_messages` or more currently sent to the + /// Optional. Flow control settings for the maximum number of outstanding + /// messages. When there are `max_outstanding_messages` currently sent to the /// streaming pull client that have not yet been acked or nacked, the server /// stops sending more messages. The sending of messages resumes once the /// number of outstanding messages is less than this value. If the value is @@ -1645,14 +2065,14 @@ pub struct StreamingPullRequest { /// `INVALID_ARGUMENT`. #[prost(int64, tag = "7")] pub max_outstanding_messages: i64, - /// Flow control settings for the maximum number of outstanding bytes. When - /// there are `max_outstanding_bytes` or more worth of messages currently sent - /// to the streaming pull client that have not yet been acked or nacked, the - /// server will stop sending more messages. The sending of messages resumes - /// once the number of outstanding bytes is less than this value. If the value - /// is <= 0, there is no limit to the number of outstanding bytes. This - /// property can only be set on the initial StreamingPullRequest. 
If it is set - /// on a subsequent request, the stream will be aborted with status + /// Optional. Flow control settings for the maximum number of outstanding + /// bytes. When there are `max_outstanding_bytes` or more worth of messages + /// currently sent to the streaming pull client that have not yet been acked or + /// nacked, the server will stop sending more messages. The sending of messages + /// resumes once the number of outstanding bytes is less than this value. If + /// the value is <= 0, there is no limit to the number of outstanding bytes. + /// This property can only be set on the initial StreamingPullRequest. If it is + /// set on a subsequent request, the stream will be aborted with status /// `INVALID_ARGUMENT`. #[prost(int64, tag = "8")] pub max_outstanding_bytes: i64, @@ -1662,19 +2082,19 @@ pub struct StreamingPullRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct StreamingPullResponse { - /// Received Pub/Sub messages. This will not be empty. + /// Optional. Received Pub/Sub messages. This will not be empty. #[prost(message, repeated, tag = "1")] pub received_messages: ::prost::alloc::vec::Vec, - /// This field will only be set if `enable_exactly_once_delivery` is set to - /// `true`. + /// Optional. This field will only be set if `enable_exactly_once_delivery` is + /// set to `true`. #[prost(message, optional, tag = "5")] pub acknowledge_confirmation: ::core::option::Option, - /// This field will only be set if `enable_exactly_once_delivery` is set to - /// `true`. + /// Optional. This field will only be set if `enable_exactly_once_delivery` is + /// set to `true`. #[prost(message, optional, tag = "3")] pub modify_ack_deadline_confirmation: ::core::option::Option, - /// Properties associated with this subscription. + /// Optional. Properties associated with this subscription. 
#[prost(message, optional, tag = "4")] pub subscription_properties: ::core::option::Option, } @@ -1685,17 +2105,18 @@ pub mod streaming_pull_response { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AcknowledgeConfirmation { - /// Successfully processed acknowledgement IDs. + /// Optional. Successfully processed acknowledgement IDs. #[prost(string, repeated, tag = "1")] pub ack_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// List of acknowledgement IDs that were malformed or whose acknowledgement - /// deadline has expired. + /// Optional. List of acknowledgement IDs that were malformed or whose + /// acknowledgement deadline has expired. #[prost(string, repeated, tag = "2")] pub invalid_ack_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// List of acknowledgement IDs that were out of order. + /// Optional. List of acknowledgement IDs that were out of order. #[prost(string, repeated, tag = "3")] pub unordered_ack_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// List of acknowledgement IDs that failed processing with temporary issues. + /// Optional. List of acknowledgement IDs that failed processing with + /// temporary issues. #[prost(string, repeated, tag = "4")] pub temporary_failed_ack_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } @@ -1704,14 +2125,15 @@ pub mod streaming_pull_response { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ModifyAckDeadlineConfirmation { - /// Successfully processed acknowledgement IDs. + /// Optional. Successfully processed acknowledgement IDs. #[prost(string, repeated, tag = "1")] pub ack_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// List of acknowledgement IDs that were malformed or whose acknowledgement - /// deadline has expired. + /// Optional. 
List of acknowledgement IDs that were malformed or whose + /// acknowledgement deadline has expired. #[prost(string, repeated, tag = "2")] pub invalid_ack_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// List of acknowledgement IDs that failed processing with temporary issues. + /// Optional. List of acknowledgement IDs that failed processing with + /// temporary issues. #[prost(string, repeated, tag = "3")] pub temporary_failed_ack_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } @@ -1719,10 +2141,11 @@ pub mod streaming_pull_response { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct SubscriptionProperties { - /// True iff exactly once delivery is enabled for this subscription. + /// Optional. True iff exactly once delivery is enabled for this + /// subscription. #[prost(bool, tag = "1")] pub exactly_once_delivery_enabled: bool, - /// True iff message ordering is enabled for this subscription. + /// Optional. True iff message ordering is enabled for this subscription. #[prost(bool, tag = "2")] pub message_ordering_enabled: bool, } @@ -1735,8 +2158,8 @@ pub struct CreateSnapshotRequest { /// in the request, the server will assign a random name for this snapshot on /// the same project as the subscription. Note that for REST API requests, you /// must specify a name. See the [resource name - /// rules](). Format - /// is `projects/{project}/snapshots/{snap}`. + /// rules](). + /// Format is `projects/{project}/snapshots/{snap}`. #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, /// Required. The subscription whose backlog the snapshot retains. @@ -1750,7 +2173,7 @@ pub struct CreateSnapshotRequest { /// Format is `projects/{project}/subscriptions/{sub}`. #[prost(string, tag = "2")] pub subscription: ::prost::alloc::string::String, - /// See [Creating and managing + /// Optional. See [Creating and managing /// labels](). 
#[prost(map = "string, string", tag = "3")] pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, @@ -1775,13 +2198,14 @@ pub struct UpdateSnapshotRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Snapshot { - /// The name of the snapshot. + /// Optional. The name of the snapshot. #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, - /// The name of the topic from which this snapshot is retaining messages. + /// Optional. The name of the topic from which this snapshot is retaining + /// messages. #[prost(string, tag = "2")] pub topic: ::prost::alloc::string::String, - /// The snapshot is guaranteed to exist up until this time. + /// Optional. The snapshot is guaranteed to exist up until this time. /// A newly-created snapshot expires no later than 7 days from the time of its /// creation. Its exact lifetime is determined at creation by the existing /// backlog in the source subscription. Specifically, the lifetime of the @@ -1793,7 +2217,7 @@ pub struct Snapshot { /// snapshot that would expire in less than 1 hour after creation. #[prost(message, optional, tag = "3")] pub expire_time: ::core::option::Option<::prost_types::Timestamp>, - /// See \[Creating and managing labels\] + /// Optional. See \[Creating and managing labels\] /// (). #[prost(map = "string, string", tag = "4")] pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, @@ -1815,12 +2239,12 @@ pub struct ListSnapshotsRequest { /// Format is `projects/{project-id}`. #[prost(string, tag = "1")] pub project: ::prost::alloc::string::String, - /// Maximum number of snapshots to return. + /// Optional. Maximum number of snapshots to return. 
#[prost(int32, tag = "2")] pub page_size: i32, - /// The value returned by the last `ListSnapshotsResponse`; indicates that this - /// is a continuation of a prior `ListSnapshots` call, and that the system - /// should return the next page of data. + /// Optional. The value returned by the last `ListSnapshotsResponse`; indicates + /// that this is a continuation of a prior `ListSnapshots` call, and that the + /// system should return the next page of data. #[prost(string, tag = "3")] pub page_token: ::prost::alloc::string::String, } @@ -1828,11 +2252,12 @@ pub struct ListSnapshotsRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListSnapshotsResponse { - /// The resulting snapshots. + /// Optional. The resulting snapshots. #[prost(message, repeated, tag = "1")] pub snapshots: ::prost::alloc::vec::Vec, - /// If not empty, indicates that there may be more snapshot that match the - /// request; this value should be passed in a new `ListSnapshotsRequest`. + /// Optional. If not empty, indicates that there may be more snapshot that + /// match the request; this value should be passed in a new + /// `ListSnapshotsRequest`. #[prost(string, tag = "2")] pub next_page_token: ::prost::alloc::string::String, } @@ -1860,7 +2285,7 @@ pub mod seek_request { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Target { - /// The time to seek to. + /// Optional. The time to seek to. /// Messages retained in the subscription that were published before this /// time are marked as acknowledged, and messages retained in the /// subscription that were published after this time are marked as @@ -1873,9 +2298,9 @@ pub mod seek_request { /// and already-expunged messages will not be restored. #[prost(message, tag = "2")] Time(::prost_types::Timestamp), - /// The snapshot to seek to. The snapshot's topic must be the same as that of - /// the provided subscription. 
- /// Format is `projects/{project}/snapshots/{snap}`. + /// Optional. The snapshot to seek to. The snapshot's topic must be the same + /// as that of the provided subscription. Format is + /// `projects/{project}/snapshots/{snap}`. #[prost(string, tag = "3")] Snapshot(::prost::alloc::string::String), } @@ -1965,7 +2390,7 @@ pub mod publisher_client { self } /// Creates the given topic with the given name. See the [resource name rules] - /// (https://cloud.google.com/pubsub/docs/admin#resource_names). + /// (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). pub async fn create_topic( &mut self, request: impl tonic::IntoRequest, @@ -1980,8 +2405,8 @@ pub mod publisher_client { .insert(GrpcMethod::new("google.pubsub.v1.Publisher", "CreateTopic")); self.inner.unary(req, path, codec).await } - /// Updates an existing topic. Note that certain properties of a - /// topic are not modifiable. + /// Updates an existing topic by updating the fields specified in the update + /// mask. Note that certain properties of a topic are not modifiable. pub async fn update_topic( &mut self, request: impl tonic::IntoRequest, @@ -2197,16 +2622,16 @@ pub mod subscriber_client { self } /// Creates a subscription to a given topic. See the [resource name rules] - /// (https://cloud.google.com/pubsub/docs/admin#resource_names). + /// (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). /// If the subscription already exists, returns `ALREADY_EXISTS`. /// If the corresponding topic doesn't exist, returns `NOT_FOUND`. /// /// If the name is not provided in the request, the server will assign a random /// name for this subscription on the same project as the topic, conforming /// to the [resource name format] - /// (https://cloud.google.com/pubsub/docs/admin#resource_names). The generated - /// name is populated in the returned Subscription object. Note that for REST - /// API requests, you must specify a name in the request. 
+ /// (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). The + /// generated name is populated in the returned Subscription object. Note that + /// for REST API requests, you must specify a name in the request. pub async fn create_subscription( &mut self, request: impl tonic::IntoRequest, @@ -2236,8 +2661,9 @@ pub mod subscriber_client { .insert(GrpcMethod::new("google.pubsub.v1.Subscriber", "GetSubscription")); self.inner.unary(req, path, codec).await } - /// Updates an existing subscription. Note that certain properties of a - /// subscription, such as its topic, are not modifiable. + /// Updates an existing subscription by updating the fields specified in the + /// update mask. Note that certain properties of a subscription, such as its + /// topic, are not modifiable. pub async fn update_subscription( &mut self, request: impl tonic::IntoRequest, @@ -2434,7 +2860,7 @@ pub mod subscriber_client { /// the request, the server will assign a random /// name for this snapshot on the same project as the subscription, conforming /// to the [resource name format] - /// (https://cloud.google.com/pubsub/docs/admin#resource_names). The + /// (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). The /// generated name is populated in the returned Snapshot object. Note that for /// REST API requests, you must specify a name in the request. pub async fn create_snapshot( @@ -2451,7 +2877,8 @@ pub mod subscriber_client { .insert(GrpcMethod::new("google.pubsub.v1.Subscriber", "CreateSnapshot")); self.inner.unary(req, path, codec).await } - /// Updates an existing snapshot. Snapshots are used in + /// Updates an existing snapshot by updating the fields specified in the update + /// mask. Snapshots are used in /// [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, /// which allow you to manage message acknowledgments in bulk. 
That is, you can /// set the acknowledgment state of messages in an existing subscription to the diff --git a/googleapis/src/bytes/google.spanner.admin.database.v1.rs b/googleapis/src/bytes/google.spanner.admin.database.v1.rs index 4925ada8..d0131abc 100644 --- a/googleapis/src/bytes/google.spanner.admin.database.v1.rs +++ b/googleapis/src/bytes/google.spanner.admin.database.v1.rs @@ -25,6 +25,23 @@ pub struct EncryptionConfig { /// `projects//locations//keyRings//cryptoKeys/`. #[prost(string, tag = "2")] pub kms_key_name: ::prost::alloc::string::String, + /// Specifies the KMS configuration for the one or more keys used to encrypt + /// the database. Values are of the form + /// `projects//locations//keyRings//cryptoKeys/`. + /// + /// The keys referenced by kms_key_names must fully cover all + /// regions of the database instance configuration. Some examples: + /// * For single region database instance configs, specify a single regional + /// location KMS key. + /// * For multi-regional database instance configs of type GOOGLE_MANAGED, + /// either specify a multi-regional location KMS key or multiple regional + /// location KMS keys that cover all regions in the instance config. + /// * For a database instance config of type USER_MANAGED, please specify only + /// regional location KMS keys to cover each region in the instance config. + /// Multi-regional location KMS keys are not supported for USER_MANAGED + /// instance configs. + #[prost(string, repeated, tag = "3")] + pub kms_key_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// Encryption information for a Cloud Spanner database or backup. #[allow(clippy::derive_partial_eq_without_eq)] @@ -33,15 +50,15 @@ pub struct EncryptionInfo { /// Output only. The type of encryption. #[prost(enumeration = "encryption_info::Type", tag = "3")] pub encryption_type: i32, - /// Output only. If present, the status of a recent encrypt/decrypt call on underlying data - /// for this database or backup. 
Regardless of status, data is always encrypted - /// at rest. + /// Output only. If present, the status of a recent encrypt/decrypt call on + /// underlying data for this database or backup. Regardless of status, data is + /// always encrypted at rest. #[prost(message, optional, tag = "4")] pub encryption_status: ::core::option::Option< super::super::super::super::rpc::Status, >, - /// Output only. A Cloud KMS key version that is being used to protect the database or - /// backup. + /// Output only. A Cloud KMS key version that is being used to protect the + /// database or backup. #[prost(string, tag = "2")] pub kms_key_version: ::prost::alloc::string::String, } @@ -102,7 +119,7 @@ pub enum DatabaseDialect { /// Default value. This value will create a database with the /// GOOGLE_STANDARD_SQL dialect. Unspecified = 0, - /// Google standard SQL. + /// GoogleSQL supported SQL. GoogleStandardSql = 1, /// PostgreSQL supported SQL. Postgresql = 2, @@ -133,10 +150,10 @@ impl DatabaseDialect { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Backup { - /// Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation. - /// Name of the database from which this backup was - /// created. This needs to be in the same instance as the backup. - /// Values are of the form + /// Required for the + /// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + /// operation. Name of the database from which this backup was created. This + /// needs to be in the same instance as the backup. Values are of the form /// `projects//instances//databases/`. #[prost(string, tag = "2")] pub database: ::prost::alloc::string::String, @@ -146,7 +163,8 @@ pub struct Backup { /// backup. 
#[prost(message, optional, tag = "9")] pub version_time: ::core::option::Option<::prost_types::Timestamp>, - /// Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + /// Required for the + /// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] /// operation. The expiration time of the backup, with microseconds /// granularity that must be at least 6 hours and at most 366 days /// from the time the CreateBackup request is processed. Once the `expire_time` @@ -154,8 +172,11 @@ pub struct Backup { /// Spanner to free the resources used by the backup. #[prost(message, optional, tag = "3")] pub expire_time: ::core::option::Option<::prost_types::Timestamp>, - /// Output only for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation. - /// Required for the [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup] operation. + /// Output only for the + /// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + /// operation. Required for the + /// [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup] + /// operation. /// /// A globally unique identifier for the backup which cannot be /// changed. Values are of the form @@ -169,7 +190,8 @@ pub struct Backup { /// `projects//instances/`. #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, - /// Output only. The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + /// Output only. The time the + /// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] /// request is received. If the request does not specify `version_time`, the /// `version_time` of the backup will be equivalent to the `create_time`. #[prost(message, optional, tag = "4")] @@ -177,6 +199,24 @@ pub struct Backup { /// Output only. Size of the backup in bytes. #[prost(int64, tag = "5")] pub size_bytes: i64, + /// Output only. 
The number of bytes that will be freed by deleting this + /// backup. This value will be zero if, for example, this backup is part of an + /// incremental backup chain and younger backups in the chain require that we + /// keep its data. For backups not in an incremental backup chain, this is + /// always the size of the backup. This value may change if backups on the same + /// chain get created, deleted or expired. + #[prost(int64, tag = "15")] + pub freeable_size_bytes: i64, + /// Output only. For a backup in an incremental backup chain, this is the + /// storage space needed to keep the data that has changed since the previous + /// backup. For all other backups, this is always the size of the backup. This + /// value may change if backups on the same chain get deleted or expired. + /// + /// This field can be used to calculate the total storage space used by a set + /// of backups. For example, the total space used by all backups of a database + /// can be computed by summing up this field. + #[prost(int64, tag = "16")] + pub exclusive_size_bytes: i64, /// Output only. The current state of the backup. #[prost(enumeration = "backup::State", tag = "6")] pub state: i32, @@ -192,6 +232,14 @@ pub struct Backup { /// Output only. The encryption information for the backup. #[prost(message, optional, tag = "8")] pub encryption_info: ::core::option::Option, + /// Output only. The encryption information for the backup, whether it is + /// protected by one or more KMS keys. The information includes all Cloud + /// KMS key versions used to encrypt the backup. The `encryption_status' field + /// inside of each `EncryptionInfo` is not populated. At least one of the key + /// versions must be available for the backup to be restored. If a key version + /// is revoked in the middle of a restore, the restore behavior is undefined. + #[prost(message, repeated, tag = "13")] + pub encryption_information: ::prost::alloc::vec::Vec, /// Output only. 
The database dialect information for the backup. #[prost(enumeration = "DatabaseDialect", tag = "10")] pub database_dialect: i32, @@ -211,6 +259,32 @@ pub struct Backup { /// less than `Backup.max_expire_time`. #[prost(message, optional, tag = "12")] pub max_expire_time: ::core::option::Option<::prost_types::Timestamp>, + /// Output only. List of backup schedule URIs that are associated with + /// creating this backup. This is only applicable for scheduled backups, and + /// is empty for on-demand backups. + /// + /// To optimize for storage, whenever possible, multiple schedules are + /// collapsed together to create one backup. In such cases, this field captures + /// the list of all backup schedule URIs that are associated with creating + /// this backup. If collapsing is not done, then this field captures the + /// single backup schedule URI associated with creating this backup. + #[prost(string, repeated, tag = "14")] + pub backup_schedules: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Output only. Populated only for backups in an incremental backup chain. + /// Backups share the same chain id if and only if they belong to the same + /// incremental backup chain. Use this field to determine which backups are + /// part of the same incremental backup chain. The ordering of backups in the + /// chain can be determined by ordering the backup `version_time`. + #[prost(string, tag = "17")] + pub incremental_backup_chain_id: ::prost::alloc::string::String, + /// Output only. Data deleted at a time older than this is guaranteed not to be + /// retained in order to support this backup. For a backup in an incremental + /// backup chain, this is the version time of the oldest backup that exists or + /// ever existed in the chain. For all other backups, this is the version time + /// of the backup. This field can be used to understand what data is being + /// retained by the backup system. 
+ #[prost(message, optional, tag = "18")] + pub oldest_version_time: ::core::option::Option<::prost_types::Timestamp>, } /// Nested message and enum types in `Backup`. pub mod backup { @@ -259,7 +333,8 @@ pub mod backup { } } } -/// The request for [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]. +/// The request for +/// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CreateBackupRequest { @@ -279,11 +354,11 @@ pub struct CreateBackupRequest { /// Required. The backup to create. #[prost(message, optional, tag = "3")] pub backup: ::core::option::Option, - /// Optional. The encryption configuration used to encrypt the backup. If this field is - /// not specified, the backup will use the same - /// encryption configuration as the database by default, namely - /// [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] = - /// `USE_DATABASE_ENCRYPTION`. + /// Optional. The encryption configuration used to encrypt the backup. If this + /// field is not specified, the backup will use the same encryption + /// configuration as the database by default, namely + /// [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] + /// = `USE_DATABASE_ENCRYPTION`. #[prost(message, optional, tag = "4")] pub encryption_config: ::core::option::Option, } @@ -299,7 +374,8 @@ pub struct CreateBackupMetadata { #[prost(string, tag = "2")] pub database: ::prost::alloc::string::String, /// The progress of the - /// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation. + /// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + /// operation. #[prost(message, optional, tag = "3")] pub progress: ::core::option::Option, /// The time at which cancellation of this operation was received. 
@@ -317,12 +393,13 @@ pub struct CreateBackupMetadata { #[prost(message, optional, tag = "4")] pub cancel_time: ::core::option::Option<::prost_types::Timestamp>, } -/// The request for [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]. +/// The request for +/// [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CopyBackupRequest { - /// Required. The name of the destination instance that will contain the backup copy. - /// Values are of the form: `projects//instances/`. + /// Required. The name of the destination instance that will contain the backup + /// copy. Values are of the form: `projects//instances/`. #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, /// Required. The id of the backup copy. @@ -345,15 +422,15 @@ pub struct CopyBackupRequest { /// to free the resources used by the backup. #[prost(message, optional, tag = "4")] pub expire_time: ::core::option::Option<::prost_types::Timestamp>, - /// Optional. The encryption configuration used to encrypt the backup. If this field is - /// not specified, the backup will use the same - /// encryption configuration as the source backup by default, namely - /// [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] = - /// `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`. + /// Optional. The encryption configuration used to encrypt the backup. If this + /// field is not specified, the backup will use the same encryption + /// configuration as the source backup by default, namely + /// [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] + /// = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`. 
#[prost(message, optional, tag = "5")] pub encryption_config: ::core::option::Option, } -/// Metadata type for the google.longrunning.Operation returned by +/// Metadata type for the operation returned by /// [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -369,7 +446,8 @@ pub struct CopyBackupMetadata { #[prost(string, tag = "2")] pub source_backup: ::prost::alloc::string::String, /// The progress of the - /// [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] operation. + /// [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] + /// operation. #[prost(message, optional, tag = "3")] pub progress: ::core::option::Option, /// The time at which cancellation of CopyBackup operation was received. @@ -387,7 +465,8 @@ pub struct CopyBackupMetadata { #[prost(message, optional, tag = "4")] pub cancel_time: ::core::option::Option<::prost_types::Timestamp>, } -/// The request for [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]. +/// The request for +/// [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct UpdateBackupRequest { @@ -405,7 +484,8 @@ pub struct UpdateBackupRequest { #[prost(message, optional, tag = "2")] pub update_mask: ::core::option::Option<::prost_types::FieldMask>, } -/// The request for [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup]. +/// The request for +/// [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup]. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetBackupRequest { @@ -415,7 +495,8 @@ pub struct GetBackupRequest { #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, } -/// The request for [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup]. +/// The request for +/// [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeleteBackupRequest { @@ -425,7 +506,8 @@ pub struct DeleteBackupRequest { #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, } -/// The request for [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]. +/// The request for +/// [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListBackupsRequest { @@ -441,7 +523,9 @@ pub struct ListBackupsRequest { /// must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. /// Colon `:` is the contains operator. Filter rules are not case sensitive. /// - /// The following fields in the [Backup][google.spanner.admin.database.v1.Backup] are eligible for filtering: + /// The following fields in the + /// [Backup][google.spanner.admin.database.v1.Backup] are eligible for + /// filtering: /// /// * `name` /// * `database` @@ -450,6 +534,7 @@ pub struct ListBackupsRequest { /// * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) /// * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) /// * `size_bytes` + /// * `backup_schedules` /// /// You can combine multiple expressions by enclosing each expression in /// parentheses. 
By default, expressions are combined with AND logic, but @@ -468,6 +553,8 @@ pub struct ListBackupsRequest { /// * `expire_time < \"2018-03-28T14:50:00Z\"` /// - The backup `expire_time` is before 2018-03-28T14:50:00Z. /// * `size_bytes > 10000000000` - The backup's size is greater than 10GB + /// * `backup_schedules:daily` + /// - The backup is created from a schedule with "daily" in its name. #[prost(string, tag = "2")] pub filter: ::prost::alloc::string::String, /// Number of backups to be returned in the response. If 0 or @@ -475,13 +562,15 @@ pub struct ListBackupsRequest { #[prost(int32, tag = "3")] pub page_size: i32, /// If non-empty, `page_token` should contain a - /// [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token] from a - /// previous [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse] to the same `parent` and with the same - /// `filter`. + /// [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token] + /// from a previous + /// [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse] + /// to the same `parent` and with the same `filter`. #[prost(string, tag = "4")] pub page_token: ::prost::alloc::string::String, } -/// The response for [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]. +/// The response for +/// [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListBackupsResponse { @@ -490,8 +579,8 @@ pub struct ListBackupsResponse { #[prost(message, repeated, tag = "1")] pub backups: ::prost::alloc::vec::Vec, /// `next_page_token` can be sent in a subsequent - /// [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups] call to fetch more - /// of the matching backups. 
+ /// [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups] + /// call to fetch more of the matching backups. #[prost(string, tag = "2")] pub next_page_token: ::prost::alloc::string::String, } @@ -518,7 +607,9 @@ pub struct ListBackupOperationsRequest { /// * `name` - The name of the long-running operation /// * `done` - False if the operation is in progress, else true. /// * `metadata.@type` - the type of metadata. For example, the type string - /// for [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] is + /// for + /// [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + /// is /// `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`. /// * `metadata.` - any field in metadata.value. /// `metadata.@type` must be specified first if filtering on metadata @@ -536,14 +627,15 @@ pub struct ListBackupOperationsRequest { /// * `done:true` - The operation is complete. /// * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ /// `metadata.database:prod` - Returns operations where: - /// * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. - /// * The database the backup was taken from has a name containing the - /// string "prod". + /// * The operation's metadata type is + /// [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + /// * The source database name of backup contains the string "prod". /// * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ /// `(metadata.name:howl) AND` \ /// `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ /// `(error:*)` - Returns operations where: - /// * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. 
+ /// * The operation's metadata type is + /// [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. /// * The backup name contains the string "howl". /// * The operation started before 2018-03-28T14:50:00Z. /// * The operation resulted in an error. @@ -551,9 +643,9 @@ pub struct ListBackupOperationsRequest { /// `(metadata.source_backup:test) AND` \ /// `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \ /// `(error:*)` - Returns operations where: - /// * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. - /// * The source backup of the copied backup name contains the string - /// "test". + /// * The operation's metadata type is + /// [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. + /// * The source backup name contains the string "test". /// * The operation started before 2022-01-18T14:50:00Z. /// * The operation resulted in an error. /// * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ @@ -563,12 +655,13 @@ pub struct ListBackupOperationsRequest { /// `(metadata.source_backup:test_bkp)) AND` \ /// `(error:*)` - Returns operations where: /// * The operation's metadata matches either of criteria: - /// * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] AND the - /// database the backup was taken from has name containing string + /// * The operation's metadata type is + /// [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + /// AND the source database name of the backup contains the string /// "test_db" - /// * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] AND the - /// backup the backup was copied from has name containing string - /// "test_bkp" + /// * The operation's metadata type is + /// 
[CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] + /// AND the source backup name contains the string "test_bkp" /// * The operation resulted in an error. #[prost(string, tag = "2")] pub filter: ::prost::alloc::string::String, @@ -578,8 +671,9 @@ pub struct ListBackupOperationsRequest { pub page_size: i32, /// If non-empty, `page_token` should contain a /// [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token] - /// from a previous [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse] to the - /// same `parent` and with the same `filter`. + /// from a previous + /// [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse] + /// to the same `parent` and with the same `filter`. #[prost(string, tag = "4")] pub page_token: ::prost::alloc::string::String, } @@ -616,13 +710,14 @@ pub struct BackupInfo { pub backup: ::prost::alloc::string::String, /// The backup contains an externally consistent copy of `source_database` at /// the timestamp specified by `version_time`. If the - /// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request did not specify - /// `version_time`, the `version_time` of the backup is equivalent to the - /// `create_time`. + /// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + /// request did not specify `version_time`, the `version_time` of the backup is + /// equivalent to the `create_time`. #[prost(message, optional, tag = "4")] pub version_time: ::core::option::Option<::prost_types::Timestamp>, - /// The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request was - /// received. + /// The time the + /// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + /// request was received. 
#[prost(message, optional, tag = "2")] pub create_time: ::core::option::Option<::prost_types::Timestamp>, /// Name of the database the backup was created from. @@ -638,11 +733,28 @@ pub struct CreateBackupEncryptionConfig { pub encryption_type: i32, /// Optional. The Cloud KMS key that will be used to protect the backup. /// This field should be set only when - /// [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] is - /// `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form + /// [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] + /// is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form /// `projects//locations//keyRings//cryptoKeys/`. #[prost(string, tag = "2")] pub kms_key_name: ::prost::alloc::string::String, + /// Optional. Specifies the KMS configuration for the one or more keys used to + /// protect the backup. Values are of the form + /// `projects//locations//keyRings//cryptoKeys/`. + /// + /// The keys referenced by kms_key_names must fully cover all + /// regions of the backup's instance configuration. Some examples: + /// * For single region instance configs, specify a single regional + /// location KMS key. + /// * For multi-regional instance configs of type GOOGLE_MANAGED, + /// either specify a multi-regional location KMS key or multiple regional + /// location KMS keys that cover all regions in the instance config. + /// * For an instance config of type USER_MANAGED, please specify only + /// regional location KMS keys to cover each region in the instance config. + /// Multi-regional location KMS keys are not supported for USER_MANAGED + /// instance configs. + #[prost(string, repeated, tag = "3")] + pub kms_key_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// Nested message and enum types in `CreateBackupEncryptionConfig`. 
pub mod create_backup_encryption_config { @@ -664,9 +776,10 @@ pub mod create_backup_encryption_config { Unspecified = 0, /// Use the same encryption configuration as the database. This is the /// default option when - /// [encryption_config][google.spanner.admin.database.v1.CreateBackupEncryptionConfig] is empty. - /// For example, if the database is using `Customer_Managed_Encryption`, the - /// backup will be using the same Cloud KMS key as the database. + /// [encryption_config][google.spanner.admin.database.v1.CreateBackupEncryptionConfig] + /// is empty. For example, if the database is using + /// `Customer_Managed_Encryption`, the backup will be using the same Cloud + /// KMS key as the database. UseDatabaseEncryption = 1, /// Use Google default encryption. GoogleDefaultEncryption = 2, @@ -710,11 +823,29 @@ pub struct CopyBackupEncryptionConfig { pub encryption_type: i32, /// Optional. The Cloud KMS key that will be used to protect the backup. /// This field should be set only when - /// [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] is - /// `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form + /// [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] + /// is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form /// `projects//locations//keyRings//cryptoKeys/`. #[prost(string, tag = "2")] pub kms_key_name: ::prost::alloc::string::String, + /// Optional. Specifies the KMS configuration for the one or more keys used to + /// protect the backup. Values are of the form + /// `projects//locations//keyRings//cryptoKeys/`. + /// Kms keys specified can be in any order. + /// + /// The keys referenced by kms_key_names must fully cover all + /// regions of the backup's instance configuration. Some examples: + /// * For single region instance configs, specify a single regional + /// location KMS key. 
+ /// * For multi-regional instance configs of type GOOGLE_MANAGED, + /// either specify a multi-regional location KMS key or multiple regional + /// location KMS keys that cover all regions in the instance config. + /// * For an instance config of type USER_MANAGED, please specify only + /// regional location KMS keys to cover each region in the instance config. + /// Multi-regional location KMS keys are not supported for USER_MANAGED + /// instance configs. + #[prost(string, repeated, tag = "3")] + pub kms_key_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// Nested message and enum types in `CopyBackupEncryptionConfig`. pub mod copy_backup_encryption_config { @@ -734,15 +865,18 @@ pub mod copy_backup_encryption_config { pub enum EncryptionType { /// Unspecified. Do not use. Unspecified = 0, - /// This is the default option for [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] - /// when [encryption_config][google.spanner.admin.database.v1.CopyBackupEncryptionConfig] is not specified. - /// For example, if the source backup is using `Customer_Managed_Encryption`, - /// the backup will be using the same Cloud KMS key as the source backup. + /// This is the default option for + /// [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] + /// when + /// [encryption_config][google.spanner.admin.database.v1.CopyBackupEncryptionConfig] + /// is not specified. For example, if the source backup is using + /// `Customer_Managed_Encryption`, the backup will be using the same Cloud + /// KMS key as the source backup. UseConfigDefaultOrBackupEncryption = 1, /// Use Google default encryption. GoogleDefaultEncryption = 2, - /// Use customer managed encryption. If specified, `kms_key_name` - /// must contain a valid Cloud KMS key. + /// Use customer managed encryption. If specified, either `kms_key_name` or + /// `kms_key_names` must contain valid Cloud KMS key(s). 
CustomerManagedEncryption = 3, } impl EncryptionType { @@ -776,6 +910,222 @@ pub mod copy_backup_encryption_config { } } } +/// The specification for full backups. +/// A full backup stores the entire contents of the database at a given +/// version time. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct FullBackupSpec {} +/// The specification for incremental backup chains. +/// An incremental backup stores the delta of changes between a previous +/// backup and the database contents at a given version time. An +/// incremental backup chain consists of a full backup and zero or more +/// successive incremental backups. The first backup created for an +/// incremental backup chain is always a full backup. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct IncrementalBackupSpec {} +/// Defines specifications of the backup schedule. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BackupScheduleSpec { + /// Required. + #[prost(oneof = "backup_schedule_spec::ScheduleSpec", tags = "1")] + pub schedule_spec: ::core::option::Option, +} +/// Nested message and enum types in `BackupScheduleSpec`. +pub mod backup_schedule_spec { + /// Required. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum ScheduleSpec { + /// Cron style schedule specification. + #[prost(message, tag = "1")] + CronSpec(super::CrontabSpec), + } +} +/// BackupSchedule expresses the automated backup creation specification for a +/// Spanner database. +/// Next ID: 10 +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BackupSchedule { + /// Identifier. Output only for the + /// [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchedule] operation.
+ /// Required for the + /// [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule] + /// operation. A globally unique identifier for the backup schedule which + /// cannot be changed. Values are of the form + /// `projects//instances//databases//backupSchedules/[a-z][a-z0-9_\-]*\[a-z0-9\]` + /// The final segment of the name must be between 2 and 60 characters in + /// length. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// Optional. The schedule specification based on which the backup creations + /// are triggered. + #[prost(message, optional, tag = "6")] + pub spec: ::core::option::Option, + /// Optional. The retention duration of a backup that must be at least 6 hours + /// and at most 366 days. The backup is eligible to be automatically deleted + /// once the retention period has elapsed. + #[prost(message, optional, tag = "3")] + pub retention_duration: ::core::option::Option<::prost_types::Duration>, + /// Optional. The encryption configuration that will be used to encrypt the + /// backup. If this field is not specified, the backup will use the same + /// encryption configuration as the database. + #[prost(message, optional, tag = "4")] + pub encryption_config: ::core::option::Option, + /// Output only. The timestamp at which the schedule was last updated. + /// If the schedule has never been updated, this field contains the timestamp + /// when the schedule was first created. + #[prost(message, optional, tag = "9")] + pub update_time: ::core::option::Option<::prost_types::Timestamp>, + /// Required. Backup type spec determines the type of backup that is created by + /// the backup schedule. Currently, only full backups are supported. + #[prost(oneof = "backup_schedule::BackupTypeSpec", tags = "7, 8")] + pub backup_type_spec: ::core::option::Option, +} +/// Nested message and enum types in `BackupSchedule`. +pub mod backup_schedule { + /// Required. 
Backup type spec determines the type of backup that is created by + /// the backup schedule. Currently, only full backups are supported. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] + pub enum BackupTypeSpec { + /// The schedule creates only full backups. + #[prost(message, tag = "7")] + FullBackupSpec(super::FullBackupSpec), + /// The schedule creates incremental backup chains. + #[prost(message, tag = "8")] + IncrementalBackupSpec(super::IncrementalBackupSpec), + } +} +/// CrontabSpec can be used to specify the version time and frequency at +/// which the backup should be created. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CrontabSpec { + /// Required. Textual representation of the crontab. User can customize the + /// backup frequency and the backup version time using the cron + /// expression. The version time must be in UTC time zone. + /// + /// The backup will contain an externally consistent copy of the + /// database at the version time. Allowed frequencies are 12 hour, 1 day, + /// 1 week and 1 month. Examples of valid cron specifications: + /// * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC. + /// * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC. + /// * `0 2 * * * ` : once a day at 2 past midnight in UTC. + /// * `0 2 * * 0 ` : once a week every Sunday at 2 past midnight in UTC. + /// * `0 2 8 * * ` : once a month on 8th day at 2 past midnight in UTC. + #[prost(string, tag = "1")] + pub text: ::prost::alloc::string::String, + /// Output only. The time zone of the times in `CrontabSpec.text`. Currently + /// only UTC is supported. + #[prost(string, tag = "2")] + pub time_zone: ::prost::alloc::string::String, + /// Output only. Schedule backups will contain an externally consistent copy + /// of the database at the version time specified in + /// `schedule_spec.cron_spec`.
However, Spanner may not initiate the creation + /// of the scheduled backups at that version time. Spanner will initiate + /// the creation of scheduled backups within the time window bounded by the + /// version_time specified in `schedule_spec.cron_spec` and version_time + + /// `creation_window`. + #[prost(message, optional, tag = "3")] + pub creation_window: ::core::option::Option<::prost_types::Duration>, +} +/// The request for +/// [CreateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateBackupScheduleRequest { + /// Required. The name of the database that this backup schedule applies to. + #[prost(string, tag = "1")] + pub parent: ::prost::alloc::string::String, + /// Required. The Id to use for the backup schedule. The `backup_schedule_id` + /// appended to `parent` forms the full backup schedule name of the form + /// `projects//instances//databases//backupSchedules/`. + #[prost(string, tag = "2")] + pub backup_schedule_id: ::prost::alloc::string::String, + /// Required. The backup schedule to create. + #[prost(message, optional, tag = "3")] + pub backup_schedule: ::core::option::Option, +} +/// The request for +/// [GetBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetBackupScheduleRequest { + /// Required. The name of the schedule to retrieve. + /// Values are of the form + /// `projects//instances//databases//backupSchedules/`. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, +} +/// The request for +/// [DeleteBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule]. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteBackupScheduleRequest { + /// Required. The name of the schedule to delete. + /// Values are of the form + /// `projects//instances//databases//backupSchedules/`. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, +} +/// The request for +/// [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListBackupSchedulesRequest { + /// Required. Database is the parent resource whose backup schedules should be + /// listed. Values are of the form + /// projects//instances//databases/ + #[prost(string, tag = "1")] + pub parent: ::prost::alloc::string::String, + /// Optional. Number of backup schedules to be returned in the response. If 0 + /// or less, defaults to the server's maximum allowed page size. + #[prost(int32, tag = "2")] + pub page_size: i32, + /// Optional. If non-empty, `page_token` should contain a + /// [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token] + /// from a previous + /// [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse] + /// to the same `parent`. + #[prost(string, tag = "4")] + pub page_token: ::prost::alloc::string::String, +} +/// The response for +/// [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListBackupSchedulesResponse { + /// The list of backup schedules for a database. 
+ #[prost(message, repeated, tag = "1")] + pub backup_schedules: ::prost::alloc::vec::Vec, + /// `next_page_token` can be sent in a subsequent + /// [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules] + /// call to fetch more of the schedules. + #[prost(string, tag = "2")] + pub next_page_token: ::prost::alloc::string::String, +} +/// The request for +/// [UpdateBackupScheduleRequest][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UpdateBackupScheduleRequest { + /// Required. The backup schedule to update. `backup_schedule.name`, and the + /// fields to be updated as specified by `update_mask` are required. Other + /// fields are ignored. + #[prost(message, optional, tag = "1")] + pub backup_schedule: ::core::option::Option, + /// Required. A mask specifying which fields in the BackupSchedule resource + /// should be updated. This mask is relative to the BackupSchedule resource, + /// not to the request message. The field mask must always be + /// specified; this prevents any future fields from being erased + /// accidentally. + #[prost(message, optional, tag = "2")] + pub update_mask: ::core::option::Option<::prost_types::FieldMask>, +} /// Information about the database restore. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -828,7 +1178,8 @@ pub struct Database { pub encryption_config: ::core::option::Option, /// Output only. For databases that are using customer managed encryption, this /// field contains the encryption information for the database, such as - /// encryption state and the Cloud KMS key versions that are in use. + /// all Cloud KMS key versions that are in use. The `encryption_status' field + /// inside of each `EncryptionInfo` is not populated. 
/// /// For databases that are using Google default or other types of encryption, /// this field is empty. @@ -840,8 +1191,8 @@ pub struct Database { /// Output only. The period in which Cloud Spanner retains all versions of data /// for the database. This is the same as the value of version_retention_period /// database option set using - /// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. Defaults to 1 hour, - /// if not set. + /// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. + /// Defaults to 1 hour, if not set. #[prost(string, tag = "6")] pub version_retention_period: ::prost::alloc::string::String, /// Output only. Earliest timestamp at which older versions of the data can be @@ -863,7 +1214,9 @@ pub struct Database { #[prost(enumeration = "DatabaseDialect", tag = "10")] pub database_dialect: i32, /// Whether drop protection is enabled for this database. Defaults to false, - /// if not set. + /// if not set. For more details, please see how to [prevent accidental + /// database + /// deletion](). #[prost(bool, tag = "11")] pub enable_drop_protection: bool, /// Output only. If true, the database is being updated. If false, there are no @@ -929,7 +1282,8 @@ pub mod database { } } } -/// The request for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. +/// The request for +/// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListDatabasesRequest { @@ -942,12 +1296,14 @@ pub struct ListDatabasesRequest { #[prost(int32, tag = "3")] pub page_size: i32, /// If non-empty, `page_token` should contain a - /// [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] from a - /// previous [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse]. 
+ /// [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] + /// from a previous + /// [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse]. #[prost(string, tag = "4")] pub page_token: ::prost::alloc::string::String, } -/// The response for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. +/// The response for +/// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListDatabasesResponse { @@ -955,12 +1311,13 @@ pub struct ListDatabasesResponse { #[prost(message, repeated, tag = "1")] pub databases: ::prost::alloc::vec::Vec, /// `next_page_token` can be sent in a subsequent - /// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] call to fetch more - /// of the matching databases. + /// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] + /// call to fetch more of the matching databases. #[prost(string, tag = "2")] pub next_page_token: ::prost::alloc::string::String, } -/// The request for [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. +/// The request for +/// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CreateDatabaseRequest { @@ -981,14 +1338,31 @@ pub struct CreateDatabaseRequest { /// if there is an error in any statement, the database is not created. #[prost(string, repeated, tag = "3")] pub extra_statements: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// Optional. The encryption configuration for the database. If this field is not - /// specified, Cloud Spanner will encrypt/decrypt all data at rest using + /// Optional. The encryption configuration for the database. 
If this field is + /// not specified, Cloud Spanner will encrypt/decrypt all data at rest using /// Google default encryption. #[prost(message, optional, tag = "4")] pub encryption_config: ::core::option::Option, /// Optional. The dialect of the Cloud Spanner Database. #[prost(enumeration = "DatabaseDialect", tag = "5")] pub database_dialect: i32, + /// Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements in + /// 'extra_statements' above. + /// Contains a protobuf-serialized + /// [google.protobuf.FileDescriptorSet](). + /// To generate it, [install]() and + /// run `protoc` with --include_imports and --descriptor_set_out. For example, + /// to generate for moon/shot/app.proto, run + /// ``` + /// $protoc --proto_path=/app_path --proto_path=/lib_path \ + /// --include_imports \ + /// --descriptor_set_out=descriptors.data \ + /// moon/shot/app.proto + /// ``` + /// For more details, see protobuffer [self + /// description](). + #[prost(bytes = "bytes", tag = "6")] + pub proto_descriptors: ::prost::bytes::Bytes, } /// Metadata type for the operation returned by /// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. @@ -999,7 +1373,8 @@ pub struct CreateDatabaseMetadata { #[prost(string, tag = "1")] pub database: ::prost::alloc::string::String, } -/// The request for [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase]. +/// The request for +/// [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetDatabaseRequest { @@ -1056,8 +1431,8 @@ pub struct UpdateDatabaseMetadata { /// Each batch of statements is assigned a name which can be used with /// the [Operations][google.longrunning.Operations] API to monitor /// progress. See the -/// [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id] field for more -/// details. 
+/// [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id] +/// field for more details. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct UpdateDatabaseDdlRequest { @@ -1074,20 +1449,38 @@ pub struct UpdateDatabaseDdlRequest { /// /// Specifying an explicit operation ID simplifies determining /// whether the statements were executed in the event that the - /// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] call is replayed, - /// or the return value is otherwise lost: the [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] and - /// `operation_id` fields can be combined to form the + /// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + /// call is replayed, or the return value is otherwise lost: the + /// [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] + /// and `operation_id` fields can be combined to form the /// [name][google.longrunning.Operation.name] of the resulting - /// [longrunning.Operation][google.longrunning.Operation]: `/operations/`. + /// [longrunning.Operation][google.longrunning.Operation]: + /// `/operations/`. /// /// `operation_id` should be unique within the database, and must be /// a valid identifier: `[a-z][a-z0-9_]*`. Note that /// automatically-generated operation IDs always begin with an /// underscore. If the named operation already exists, - /// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] returns - /// `ALREADY_EXISTS`. + /// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + /// returns `ALREADY_EXISTS`. #[prost(string, tag = "3")] pub operation_id: ::prost::alloc::string::String, + /// Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements. 
+ /// Contains a protobuf-serialized + /// [google.protobuf.FileDescriptorSet](). + /// To generate it, [install]() and + /// run `protoc` with --include_imports and --descriptor_set_out. For example, + /// to generate for moon/shot/app.proto, run + /// ``` + /// $protoc --proto_path=/app_path --proto_path=/lib_path \ + /// --include_imports \ + /// --descriptor_set_out=descriptors.data \ + /// moon/shot/app.proto + /// ``` + /// For more details, see protobuffer [self + /// description](). + #[prost(bytes = "bytes", tag = "4")] + pub proto_descriptors: ::prost::bytes::Bytes, } /// Action information extracted from a DDL statement. This proto is used to /// display the brief info of the DDL statement for the operation @@ -1148,7 +1541,8 @@ pub struct UpdateDatabaseDdlMetadata { #[prost(message, repeated, tag = "6")] pub actions: ::prost::alloc::vec::Vec, } -/// The request for [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase]. +/// The request for +/// [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DropDatabaseRequest { @@ -1156,7 +1550,8 @@ pub struct DropDatabaseRequest { #[prost(string, tag = "1")] pub database: ::prost::alloc::string::String, } -/// The request for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. +/// The request for +/// [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetDatabaseDdlRequest { @@ -1166,7 +1561,8 @@ pub struct GetDatabaseDdlRequest { #[prost(string, tag = "1")] pub database: ::prost::alloc::string::String, } -/// The response for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. 
+/// The response for +/// [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetDatabaseDdlResponse { @@ -1174,6 +1570,13 @@ pub struct GetDatabaseDdlResponse { /// specified in the request. #[prost(string, repeated, tag = "1")] pub statements: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Proto descriptors stored in the database. + /// Contains a protobuf-serialized + /// [google.protobuf.FileDescriptorSet](). + /// For more details, see protobuffer [self + /// description](). + #[prost(bytes = "bytes", tag = "2")] + pub proto_descriptors: ::prost::bytes::Bytes, } /// The request for /// [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]. @@ -1198,7 +1601,9 @@ pub struct ListDatabaseOperationsRequest { /// * `name` - The name of the long-running operation /// * `done` - False if the operation is in progress, else true. /// * `metadata.@type` - the type of metadata. For example, the type string - /// for [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] is + /// for + /// [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] + /// is /// `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`. /// * `metadata.` - any field in metadata.value. /// `metadata.@type` must be specified first, if filtering on metadata @@ -1220,7 +1625,8 @@ pub struct ListDatabaseOperationsRequest { /// `(metadata.name:restored_howl) AND` \ /// `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ /// `(error:*)` - Return operations where: - /// * The operation's metadata type is [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. + /// * The operation's metadata type is + /// [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. 
/// * The database is restored from a backup. /// * The backup name contains "backup_howl". /// * The restored database's name contains "restored_howl". @@ -1234,8 +1640,9 @@ pub struct ListDatabaseOperationsRequest { pub page_size: i32, /// If non-empty, `page_token` should contain a /// [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token] - /// from a previous [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse] to the - /// same `parent` and with the same `filter`. + /// from a previous + /// [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse] + /// to the same `parent` and with the same `filter`. #[prost(string, tag = "4")] pub page_token: ::prost::alloc::string::String, } @@ -1277,12 +1684,12 @@ pub struct RestoreDatabaseRequest { /// `projects//instances//databases/`. #[prost(string, tag = "2")] pub database_id: ::prost::alloc::string::String, - /// Optional. An encryption configuration describing the encryption type and key - /// resources in Cloud KMS used to encrypt/decrypt the database to restore to. - /// If this field is not specified, the restored database will use - /// the same encryption configuration as the backup by default, namely - /// [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] = - /// `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`. + /// Optional. An encryption configuration describing the encryption type and + /// key resources in Cloud KMS used to encrypt/decrypt the database to restore + /// to. If this field is not specified, the restored database will use the same + /// encryption configuration as the backup by default, namely + /// [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] + /// = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`. 
#[prost(message, optional, tag = "4")] pub encryption_config: ::core::option::Option, /// Required. The source from which to restore. @@ -1311,13 +1718,30 @@ pub struct RestoreDatabaseEncryptionConfig { tag = "1" )] pub encryption_type: i32, - /// Optional. The Cloud KMS key that will be used to encrypt/decrypt the restored - /// database. This field should be set only when - /// [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] is - /// `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form + /// Optional. The Cloud KMS key that will be used to encrypt/decrypt the + /// restored database. This field should be set only when + /// [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] + /// is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form /// `projects//locations//keyRings//cryptoKeys/`. #[prost(string, tag = "2")] pub kms_key_name: ::prost::alloc::string::String, + /// Optional. Specifies the KMS configuration for the one or more keys used to + /// encrypt the database. Values are of the form + /// `projects//locations//keyRings//cryptoKeys/`. + /// + /// The keys referenced by kms_key_names must fully cover all + /// regions of the database instance configuration. Some examples: + /// * For single region database instance configs, specify a single regional + /// location KMS key. + /// * For multi-regional database instance configs of type GOOGLE_MANAGED, + /// either specify a multi-regional location KMS key or multiple regional + /// location KMS keys that cover all regions in the instance config. + /// * For a database instance config of type USER_MANAGED, please specify only + /// regional location KMS keys to cover each region in the instance config. + /// Multi-regional location KMS keys are not supported for USER_MANAGED + /// instance configs. 
+ #[prost(string, repeated, tag = "3")] + pub kms_key_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// Nested message and enum types in `RestoreDatabaseEncryptionConfig`. pub mod restore_database_encryption_config { @@ -1338,7 +1762,8 @@ pub mod restore_database_encryption_config { /// Unspecified. Do not use. Unspecified = 0, /// This is the default option when - /// [encryption_config][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig] is not specified. + /// [encryption_config][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig] + /// is not specified. UseConfigDefaultOrBackupEncryption = 1, /// Use Google default encryption. GoogleDefaultEncryption = 2, @@ -1403,7 +1828,8 @@ pub struct RestoreDatabaseMetadata { /// operation completed despite cancellation. On successful cancellation, /// the operation is not deleted; instead, it becomes an operation with /// an [Operation.error][google.longrunning.Operation.error] value with a - /// [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to `Code.CANCELLED`. + /// [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to + /// `Code.CANCELLED`. #[prost(message, optional, tag = "5")] pub cancel_time: ::core::option::Option<::prost_types::Timestamp>, /// If exists, the name of the long-running operation that will be used to @@ -1413,21 +1839,23 @@ pub struct RestoreDatabaseMetadata { /// `projects//instances//databases//operations/` /// where the is the name of database being created and restored to. /// The metadata type of the long-running operation is - /// [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. This long-running operation will be - /// automatically created by the system after the RestoreDatabase long-running - /// operation completes successfully. This operation will not be created if the - /// restore was not successful. 
+ /// [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. + /// This long-running operation will be automatically created by the system + /// after the RestoreDatabase long-running operation completes successfully. + /// This operation will not be created if the restore was not successful. #[prost(string, tag = "6")] pub optimize_database_operation_name: ::prost::alloc::string::String, /// Information about the source used to restore the database, as specified by - /// `source` in [RestoreDatabaseRequest][google.spanner.admin.database.v1.RestoreDatabaseRequest]. + /// `source` in + /// [RestoreDatabaseRequest][google.spanner.admin.database.v1.RestoreDatabaseRequest]. #[prost(oneof = "restore_database_metadata::SourceInfo", tags = "3")] pub source_info: ::core::option::Option, } /// Nested message and enum types in `RestoreDatabaseMetadata`. pub mod restore_database_metadata { /// Information about the source used to restore the database, as specified by - /// `source` in [RestoreDatabaseRequest][google.spanner.admin.database.v1.RestoreDatabaseRequest]. + /// `source` in + /// [RestoreDatabaseRequest][google.spanner.admin.database.v1.RestoreDatabaseRequest]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum SourceInfo { @@ -1455,20 +1883,19 @@ pub struct OptimizeRestoredDatabaseMetadata { #[derive(Clone, PartialEq, ::prost::Message)] pub struct DatabaseRole { /// Required. The name of the database role. Values are of the form - /// `projects//instances//databases//databaseRoles/ - /// {role}`, where `` is as specified in the `CREATE ROLE` - /// DDL statement. This name can be passed to Get/Set IAMPolicy methods to - /// identify the database role. + /// `projects//instances//databases//databaseRoles/` + /// where `` is as specified in the `CREATE ROLE` DDL statement. 
#[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, } -/// The request for [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]. +/// The request for +/// [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListDatabaseRolesRequest { /// Required. The database whose roles should be listed. /// Values are of the form - /// `projects//instances//databases//databaseRoles`. + /// `projects//instances//databases/`. #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, /// Number of database roles to be returned in the response. If 0 or less, @@ -1476,12 +1903,14 @@ pub struct ListDatabaseRolesRequest { #[prost(int32, tag = "2")] pub page_size: i32, /// If non-empty, `page_token` should contain a - /// [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token] from a - /// previous [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse]. + /// [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token] + /// from a previous + /// [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse]. #[prost(string, tag = "3")] pub page_token: ::prost::alloc::string::String, } -/// The response for [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]. +/// The response for +/// [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListDatabaseRolesResponse { @@ -1533,7 +1962,7 @@ pub mod database_admin_client { /// The Cloud Spanner Database Admin API can be used to: /// * create, drop, and list databases /// * update the schema of pre-existing databases - /// * create, delete and list backups for a database + /// * create, delete, copy and list backups for a database /// * restore a database from an existing backup #[derive(Debug, Clone)] pub struct DatabaseAdminClient { @@ -1651,8 +2080,8 @@ pub mod database_admin_client { /// have a name of the format `/operations/` and /// can be used to track preparation of the database. The /// [metadata][google.longrunning.Operation.metadata] field type is - /// [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The - /// [response][google.longrunning.Operation.response] field type is + /// [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. + /// The [response][google.longrunning.Operation.response] field type is /// [Database][google.spanner.admin.database.v1.Database], if successful. pub async fn create_database( &mut self, @@ -1784,7 +2213,8 @@ pub mod database_admin_client { /// the format `/operations/` and can be used to /// track execution of the schema change(s). The /// [metadata][google.longrunning.Operation.metadata] field type is - /// [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response. + /// [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. + /// The operation has no response. pub async fn update_database_ddl( &mut self, request: impl tonic::IntoRequest, @@ -2009,12 +2439,12 @@ pub mod database_admin_client { /// `projects//instances//backups//operations/` /// and can be used to track creation of the backup. 
The /// [metadata][google.longrunning.Operation.metadata] field type is - /// [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. The - /// [response][google.longrunning.Operation.response] field type is - /// [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned operation will stop the - /// creation and delete the backup. - /// There can be only one pending backup creation per database. Backup creation - /// of different databases can run concurrently. + /// [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + /// The [response][google.longrunning.Operation.response] field type is + /// [Backup][google.spanner.admin.database.v1.Backup], if successful. + /// Cancelling the returned operation will stop the creation and delete the + /// backup. There can be only one pending backup creation per database. Backup + /// creation of different databases can run concurrently. pub async fn create_backup( &mut self, request: impl tonic::IntoRequest, @@ -2054,9 +2484,10 @@ pub mod database_admin_client { /// The [metadata][google.longrunning.Operation.metadata] field type is /// [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. /// The [response][google.longrunning.Operation.response] field type is - /// [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned operation will stop the - /// copying and delete the backup. - /// Concurrent CopyBackup requests can run on the same source backup. + /// [Backup][google.spanner.admin.database.v1.Backup], if successful. + /// Cancelling the returned operation will stop the copying and delete the + /// destination backup. Concurrent CopyBackup requests can run on the same + /// source backup. 
pub async fn copy_backup( &mut self, request: impl tonic::IntoRequest, @@ -2087,7 +2518,8 @@ pub mod database_admin_client { ); self.inner.unary(req, path, codec).await } - /// Gets metadata on a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + /// Gets metadata on a pending or completed + /// [Backup][google.spanner.admin.database.v1.Backup]. pub async fn get_backup( &mut self, request: impl tonic::IntoRequest, @@ -2115,7 +2547,8 @@ pub mod database_admin_client { ); self.inner.unary(req, path, codec).await } - /// Updates a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + /// Updates a pending or completed + /// [Backup][google.spanner.admin.database.v1.Backup]. pub async fn update_backup( &mut self, request: impl tonic::IntoRequest, @@ -2143,7 +2576,8 @@ pub mod database_admin_client { ); self.inner.unary(req, path, codec).await } - /// Deletes a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + /// Deletes a pending or completed + /// [Backup][google.spanner.admin.database.v1.Backup]. pub async fn delete_backup( &mut self, request: impl tonic::IntoRequest, @@ -2360,5 +2794,148 @@ pub mod database_admin_client { ); self.inner.unary(req, path, codec).await } + /// Creates a new backup schedule. 
+ pub async fn create_backup_schedule( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackupSchedule", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.spanner.admin.database.v1.DatabaseAdmin", + "CreateBackupSchedule", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Gets backup schedule for the input schedule name. + pub async fn get_backup_schedule( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackupSchedule", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.spanner.admin.database.v1.DatabaseAdmin", + "GetBackupSchedule", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Updates a backup schedule. 
+ pub async fn update_backup_schedule( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackupSchedule", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.spanner.admin.database.v1.DatabaseAdmin", + "UpdateBackupSchedule", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Deletes a backup schedule. + pub async fn delete_backup_schedule( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackupSchedule", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.spanner.admin.database.v1.DatabaseAdmin", + "DeleteBackupSchedule", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Lists all the backup schedules for the database. 
+ pub async fn list_backup_schedules( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupSchedules", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.spanner.admin.database.v1.DatabaseAdmin", + "ListBackupSchedules", + ), + ); + self.inner.unary(req, path, codec).await + } } } diff --git a/googleapis/src/bytes/google.spanner.admin.instance.v1.rs b/googleapis/src/bytes/google.spanner.admin.instance.v1.rs index 03c3742c..cfc8eb26 100644 --- a/googleapis/src/bytes/google.spanner.admin.instance.v1.rs +++ b/googleapis/src/bytes/google.spanner.admin.instance.v1.rs @@ -16,6 +16,41 @@ pub struct OperationProgress { #[prost(message, optional, tag = "3")] pub end_time: ::core::option::Option<::prost_types::Timestamp>, } +/// Indicates the expected fulfillment period of an operation. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum FulfillmentPeriod { + /// Not specified. + Unspecified = 0, + /// Normal fulfillment period. The operation is expected to complete within + /// minutes. + Normal = 1, + /// Extended fulfillment period. It can take up to an hour for the operation + /// to complete. + Extended = 2, +} +impl FulfillmentPeriod { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + FulfillmentPeriod::Unspecified => "FULFILLMENT_PERIOD_UNSPECIFIED", + FulfillmentPeriod::Normal => "FULFILLMENT_PERIOD_NORMAL", + FulfillmentPeriod::Extended => "FULFILLMENT_PERIOD_EXTENDED", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "FULFILLMENT_PERIOD_UNSPECIFIED" => Some(Self::Unspecified), + "FULFILLMENT_PERIOD_NORMAL" => Some(Self::Normal), + "FULFILLMENT_PERIOD_EXTENDED" => Some(Self::Extended), + _ => None, + } + } +} #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReplicaInfo { @@ -109,13 +144,15 @@ pub struct InstanceConfig { /// A unique identifier for the instance configuration. Values /// are of the form /// `projects//instanceConfigs/[a-z][-a-z0-9]*`. + /// + /// User instance configuration must start with `custom-`. #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, /// The name of this instance configuration as it appears in UIs. #[prost(string, tag = "2")] pub display_name: ::prost::alloc::string::String, - /// Output only. Whether this instance config is a Google or User Managed - /// Configuration. + /// Output only. Whether this instance configuration is a Google-managed or + /// user-managed configuration. #[prost(enumeration = "instance_config::Type", tag = "5")] pub config_type: i32, /// The geographic placement of nodes in this instance configuration and their @@ -159,26 +196,29 @@ pub struct InstanceConfig { ::prost::alloc::string::String, >, /// etag is used for optimistic concurrency control as a way - /// to help prevent simultaneous updates of a instance config from overwriting - /// each other. 
It is strongly suggested that systems make use of the etag in - /// the read-modify-write cycle to perform instance config updates in order to - /// avoid race conditions: An etag is returned in the response which contains - /// instance configs, and systems are expected to put that etag in the request - /// to update instance config to ensure that their change will be applied to - /// the same version of the instance config. - /// If no etag is provided in the call to update instance config, then the - /// existing instance config is overwritten blindly. + /// to help prevent simultaneous updates of a instance configuration from + /// overwriting each other. It is strongly suggested that systems make use of + /// the etag in the read-modify-write cycle to perform instance configuration + /// updates in order to avoid race conditions: An etag is returned in the + /// response which contains instance configurations, and systems are expected + /// to put that etag in the request to update instance configuration to ensure + /// that their change is applied to the same version of the instance + /// configuration. If no etag is provided in the call to update the instance + /// configuration, then the existing instance configuration is overwritten + /// blindly. #[prost(string, tag = "9")] pub etag: ::prost::alloc::string::String, /// Allowed values of the "default_leader" schema option for databases in /// instances that use this instance configuration. #[prost(string, repeated, tag = "4")] pub leader_options: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// Output only. If true, the instance config is being created or updated. If - /// false, there are no ongoing operations for the instance config. + /// Output only. If true, the instance configuration is being created or + /// updated. If false, there are no ongoing operations for the instance + /// configuration. #[prost(bool, tag = "10")] pub reconciling: bool, - /// Output only. 
The current instance config state. + /// Output only. The current instance configuration state. Applicable only for + /// `USER_MANAGED` configurations. #[prost(enumeration = "instance_config::State", tag = "11")] pub state: i32, } @@ -227,7 +267,7 @@ pub mod instance_config { } } } - /// Indicates the current state of the instance config. + /// Indicates the current state of the instance configuration. #[derive( Clone, Copy, @@ -243,10 +283,10 @@ pub mod instance_config { pub enum State { /// Not specified. Unspecified = 0, - /// The instance config is still being created. + /// The instance configuration is still being created. Creating = 1, - /// The instance config is fully created and ready to be used to create - /// instances. + /// The instance configuration is fully created and ready to be used to + /// create instances. Ready = 2, } impl State { @@ -272,6 +312,90 @@ pub mod instance_config { } } } +/// Autoscaling configuration for an instance. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct AutoscalingConfig { + /// Required. Autoscaling limits for an instance. + #[prost(message, optional, tag = "1")] + pub autoscaling_limits: ::core::option::Option< + autoscaling_config::AutoscalingLimits, + >, + /// Required. The autoscaling targets for an instance. + #[prost(message, optional, tag = "2")] + pub autoscaling_targets: ::core::option::Option< + autoscaling_config::AutoscalingTargets, + >, +} +/// Nested message and enum types in `AutoscalingConfig`. +pub mod autoscaling_config { + /// The autoscaling limits for the instance. Users can define the minimum and + /// maximum compute capacity allocated to the instance, and the autoscaler will + /// only scale within that range. Users can either use nodes or processing + /// units to specify the limits, but should use the same unit to set both the + /// min_limit and max_limit. 
+ #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct AutoscalingLimits { + /// The minimum compute capacity for the instance. + #[prost(oneof = "autoscaling_limits::MinLimit", tags = "1, 2")] + pub min_limit: ::core::option::Option, + /// The maximum compute capacity for the instance. The maximum compute + /// capacity should be less than or equal to 10X the minimum compute + /// capacity. + #[prost(oneof = "autoscaling_limits::MaxLimit", tags = "3, 4")] + pub max_limit: ::core::option::Option, + } + /// Nested message and enum types in `AutoscalingLimits`. + pub mod autoscaling_limits { + /// The minimum compute capacity for the instance. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] + pub enum MinLimit { + /// Minimum number of nodes allocated to the instance. If set, this number + /// should be greater than or equal to 1. + #[prost(int32, tag = "1")] + MinNodes(i32), + /// Minimum number of processing units allocated to the instance. If set, + /// this number should be multiples of 1000. + #[prost(int32, tag = "2")] + MinProcessingUnits(i32), + } + /// The maximum compute capacity for the instance. The maximum compute + /// capacity should be less than or equal to 10X the minimum compute + /// capacity. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] + pub enum MaxLimit { + /// Maximum number of nodes allocated to the instance. If set, this number + /// should be greater than or equal to min_nodes. + #[prost(int32, tag = "3")] + MaxNodes(i32), + /// Maximum number of processing units allocated to the instance. If set, + /// this number should be multiples of 1000 and be greater than or equal to + /// min_processing_units. + #[prost(int32, tag = "4")] + MaxProcessingUnits(i32), + } + } + /// The autoscaling targets for an instance. 
+ #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct AutoscalingTargets { + /// Required. The target high priority cpu utilization percentage that the + /// autoscaler should be trying to achieve for the instance. This number is + /// on a scale from 0 (no utilization) to 100 (full utilization). The valid + /// range is \[10, 90\] inclusive. + #[prost(int32, tag = "1")] + pub high_priority_cpu_utilization_percent: i32, + /// Required. The target storage utilization percentage that the autoscaler + /// should be trying to achieve for the instance. This number is on a scale + /// from 0 (no utilization) to 100 (full utilization). The valid range is + /// \[10, 100\] inclusive. + #[prost(int32, tag = "2")] + pub storage_utilization_percent: i32, + } +} /// An isolated set of Cloud Spanner resources on which databases can be hosted. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -293,8 +417,12 @@ pub struct Instance { #[prost(string, tag = "3")] pub display_name: ::prost::alloc::string::String, /// The number of nodes allocated to this instance. At most one of either - /// node_count or processing_units should be present in the message. This - /// may be zero in API responses for instances that are not yet in state + /// node_count or processing_units should be present in the message. + /// + /// Users can set the node_count field to specify the target number of nodes + /// allocated to the instance. + /// + /// This may be zero in API responses for instances that are not yet in state /// `READY`. /// /// See [the @@ -303,14 +431,25 @@ pub struct Instance { #[prost(int32, tag = "5")] pub node_count: i32, /// The number of processing units allocated to this instance. At most one of - /// processing_units or node_count should be present in the message. This may - /// be zero in API responses for instances that are not yet in state `READY`. 
+ /// processing_units or node_count should be present in the message. + /// + /// Users can set the processing_units field to specify the target number of + /// processing units allocated to the instance. + /// + /// This may be zero in API responses for instances that are not yet in state + /// `READY`. /// /// See [the /// documentation]() /// for more information about nodes and processing units. #[prost(int32, tag = "9")] pub processing_units: i32, + /// Optional. The autoscaling configuration. Autoscaling is enabled if this + /// field is set. When autoscaling is enabled, node_count and processing_units + /// are treated as OUTPUT_ONLY fields and reflect the current compute capacity + /// allocated to the instance. + #[prost(message, optional, tag = "17")] + pub autoscaling_config: ::core::option::Option, /// Output only. The current instance state. For /// [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance], /// the state must be either omitted or set to `CREATING`. For @@ -353,6 +492,9 @@ pub struct Instance { /// Output only. The time at which the instance was most recently updated. #[prost(message, optional, tag = "12")] pub update_time: ::core::option::Option<::prost_types::Timestamp>, + /// Optional. The `Edition` of the current instance. + #[prost(enumeration = "instance::Edition", tag = "20")] + pub edition: i32, } /// Nested message and enum types in `Instance`. pub mod instance { @@ -402,6 +544,54 @@ pub mod instance { } } } + /// The edition selected for this instance. Different editions provide + /// different capabilities at different price points. + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum Edition { + /// Edition not specified. + Unspecified = 0, + /// Standard edition. + Standard = 1, + /// Enterprise edition. + Enterprise = 2, + /// Enterprise Plus edition. 
+ EnterprisePlus = 3, + } + impl Edition { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Edition::Unspecified => "EDITION_UNSPECIFIED", + Edition::Standard => "STANDARD", + Edition::Enterprise => "ENTERPRISE", + Edition::EnterprisePlus => "ENTERPRISE_PLUS", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "EDITION_UNSPECIFIED" => Some(Self::Unspecified), + "STANDARD" => Some(Self::Standard), + "ENTERPRISE" => Some(Self::Enterprise), + "ENTERPRISE_PLUS" => Some(Self::EnterprisePlus), + _ => None, + } + } + } } /// The request for /// [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. @@ -453,14 +643,14 @@ pub struct GetInstanceConfigRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CreateInstanceConfigRequest { - /// Required. The name of the project in which to create the instance config. - /// Values are of the form `projects/`. + /// Required. The name of the project in which to create the instance + /// configuration. Values are of the form `projects/`. #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, - /// Required. The ID of the instance config to create. Valid identifiers are - /// of the form `custom-\[-a-z0-9\]*[a-z0-9]` and must be between 2 and 64 + /// Required. The ID of the instance configuration to create. Valid identifiers + /// are of the form `custom-\[-a-z0-9\]*[a-z0-9]` and must be between 2 and 64 /// characters in length. The `custom-` prefix is required to avoid name - /// conflicts with Google managed configurations. 
+ /// conflicts with Google-managed configurations. #[prost(string, tag = "2")] pub instance_config_id: ::prost::alloc::string::String, /// Required. The InstanceConfig proto of the configuration to create. @@ -480,8 +670,9 @@ pub struct CreateInstanceConfigRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct UpdateInstanceConfigRequest { - /// Required. The user instance config to update, which must always include the - /// instance config name. Otherwise, only fields mentioned in + /// Required. The user instance configuration to update, which must always + /// include the instance configuration name. Otherwise, only fields mentioned + /// in /// [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask] /// need be included. To prevent conflicts of concurrent updates, /// [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can @@ -512,12 +703,12 @@ pub struct DeleteInstanceConfigRequest { #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, /// Used for optimistic concurrency control as a way to help prevent - /// simultaneous deletes of an instance config from overwriting each + /// simultaneous deletes of an instance configuration from overwriting each /// other. If not empty, the API - /// only deletes the instance config when the etag provided matches the current - /// status of the requested instance config. Otherwise, deletes the instance - /// config without checking the current status of the requested instance - /// config. + /// only deletes the instance configuration when the etag provided matches the + /// current status of the requested instance configuration. Otherwise, deletes + /// the instance configuration without checking the current status of the + /// requested instance configuration. 
#[prost(string, tag = "2")] pub etag: ::prost::alloc::string::String, /// An option to validate, but not actually execute, a request, @@ -530,7 +721,7 @@ pub struct DeleteInstanceConfigRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListInstanceConfigOperationsRequest { - /// Required. The project of the instance config operations. + /// Required. The project of the instance configuration operations. /// Values are of the form `projects/`. #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, @@ -574,7 +765,7 @@ pub struct ListInstanceConfigOperationsRequest { /// `(error:*)` - Return operations where: /// * The operation's metadata type is /// [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. - /// * The instance config name contains "custom-config". + /// * The instance configuration name contains "custom-config". /// * The operation started before 2021-03-28T14:50:00Z. /// * The operation resulted in an error. #[prost(string, tag = "2")] @@ -596,9 +787,9 @@ pub struct ListInstanceConfigOperationsRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListInstanceConfigOperationsResponse { - /// The list of matching instance config [long-running + /// The list of matching instance configuration [long-running /// operations][google.longrunning.Operation]. Each operation's name will be - /// prefixed by the instance config's name. The operation's + /// prefixed by the name of the instance configuration. The operation's /// [metadata][google.longrunning.Operation.metadata] field type /// `metadata.type_url` describes the type of the metadata. #[prost(message, repeated, tag = "1")] @@ -686,6 +877,14 @@ pub struct ListInstancesRequest { /// containing "dev". 
#[prost(string, tag = "4")] pub filter: ::prost::alloc::string::String, + /// Deadline used while retrieving metadata for instances. + /// Instances whose metadata cannot be retrieved within this deadline will be + /// added to + /// [unreachable][google.spanner.admin.instance.v1.ListInstancesResponse.unreachable] + /// in + /// [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse]. + #[prost(message, optional, tag = "5")] + pub instance_deadline: ::core::option::Option<::prost_types::Timestamp>, } /// The response for /// [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]. @@ -700,6 +899,12 @@ pub struct ListInstancesResponse { /// call to fetch more of the matching instances. #[prost(string, tag = "2")] pub next_page_token: ::prost::alloc::string::String, + /// The list of unreachable instances. + /// It includes the names of instances whose metadata could not be retrieved + /// within + /// [instance_deadline][google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline]. + #[prost(string, repeated, tag = "3")] + pub unreachable: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// The request for /// [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]. @@ -751,6 +956,9 @@ pub struct CreateInstanceMetadata { /// The time at which this operation failed or was completed successfully. #[prost(message, optional, tag = "4")] pub end_time: ::core::option::Option<::prost_types::Timestamp>, + /// The expected fulfillment period of this create operation. + #[prost(enumeration = "FulfillmentPeriod", tag = "5")] + pub expected_fulfillment_period: i32, } /// Metadata type for the operation returned by /// [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]. @@ -773,13 +981,16 @@ pub struct UpdateInstanceMetadata { /// The time at which this operation failed or was completed successfully. 
#[prost(message, optional, tag = "4")] pub end_time: ::core::option::Option<::prost_types::Timestamp>, + /// The expected fulfillment period of this update operation. + #[prost(enumeration = "FulfillmentPeriod", tag = "5")] + pub expected_fulfillment_period: i32, } /// Metadata type for the operation returned by /// [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CreateInstanceConfigMetadata { - /// The target instance config end state. + /// The target instance configuration end state. #[prost(message, optional, tag = "1")] pub instance_config: ::core::option::Option, /// The progress of the @@ -796,7 +1007,7 @@ pub struct CreateInstanceConfigMetadata { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct UpdateInstanceConfigMetadata { - /// The desired instance config after updating. + /// The desired instance configuration after updating. #[prost(message, optional, tag = "1")] pub instance_config: ::core::option::Option, /// The progress of the @@ -808,157 +1019,599 @@ pub struct UpdateInstanceConfigMetadata { #[prost(message, optional, tag = "3")] pub cancel_time: ::core::option::Option<::prost_types::Timestamp>, } -/// Generated client implementations. -pub mod instance_admin_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// Cloud Spanner Instance Admin API - /// - /// The Cloud Spanner Instance Admin API can be used to create, delete, - /// modify and list instances. Instances are dedicated Cloud Spanner serving - /// and storage resources to be used by Cloud Spanner databases. - /// - /// Each instance has a "configuration", which dictates where the - /// serving resources for the Cloud Spanner instance are located (e.g., - /// US-central, Europe). 
Configurations are created by Google based on - /// resource availability. - /// - /// Cloud Spanner billing is based on the instances that exist and their - /// sizes. After an instance exists, there are no additional - /// per-database or per-operation charges for use of the instance - /// (though there may be additional network bandwidth charges). - /// Instances offer isolation: problems with databases in one instance - /// will not affect other instances. However, within an instance - /// databases can affect each other. For example, if one database in an - /// instance receives a lot of requests and consumes most of the - /// instance resources, fewer resources are available for other - /// databases in that instance, and their performance may suffer. - #[derive(Debug, Clone)] - pub struct InstanceAdminClient { - inner: tonic::client::Grpc, - } - impl InstanceAdminClient { - /// Attempt to create a new client by connecting to a given endpoint. - pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } +/// An isolated set of Cloud Spanner resources that databases can define +/// placements on. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct InstancePartition { + /// Required. A unique identifier for the instance partition. Values are of the + /// form + /// `projects//instances//instancePartitions/[a-z][-a-z0-9]*\[a-z0-9\]`. + /// The final segment of the name must be between 2 and 64 characters in + /// length. An instance partition's name cannot be changed after the instance + /// partition is created. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// Required. The name of the instance partition's configuration. Values are of + /// the form `projects//instanceConfigs/`. 
See also + /// [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and + /// [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. + #[prost(string, tag = "2")] + pub config: ::prost::alloc::string::String, + /// Required. The descriptive name for this instance partition as it appears in + /// UIs. Must be unique per project and between 4 and 30 characters in length. + #[prost(string, tag = "3")] + pub display_name: ::prost::alloc::string::String, + /// Output only. The current instance partition state. + #[prost(enumeration = "instance_partition::State", tag = "7")] + pub state: i32, + /// Output only. The time at which the instance partition was created. + #[prost(message, optional, tag = "8")] + pub create_time: ::core::option::Option<::prost_types::Timestamp>, + /// Output only. The time at which the instance partition was most recently + /// updated. + #[prost(message, optional, tag = "9")] + pub update_time: ::core::option::Option<::prost_types::Timestamp>, + /// Output only. The names of the databases that reference this + /// instance partition. Referencing databases should share the parent instance. + /// The existence of any referencing database prevents the instance partition + /// from being deleted. + #[prost(string, repeated, tag = "10")] + pub referencing_databases: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Output only. The names of the backups that reference this instance + /// partition. Referencing backups should share the parent instance. The + /// existence of any referencing backup prevents the instance partition from + /// being deleted. + #[prost(string, repeated, tag = "11")] + pub referencing_backups: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Used for optimistic concurrency control as a way + /// to help prevent simultaneous updates of a instance partition from + /// overwriting each other. 
It is strongly suggested that systems make use of + /// the etag in the read-modify-write cycle to perform instance partition + /// updates in order to avoid race conditions: An etag is returned in the + /// response which contains instance partitions, and systems are expected to + /// put that etag in the request to update instance partitions to ensure that + /// their change will be applied to the same version of the instance partition. + /// If no etag is provided in the call to update instance partition, then the + /// existing instance partition is overwritten blindly. + #[prost(string, tag = "12")] + pub etag: ::prost::alloc::string::String, + /// Compute capacity defines amount of server and storage resources that are + /// available to the databases in an instance partition. At most one of either + /// node_count or processing_units should be present in the message. See [the + /// documentation]() + /// for more information about nodes and processing units. + #[prost(oneof = "instance_partition::ComputeCapacity", tags = "5, 6")] + pub compute_capacity: ::core::option::Option, +} +/// Nested message and enum types in `InstancePartition`. +pub mod instance_partition { + /// Indicates the current state of the instance partition. + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum State { + /// Not specified. + Unspecified = 0, + /// The instance partition is still being created. Resources may not be + /// available yet, and operations such as creating placements using this + /// instance partition may not work. + Creating = 1, + /// The instance partition is fully created and ready to do work such as + /// creating placements and using in databases. 
+ Ready = 2, } - impl InstanceAdminClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InstanceAdminClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + Send + Sync, - { - InstanceAdminClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. + impl State { + /// String value of the enum field names used in the ProtoBuf definition. /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + State::Unspecified => "STATE_UNSPECIFIED", + State::Creating => "CREATING", + State::Ready => "READY", + } } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "STATE_UNSPECIFIED" => Some(Self::Unspecified), + "CREATING" => Some(Self::Creating), + "READY" => Some(Self::Ready), + _ => None, + } } - /// Limits the maximum size of a decoded message. + } + /// Compute capacity defines amount of server and storage resources that are + /// available to the databases in an instance partition. At most one of either + /// node_count or processing_units should be present in the message. See [the + /// documentation]() + /// for more information about nodes and processing units. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] + pub enum ComputeCapacity { + /// The number of nodes allocated to this instance partition. /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. + /// Users can set the node_count field to specify the target number of nodes + /// allocated to the instance partition. /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Lists the supported instance configurations for a given project. 
- pub async fn list_instance_configs( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigs", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "google.spanner.admin.instance.v1.InstanceAdmin", - "ListInstanceConfigs", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Gets information about a particular instance configuration. - pub async fn get_instance_config( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + /// This may be zero in API responses for instance partitions that are not + /// yet in state `READY`. + #[prost(int32, tag = "5")] + NodeCount(i32), + /// The number of processing units allocated to this instance partition. + /// + /// Users can set the processing_units field to specify the target number of + /// processing units allocated to the instance partition. + /// + /// This may be zero in API responses for instance partitions that are not + /// yet in state `READY`. + #[prost(int32, tag = "6")] + ProcessingUnits(i32), + } +} +/// Metadata type for the operation returned by +/// [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateInstancePartitionMetadata { + /// The instance partition being created. 
+ #[prost(message, optional, tag = "1")] + pub instance_partition: ::core::option::Option, + /// The time at which the + /// [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition] + /// request was received. + #[prost(message, optional, tag = "2")] + pub start_time: ::core::option::Option<::prost_types::Timestamp>, + /// The time at which this operation was cancelled. If set, this operation is + /// in the process of undoing itself (which is guaranteed to succeed) and + /// cannot be cancelled again. + #[prost(message, optional, tag = "3")] + pub cancel_time: ::core::option::Option<::prost_types::Timestamp>, + /// The time at which this operation failed or was completed successfully. + #[prost(message, optional, tag = "4")] + pub end_time: ::core::option::Option<::prost_types::Timestamp>, +} +/// The request for +/// [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateInstancePartitionRequest { + /// Required. The name of the instance in which to create the instance + /// partition. Values are of the form + /// `projects//instances/`. + #[prost(string, tag = "1")] + pub parent: ::prost::alloc::string::String, + /// Required. The ID of the instance partition to create. Valid identifiers are + /// of the form `[a-z][-a-z0-9]*\[a-z0-9\]` and must be between 2 and 64 + /// characters in length. + #[prost(string, tag = "2")] + pub instance_partition_id: ::prost::alloc::string::String, + /// Required. The instance partition to create. The instance_partition.name may + /// be omitted, but if specified must be + /// `/instancePartitions/`. + #[prost(message, optional, tag = "3")] + pub instance_partition: ::core::option::Option, +} +/// The request for +/// [DeleteInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstancePartition]. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteInstancePartitionRequest { + /// Required. The name of the instance partition to be deleted. + /// Values are of the form + /// `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}` + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// Optional. If not empty, the API only deletes the instance partition when + /// the etag provided matches the current status of the requested instance + /// partition. Otherwise, deletes the instance partition without checking the + /// current status of the requested instance partition. + #[prost(string, tag = "2")] + pub etag: ::prost::alloc::string::String, +} +/// The request for +/// [GetInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.GetInstancePartition]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetInstancePartitionRequest { + /// Required. The name of the requested instance partition. Values are of + /// the form + /// `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, +} +/// The request for +/// [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UpdateInstancePartitionRequest { + /// Required. The instance partition to update, which must always include the + /// instance partition name. Otherwise, only fields mentioned in + /// [field_mask][google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask] + /// need be included. + #[prost(message, optional, tag = "1")] + pub instance_partition: ::core::option::Option, + /// Required. 
A mask specifying which fields in + /// [InstancePartition][google.spanner.admin.instance.v1.InstancePartition] + /// should be updated. The field mask must always be specified; this prevents + /// any future fields in + /// [InstancePartition][google.spanner.admin.instance.v1.InstancePartition] + /// from being erased accidentally by clients that do not know about them. + #[prost(message, optional, tag = "2")] + pub field_mask: ::core::option::Option<::prost_types::FieldMask>, +} +/// Metadata type for the operation returned by +/// [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UpdateInstancePartitionMetadata { + /// The desired end state of the update. + #[prost(message, optional, tag = "1")] + pub instance_partition: ::core::option::Option, + /// The time at which + /// [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition] + /// request was received. + #[prost(message, optional, tag = "2")] + pub start_time: ::core::option::Option<::prost_types::Timestamp>, + /// The time at which this operation was cancelled. If set, this operation is + /// in the process of undoing itself (which is guaranteed to succeed) and + /// cannot be cancelled again. + #[prost(message, optional, tag = "3")] + pub cancel_time: ::core::option::Option<::prost_types::Timestamp>, + /// The time at which this operation failed or was completed successfully. + #[prost(message, optional, tag = "4")] + pub end_time: ::core::option::Option<::prost_types::Timestamp>, +} +/// The request for +/// [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListInstancePartitionsRequest { + /// Required. The instance whose instance partitions should be listed. 
Values + /// are of the form `projects//instances/`. + #[prost(string, tag = "1")] + pub parent: ::prost::alloc::string::String, + /// Number of instance partitions to be returned in the response. If 0 or less, + /// defaults to the server's maximum allowed page size. + #[prost(int32, tag = "2")] + pub page_size: i32, + /// If non-empty, `page_token` should contain a + /// [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.next_page_token] + /// from a previous + /// [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse]. + #[prost(string, tag = "3")] + pub page_token: ::prost::alloc::string::String, + /// Optional. Deadline used while retrieving metadata for instance partitions. + /// Instance partitions whose metadata cannot be retrieved within this deadline + /// will be added to + /// [unreachable][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.unreachable] + /// in + /// [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse]. + #[prost(message, optional, tag = "4")] + pub instance_partition_deadline: ::core::option::Option<::prost_types::Timestamp>, +} +/// The response for +/// [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListInstancePartitionsResponse { + /// The list of requested instancePartitions. + #[prost(message, repeated, tag = "1")] + pub instance_partitions: ::prost::alloc::vec::Vec, + /// `next_page_token` can be sent in a subsequent + /// [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions] + /// call to fetch more of the matching instance partitions. + #[prost(string, tag = "2")] + pub next_page_token: ::prost::alloc::string::String, + /// The list of unreachable instance partitions. 
+ /// It includes the names of instance partitions whose metadata could + /// not be retrieved within + /// [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline]. + #[prost(string, repeated, tag = "3")] + pub unreachable: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// The request for +/// [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListInstancePartitionOperationsRequest { + /// Required. The parent instance of the instance partition operations. + /// Values are of the form `projects//instances/`. + #[prost(string, tag = "1")] + pub parent: ::prost::alloc::string::String, + /// Optional. An expression that filters the list of returned operations. + /// + /// A filter expression consists of a field name, a + /// comparison operator, and a value for filtering. + /// The value must be a string, a number, or a boolean. The comparison operator + /// must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. + /// Colon `:` is the contains operator. Filter rules are not case sensitive. + /// + /// The following fields in the [Operation][google.longrunning.Operation] + /// are eligible for filtering: + /// + /// * `name` - The name of the long-running operation + /// * `done` - False if the operation is in progress, else true. + /// * `metadata.@type` - the type of metadata. For example, the type string + /// for + /// [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata] + /// is + /// `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata`. + /// * `metadata.` - any field in metadata.value. + /// `metadata.@type` must be specified first, if filtering on metadata + /// fields. 
+ /// * `error` - Error associated with the long-running operation. + /// * `response.@type` - the type of response. + /// * `response.` - any field in response.value. + /// + /// You can combine multiple expressions by enclosing each expression in + /// parentheses. By default, expressions are combined with AND logic. However, + /// you can specify AND, OR, and NOT logic explicitly. + /// + /// Here are a few examples: + /// + /// * `done:true` - The operation is complete. + /// * `(metadata.@type=` \ + /// `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata) + /// AND` \ + /// `(metadata.instance_partition.name:custom-instance-partition) AND` \ + /// `(metadata.start_time < \"2021-03-28T14:50:00Z\") AND` \ + /// `(error:*)` - Return operations where: + /// * The operation's metadata type is + /// [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]. + /// * The instance partition name contains "custom-instance-partition". + /// * The operation started before 2021-03-28T14:50:00Z. + /// * The operation resulted in an error. + #[prost(string, tag = "2")] + pub filter: ::prost::alloc::string::String, + /// Optional. Number of operations to be returned in the response. If 0 or + /// less, defaults to the server's maximum allowed page size. + #[prost(int32, tag = "3")] + pub page_size: i32, + /// Optional. If non-empty, `page_token` should contain a + /// [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.next_page_token] + /// from a previous + /// [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse] + /// to the same `parent` and with the same `filter`. + #[prost(string, tag = "4")] + pub page_token: ::prost::alloc::string::String, + /// Optional. Deadline used while retrieving metadata for instance partition + /// operations. 
Instance partitions whose operation metadata cannot be + /// retrieved within this deadline will be added to + /// [unreachable][ListInstancePartitionOperationsResponse.unreachable] in + /// [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse]. + #[prost(message, optional, tag = "5")] + pub instance_partition_deadline: ::core::option::Option<::prost_types::Timestamp>, +} +/// The response for +/// [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListInstancePartitionOperationsResponse { + /// The list of matching instance partition [long-running + /// operations][google.longrunning.Operation]. Each operation's name will be + /// prefixed by the instance partition's name. The operation's + /// [metadata][google.longrunning.Operation.metadata] field type + /// `metadata.type_url` describes the type of the metadata. + #[prost(message, repeated, tag = "1")] + pub operations: ::prost::alloc::vec::Vec< + super::super::super::super::longrunning::Operation, + >, + /// `next_page_token` can be sent in a subsequent + /// [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations] + /// call to fetch more of the matching metadata. + #[prost(string, tag = "2")] + pub next_page_token: ::prost::alloc::string::String, + /// The list of unreachable instance partitions. + /// It includes the names of instance partitions whose operation metadata could + /// not be retrieved within + /// [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline]. 
+ #[prost(string, repeated, tag = "3")] + pub unreachable_instance_partitions: ::prost::alloc::vec::Vec< + ::prost::alloc::string::String, + >, +} +/// The request for +/// [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MoveInstanceRequest { + /// Required. The instance to move. + /// Values are of the form `projects//instances/`. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// Required. The target instance configuration where to move the instance. + /// Values are of the form `projects//instanceConfigs/`. + #[prost(string, tag = "2")] + pub target_config: ::prost::alloc::string::String, +} +/// The response for +/// [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct MoveInstanceResponse {} +/// Metadata type for the operation returned by +/// [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MoveInstanceMetadata { + /// The target instance configuration where to move the instance. + /// Values are of the form `projects//instanceConfigs/`. + #[prost(string, tag = "1")] + pub target_config: ::prost::alloc::string::String, + /// The progress of the + /// [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance] + /// operation. + /// [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent] + /// is reset when cancellation is requested. + #[prost(message, optional, tag = "2")] + pub progress: ::core::option::Option, + /// The time at which this operation was cancelled. 
+ #[prost(message, optional, tag = "3")] + pub cancel_time: ::core::option::Option<::prost_types::Timestamp>, +} +/// Generated client implementations. +pub mod instance_admin_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Cloud Spanner Instance Admin API + /// + /// The Cloud Spanner Instance Admin API can be used to create, delete, + /// modify and list instances. Instances are dedicated Cloud Spanner serving + /// and storage resources to be used by Cloud Spanner databases. + /// + /// Each instance has a "configuration", which dictates where the + /// serving resources for the Cloud Spanner instance are located (e.g., + /// US-central, Europe). Configurations are created by Google based on + /// resource availability. + /// + /// Cloud Spanner billing is based on the instances that exist and their + /// sizes. After an instance exists, there are no additional + /// per-database or per-operation charges for use of the instance + /// (though there may be additional network bandwidth charges). + /// Instances offer isolation: problems with databases in one instance + /// will not affect other instances. However, within an instance + /// databases can affect each other. For example, if one database in an + /// instance receives a lot of requests and consumes most of the + /// instance resources, fewer resources are available for other + /// databases in that instance, and their performance may suffer. + #[derive(Debug, Clone)] + pub struct InstanceAdminClient { + inner: tonic::client::Grpc, + } + impl InstanceAdminClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl InstanceAdminClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InstanceAdminClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + InstanceAdminClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Lists the supported instance configurations for a given project. 
+ pub async fn list_instance_configs( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigs", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.spanner.admin.instance.v1.InstanceAdmin", + "ListInstanceConfigs", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Gets information about a particular instance configuration. + pub async fn get_instance_config( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstanceConfig", @@ -973,38 +1626,38 @@ pub mod instance_admin_client { ); self.inner.unary(req, path, codec).await } - /// Creates an instance config and begins preparing it to be used. The + /// Creates an instance configuration and begins preparing it to be used. The /// returned [long-running operation][google.longrunning.Operation] /// can be used to track the progress of preparing the new - /// instance config. The instance config name is assigned by the caller. If the - /// named instance config already exists, `CreateInstanceConfig` returns - /// `ALREADY_EXISTS`. + /// instance configuration. The instance configuration name is assigned by the + /// caller. 
If the named instance configuration already exists, + /// `CreateInstanceConfig` returns `ALREADY_EXISTS`. /// /// Immediately after the request returns: /// - /// * The instance config is readable via the API, with all requested - /// attributes. The instance config's + /// * The instance configuration is readable via the API, with all requested + /// attributes. The instance configuration's /// [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] /// field is set to true. Its state is `CREATING`. /// /// While the operation is pending: /// - /// * Cancelling the operation renders the instance config immediately + /// * Cancelling the operation renders the instance configuration immediately /// unreadable via the API. /// * Except for deleting the creating resource, all other attempts to modify - /// the instance config are rejected. + /// the instance configuration are rejected. /// /// Upon completion of the returned operation: /// /// * Instances can be created using the instance configuration. - /// * The instance config's + /// * The instance configuration's /// [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] /// field becomes false. Its state becomes `READY`. /// /// The returned [long-running operation][google.longrunning.Operation] will /// have a name of the format /// `/operations/` and can be used to track - /// creation of the instance config. The + /// creation of the instance configuration. The /// [metadata][google.longrunning.Operation.metadata] field type is /// [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. /// The [response][google.longrunning.Operation.response] field type is @@ -1044,16 +1697,16 @@ pub mod instance_admin_client { ); self.inner.unary(req, path, codec).await } - /// Updates an instance config. The returned + /// Updates an instance configuration. 
The returned /// [long-running operation][google.longrunning.Operation] can be used to track - /// the progress of updating the instance. If the named instance config does - /// not exist, returns `NOT_FOUND`. + /// the progress of updating the instance. If the named instance configuration + /// does not exist, returns `NOT_FOUND`. /// - /// Only user managed configurations can be updated. + /// Only user-managed configurations can be updated. /// /// Immediately after the request returns: /// - /// * The instance config's + /// * The instance configuration's /// [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] /// field is set to true. /// @@ -1063,23 +1716,23 @@ pub mod instance_admin_client { /// [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time]. /// The operation is guaranteed to succeed at undoing all changes, after /// which point it terminates with a `CANCELLED` status. - /// * All other attempts to modify the instance config are rejected. - /// * Reading the instance config via the API continues to give the + /// * All other attempts to modify the instance configuration are rejected. + /// * Reading the instance configuration via the API continues to give the /// pre-request values. /// /// Upon completion of the returned operation: /// /// * Creating instances using the instance configuration uses the new /// values. - /// * The instance config's new values are readable via the API. - /// * The instance config's + /// * The new values of the instance configuration are readable via the API. + /// * The instance configuration's /// [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] /// field becomes false. /// /// The returned [long-running operation][google.longrunning.Operation] will /// have a name of the format /// `/operations/` and can be used to track - /// the instance config modification. The + /// the instance configuration modification. 
The /// [metadata][google.longrunning.Operation.metadata] field type is /// [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata]. /// The [response][google.longrunning.Operation.response] field type is @@ -1118,11 +1771,11 @@ pub mod instance_admin_client { ); self.inner.unary(req, path, codec).await } - /// Deletes the instance config. Deletion is only allowed when no + /// Deletes the instance configuration. Deletion is only allowed when no /// instances are using the configuration. If any instances are using - /// the config, returns `FAILED_PRECONDITION`. + /// the configuration, returns `FAILED_PRECONDITION`. /// - /// Only user managed configurations can be deleted. + /// Only user-managed configurations can be deleted. /// /// Authorization requires `spanner.instanceConfigs.delete` permission on /// the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name]. @@ -1153,9 +1806,9 @@ pub mod instance_admin_client { ); self.inner.unary(req, path, codec).await } - /// Lists the user-managed instance config [long-running + /// Lists the user-managed instance configuration [long-running /// operations][google.longrunning.Operation] in the given project. An instance - /// config operation has a name of the form + /// configuration operation has a name of the form /// `projects//instanceConfigs//operations/`. /// The long-running operation /// [metadata][google.longrunning.Operation.metadata] field type @@ -1225,6 +1878,37 @@ pub mod instance_admin_client { ); self.inner.unary(req, path, codec).await } + /// Lists all instance partitions for the given instance. 
+ pub async fn list_instance_partitions( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstancePartitions", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.spanner.admin.instance.v1.InstanceAdmin", + "ListInstancePartitions", + ), + ); + self.inner.unary(req, path, codec).await + } /// Gets information about a particular instance. pub async fn get_instance( &mut self, @@ -1539,5 +2223,349 @@ pub mod instance_admin_client { ); self.inner.unary(req, path, codec).await } + /// Gets information about a particular instance partition. + pub async fn get_instance_partition( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstancePartition", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.spanner.admin.instance.v1.InstanceAdmin", + "GetInstancePartition", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Creates an instance partition and begins preparing it to be used. The + /// returned [long-running operation][google.longrunning.Operation] + /// can be used to track the progress of preparing the new instance partition. + /// The instance partition name is assigned by the caller. 
If the named + /// instance partition already exists, `CreateInstancePartition` returns + /// `ALREADY_EXISTS`. + /// + /// Immediately upon completion of this request: + /// + /// * The instance partition is readable via the API, with all requested + /// attributes but no allocated resources. Its state is `CREATING`. + /// + /// Until completion of the returned operation: + /// + /// * Cancelling the operation renders the instance partition immediately + /// unreadable via the API. + /// * The instance partition can be deleted. + /// * All other attempts to modify the instance partition are rejected. + /// + /// Upon completion of the returned operation: + /// + /// * Billing for all successfully-allocated resources begins (some types + /// may have lower than the requested levels). + /// * Databases can start using this instance partition. + /// * The instance partition's allocated resource levels are readable via the + /// API. + /// * The instance partition's state becomes `READY`. + /// + /// The returned [long-running operation][google.longrunning.Operation] will + /// have a name of the format + /// `/operations/` and can be used to + /// track creation of the instance partition. The + /// [metadata][google.longrunning.Operation.metadata] field type is + /// [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]. + /// The [response][google.longrunning.Operation.response] field type is + /// [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if + /// successful. 
+ pub async fn create_instance_partition( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstancePartition", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.spanner.admin.instance.v1.InstanceAdmin", + "CreateInstancePartition", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Deletes an existing instance partition. Requires that the + /// instance partition is not used by any database or backup and is not the + /// default instance partition of an instance. + /// + /// Authorization requires `spanner.instancePartitions.delete` permission on + /// the resource + /// [name][google.spanner.admin.instance.v1.InstancePartition.name]. + pub async fn delete_instance_partition( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstancePartition", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.spanner.admin.instance.v1.InstanceAdmin", + "DeleteInstancePartition", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Updates an instance partition, and begins allocating or releasing resources + /// as requested. 
The returned [long-running + /// operation][google.longrunning.Operation] can be used to track the + /// progress of updating the instance partition. If the named instance + /// partition does not exist, returns `NOT_FOUND`. + /// + /// Immediately upon completion of this request: + /// + /// * For resource types for which a decrease in the instance partition's + /// allocation has been requested, billing is based on the newly-requested + /// level. + /// + /// Until completion of the returned operation: + /// + /// * Cancelling the operation sets its metadata's + /// [cancel_time][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.cancel_time], + /// and begins restoring resources to their pre-request values. The + /// operation is guaranteed to succeed at undoing all resource changes, + /// after which point it terminates with a `CANCELLED` status. + /// * All other attempts to modify the instance partition are rejected. + /// * Reading the instance partition via the API continues to give the + /// pre-request resource levels. + /// + /// Upon completion of the returned operation: + /// + /// * Billing begins for all successfully-allocated resources (some types + /// may have lower than the requested levels). + /// * All newly-reserved resources are available for serving the instance + /// partition's tables. + /// * The instance partition's new resource levels are readable via the API. + /// + /// The returned [long-running operation][google.longrunning.Operation] will + /// have a name of the format + /// `/operations/` and can be used to + /// track the instance partition modification. The + /// [metadata][google.longrunning.Operation.metadata] field type is + /// [UpdateInstancePartitionMetadata][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata]. + /// The [response][google.longrunning.Operation.response] field type is + /// [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if + /// successful. 
+ /// + /// Authorization requires `spanner.instancePartitions.update` permission on + /// the resource + /// [name][google.spanner.admin.instance.v1.InstancePartition.name]. + pub async fn update_instance_partition( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstancePartition", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.spanner.admin.instance.v1.InstanceAdmin", + "UpdateInstancePartition", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Lists instance partition [long-running + /// operations][google.longrunning.Operation] in the given instance. + /// An instance partition operation has a name of the form + /// `projects//instances//instancePartitions//operations/`. + /// The long-running operation + /// [metadata][google.longrunning.Operation.metadata] field type + /// `metadata.type_url` describes the type of the metadata. Operations returned + /// include those that have completed/failed/canceled within the last 7 days, + /// and pending operations. Operations returned are ordered by + /// `operation.metadata.value.start_time` in descending order starting from the + /// most recently started operation. + /// + /// Authorization requires `spanner.instancePartitionOperations.list` + /// permission on the resource + /// [parent][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.parent]. 
+ pub async fn list_instance_partition_operations( + &mut self, + request: impl tonic::IntoRequest< + super::ListInstancePartitionOperationsRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstancePartitionOperations", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.spanner.admin.instance.v1.InstanceAdmin", + "ListInstancePartitionOperations", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Moves an instance to the target instance configuration. You can use the + /// returned [long-running operation][google.longrunning.Operation] to track + /// the progress of moving the instance. + /// + /// `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of + /// the following criteria: + /// + /// * Is undergoing a move to a different instance configuration + /// * Has backups + /// * Has an ongoing update + /// * Contains any CMEK-enabled databases + /// * Is a free trial instance + /// + /// While the operation is pending: + /// + /// * All other attempts to modify the instance, including changes to its + /// compute capacity, are rejected. + /// * The following database and backup admin operations are rejected: + /// + /// * `DatabaseAdmin.CreateDatabase` + /// * `DatabaseAdmin.UpdateDatabaseDdl` (disabled if default_leader is + /// specified in the request.) + /// * `DatabaseAdmin.RestoreDatabase` + /// * `DatabaseAdmin.CreateBackup` + /// * `DatabaseAdmin.CopyBackup` + /// + /// * Both the source and target instance configurations are subject to + /// hourly compute and storage charges. 
+ /// * The instance might experience higher read-write latencies and a higher + /// transaction abort rate. However, moving an instance doesn't cause any + /// downtime. + /// + /// The returned [long-running operation][google.longrunning.Operation] has + /// a name of the format + /// `/operations/` and can be used to track + /// the move instance operation. The + /// [metadata][google.longrunning.Operation.metadata] field type is + /// [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata]. + /// The [response][google.longrunning.Operation.response] field type is + /// [Instance][google.spanner.admin.instance.v1.Instance], + /// if successful. + /// Cancelling the operation sets its metadata's + /// [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time]. + /// Cancellation is not immediate because it involves moving any data + /// previously moved to the target instance configuration back to the original + /// instance configuration. You can use this operation to track the progress of + /// the cancellation. Upon successful completion of the cancellation, the + /// operation terminates with `CANCELLED` status. + /// + /// If not cancelled, upon completion of the returned operation: + /// + /// * The instance successfully moves to the target instance + /// configuration. + /// * You are billed for compute and storage in target instance + /// configuration. + /// + /// Authorization requires the `spanner.instances.update` permission on + /// the resource [instance][google.spanner.admin.instance.v1.Instance]. + /// + /// For more details, see + /// [Move an instance](https://cloud.google.com/spanner/docs/move-instance). 
+ pub async fn move_instance( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.instance.v1.InstanceAdmin/MoveInstance", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.spanner.admin.instance.v1.InstanceAdmin", + "MoveInstance", + ), + ); + self.inner.unary(req, path, codec).await + } } } diff --git a/googleapis/src/bytes/google.spanner.v1.rs b/googleapis/src/bytes/google.spanner.v1.rs index 7c563072..0bef2912 100644 --- a/googleapis/src/bytes/google.spanner.v1.rs +++ b/googleapis/src/bytes/google.spanner.v1.rs @@ -744,6 +744,22 @@ pub struct QueryPlan { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct TransactionOptions { + /// When `exclude_txn_from_change_streams` is set to `true`: + /// * Mutations from this transaction will not be recorded in change streams + /// with DDL option `allow_txn_exclusion=true` that are tracking columns + /// modified by these transactions. + /// * Mutations from this transaction will be recorded in change streams with + /// DDL option `allow_txn_exclusion=false or not set` that are tracking + /// columns modified by these transactions. + /// + /// When `exclude_txn_from_change_streams` is set to `false` or not set, + /// mutations from this transaction will be recorded in all change streams that + /// are tracking columns modified by these transactions. + /// `exclude_txn_from_change_streams` may only be specified for read-write or + /// partitioned-dml transactions, otherwise the API will return an + /// `INVALID_ARGUMENT` error. 
+ #[prost(bool, tag = "5")] + pub exclude_txn_from_change_streams: bool, /// Required. The type of transaction. #[prost(oneof = "transaction_options::Mode", tags = "1, 3, 2")] pub mode: ::core::option::Option, @@ -1008,6 +1024,13 @@ pub struct Type { /// affect serialization) and clients can ignore it on the read path. #[prost(enumeration = "TypeAnnotationCode", tag = "4")] pub type_annotation: i32, + /// If [code][google.spanner.v1.Type.code] == + /// [PROTO][google.spanner.v1.TypeCode.PROTO] or + /// [code][google.spanner.v1.Type.code] == + /// [ENUM][google.spanner.v1.TypeCode.ENUM], then `proto_type_fqn` is the fully + /// qualified name of the proto type representing the proto/enum definition. + #[prost(string, tag = "5")] + pub proto_type_fqn: ::prost::alloc::string::String, } /// `StructType` defines the fields of a [STRUCT][google.spanner.v1.TypeCode.STRUCT] type. #[allow(clippy::derive_partial_eq_without_eq)] @@ -1061,6 +1084,9 @@ pub enum TypeCode { /// Encoded as `number`, or the strings `"NaN"`, `"Infinity"`, or /// `"-Infinity"`. Float64 = 3, + /// Encoded as `number`, or the strings `"NaN"`, `"Infinity"`, or + /// `"-Infinity"`. + Float32 = 15, /// Encoded as `string` in RFC 3339 timestamp format. The time zone /// must be present, and must be `"Z"`. /// @@ -1103,6 +1129,11 @@ pub enum TypeCode { /// preserved. /// - JSON array elements will have their order preserved. Json = 11, + /// Encoded as a base64-encoded `string`, as described in RFC 4648, + /// section 4. + Proto = 13, + /// Encoded as `string`, in decimal format. + Enum = 14, } impl TypeCode { /// String value of the enum field names used in the ProtoBuf definition. 
@@ -1115,6 +1146,7 @@ impl TypeCode { TypeCode::Bool => "BOOL", TypeCode::Int64 => "INT64", TypeCode::Float64 => "FLOAT64", + TypeCode::Float32 => "FLOAT32", TypeCode::Timestamp => "TIMESTAMP", TypeCode::Date => "DATE", TypeCode::String => "STRING", @@ -1123,6 +1155,8 @@ impl TypeCode { TypeCode::Struct => "STRUCT", TypeCode::Numeric => "NUMERIC", TypeCode::Json => "JSON", + TypeCode::Proto => "PROTO", + TypeCode::Enum => "ENUM", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -1132,6 +1166,7 @@ impl TypeCode { "BOOL" => Some(Self::Bool), "INT64" => Some(Self::Int64), "FLOAT64" => Some(Self::Float64), + "FLOAT32" => Some(Self::Float32), "TIMESTAMP" => Some(Self::Timestamp), "DATE" => Some(Self::Date), "STRING" => Some(Self::String), @@ -1140,6 +1175,8 @@ impl TypeCode { "STRUCT" => Some(Self::Struct), "NUMERIC" => Some(Self::Numeric), "JSON" => Some(Self::Json), + "PROTO" => Some(Self::Proto), + "ENUM" => Some(Self::Enum), _ => None, } } @@ -1168,6 +1205,10 @@ pub enum TypeAnnotationCode { /// [JSON][google.spanner.v1.TypeCode.JSON] when a client interacts with PostgreSQL-enabled /// Spanner databases. PgJsonb = 3, + /// PostgreSQL compatible OID type. This annotation can be used by a client + /// interacting with PostgreSQL-enabled Spanner database to specify that a + /// value should be treated using the semantics of the OID type. + PgOid = 4, } impl TypeAnnotationCode { /// String value of the enum field names used in the ProtoBuf definition. @@ -1179,6 +1220,7 @@ impl TypeAnnotationCode { TypeAnnotationCode::Unspecified => "TYPE_ANNOTATION_CODE_UNSPECIFIED", TypeAnnotationCode::PgNumeric => "PG_NUMERIC", TypeAnnotationCode::PgJsonb => "PG_JSONB", + TypeAnnotationCode::PgOid => "PG_OID", } } /// Creates an enum from field names used in the ProtoBuf definition. 
@@ -1187,6 +1229,7 @@ impl TypeAnnotationCode { "TYPE_ANNOTATION_CODE_UNSPECIFIED" => Some(Self::Unspecified), "PG_NUMERIC" => Some(Self::PgNumeric), "PG_JSONB" => Some(Self::PgJsonb), + "PG_OID" => Some(Self::PgOid), _ => None, } } @@ -1403,7 +1446,8 @@ pub struct CreateSessionRequest { #[prost(message, optional, tag = "2")] pub session: ::core::option::Option, } -/// The request for [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. +/// The request for +/// [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BatchCreateSessionsRequest { @@ -1417,11 +1461,13 @@ pub struct BatchCreateSessionsRequest { /// The API may return fewer than the requested number of sessions. If a /// specific number of sessions are desired, the client can make additional /// calls to BatchCreateSessions (adjusting - /// [session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count] as necessary). + /// [session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count] + /// as necessary). #[prost(int32, tag = "3")] pub session_count: i32, } -/// The response for [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. +/// The response for +/// [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BatchCreateSessionsResponse { @@ -1460,6 +1506,15 @@ pub struct Session { /// The database role which created this session. #[prost(string, tag = "5")] pub creator_role: ::prost::alloc::string::String, + /// Optional. If true, specifies a multiplexed session. A multiplexed session + /// may be used for multiple, concurrent read-only operations but can not be + /// used for read-write transactions, partitioned reads, or partitioned + /// queries. 
Multiplexed sessions can be created via + /// [CreateSession][google.spanner.v1.Spanner.CreateSession] but not via + /// [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. + /// Multiplexed sessions may not be deleted nor listed. + #[prost(bool, tag = "6")] + pub multiplexed: bool, } /// The request for [GetSession][google.spanner.v1.Spanner.GetSession]. #[allow(clippy::derive_partial_eq_without_eq)] @@ -1481,7 +1536,8 @@ pub struct ListSessionsRequest { #[prost(int32, tag = "2")] pub page_size: i32, /// If non-empty, `page_token` should contain a - /// [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token] from a previous + /// [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token] + /// from a previous /// [ListSessionsResponse][google.spanner.v1.ListSessionsResponse]. #[prost(string, tag = "3")] pub page_token: ::prost::alloc::string::String, @@ -1506,8 +1562,8 @@ pub struct ListSessionsResponse { #[prost(message, repeated, tag = "1")] pub sessions: ::prost::alloc::vec::Vec, /// `next_page_token` can be sent in a subsequent - /// [ListSessions][google.spanner.v1.Spanner.ListSessions] call to fetch more of the matching - /// sessions. + /// [ListSessions][google.spanner.v1.Spanner.ListSessions] call to fetch more + /// of the matching sessions. #[prost(string, tag = "2")] pub next_page_token: ::prost::alloc::string::String, } @@ -1617,6 +1673,137 @@ pub mod request_options { } } } +/// The DirectedReadOptions can be used to indicate which replicas or regions +/// should be used for non-transactional reads or queries. +/// +/// DirectedReadOptions may only be specified for a read-only transaction, +/// otherwise the API will return an `INVALID_ARGUMENT` error. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DirectedReadOptions { + /// Required. At most one of either include_replicas or exclude_replicas + /// should be present in the message. 
+ #[prost(oneof = "directed_read_options::Replicas", tags = "1, 2")] + pub replicas: ::core::option::Option, +} +/// Nested message and enum types in `DirectedReadOptions`. +pub mod directed_read_options { + /// The directed read replica selector. + /// Callers must provide one or more of the following fields for replica + /// selection: + /// + /// * `location` - The location must be one of the regions within the + /// multi-region configuration of your database. + /// * `type` - The type of the replica. + /// + /// Some examples of using replica_selectors are: + /// + /// * `location:us-east1` --> The "us-east1" replica(s) of any available type + /// will be used to process the request. + /// * `type:READ_ONLY` --> The "READ_ONLY" type replica(s) in nearest + /// available location will be used to process the + /// request. + /// * `location:us-east1 type:READ_ONLY` --> The "READ_ONLY" type replica(s) + /// in location "us-east1" will be used to process + /// the request. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct ReplicaSelection { + /// The location or region of the serving requests, e.g. "us-east1". + #[prost(string, tag = "1")] + pub location: ::prost::alloc::string::String, + /// The type of replica. + #[prost(enumeration = "replica_selection::Type", tag = "2")] + pub r#type: i32, + } + /// Nested message and enum types in `ReplicaSelection`. + pub mod replica_selection { + /// Indicates the type of replica. + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum Type { + /// Not specified. + Unspecified = 0, + /// Read-write replicas support both reads and writes. + ReadWrite = 1, + /// Read-only replicas only support reads (not writes). + ReadOnly = 2, + } + impl Type { + /// String value of the enum field names used in the ProtoBuf definition. 
+ /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Type::Unspecified => "TYPE_UNSPECIFIED", + Type::ReadWrite => "READ_WRITE", + Type::ReadOnly => "READ_ONLY", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "TYPE_UNSPECIFIED" => Some(Self::Unspecified), + "READ_WRITE" => Some(Self::ReadWrite), + "READ_ONLY" => Some(Self::ReadOnly), + _ => None, + } + } + } + } + /// An IncludeReplicas contains a repeated set of ReplicaSelection which + /// indicates the order in which replicas should be considered. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct IncludeReplicas { + /// The directed read replica selector. + #[prost(message, repeated, tag = "1")] + pub replica_selections: ::prost::alloc::vec::Vec, + /// If true, Spanner will not route requests to a replica outside the + /// include_replicas list when all of the specified replicas are unavailable + /// or unhealthy. Default value is `false`. + #[prost(bool, tag = "2")] + pub auto_failover_disabled: bool, + } + /// An ExcludeReplicas contains a repeated set of ReplicaSelection that should + /// be excluded from serving requests. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct ExcludeReplicas { + /// The directed read replica selector. + #[prost(message, repeated, tag = "1")] + pub replica_selections: ::prost::alloc::vec::Vec, + } + /// Required. At most one of either include_replicas or exclude_replicas + /// should be present in the message. 
+ #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Replicas { + /// Include_replicas indicates the order of replicas (as they appear in + /// this list) to process the request. If auto_failover_disabled is set to + /// true and all replicas are exhausted without finding a healthy replica, + /// Spanner will wait for a replica in the list to become available, requests + /// may fail due to `DEADLINE_EXCEEDED` errors. + #[prost(message, tag = "1")] + IncludeReplicas(IncludeReplicas), + /// Exclude_replicas indicates that specified replicas should be excluded + /// from serving requests. Spanner will not route requests to the replicas + /// in this list. + #[prost(message, tag = "2")] + ExcludeReplicas(ExcludeReplicas), + } +} /// The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and /// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]. #[allow(clippy::derive_partial_eq_without_eq)] @@ -1657,7 +1844,8 @@ pub struct ExecuteSqlRequest { pub params: ::core::option::Option<::prost_types::Struct>, /// It is not always possible for Cloud Spanner to infer the right SQL type /// from a JSON value. For example, values of type `BYTES` and values - /// of type `STRING` both appear in [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings. + /// of type `STRING` both appear in + /// [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings. /// /// In these cases, `param_types` can be used to specify the exact /// SQL type for some or all of the SQL statement parameters. See the @@ -1667,15 +1855,18 @@ pub struct ExecuteSqlRequest { pub param_types: ::std::collections::HashMap<::prost::alloc::string::String, Type>, /// If this request is resuming a previously interrupted SQL statement /// execution, `resume_token` should be copied from the last - /// [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the interruption. 
Doing this - /// enables the new SQL statement execution to resume where the last one left - /// off. The rest of the request parameters must exactly match the - /// request that yielded this token. + /// [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the + /// interruption. Doing this enables the new SQL statement execution to resume + /// where the last one left off. The rest of the request parameters must + /// exactly match the request that yielded this token. #[prost(bytes = "bytes", tag = "6")] pub resume_token: ::prost::bytes::Bytes, /// Used to control the amount of debugging information returned in - /// [ResultSetStats][google.spanner.v1.ResultSetStats]. If [partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token] is set, [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] can only - /// be set to [QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL]. + /// [ResultSetStats][google.spanner.v1.ResultSetStats]. If + /// [partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token] is + /// set, [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] can only + /// be set to + /// [QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL]. #[prost(enumeration = "execute_sql_request::QueryMode", tag = "7")] pub query_mode: i32, /// If present, results will be restricted to the specified partition @@ -1702,11 +1893,14 @@ pub struct ExecuteSqlRequest { /// Common options for this request. #[prost(message, optional, tag = "11")] pub request_options: ::core::option::Option, + /// Directed read options for this request. + #[prost(message, optional, tag = "15")] + pub directed_read_options: ::core::option::Option, /// If this is for a partitioned query and this field is set to `true`, the - /// request will be executed via Spanner independent compute resources. + /// request is executed with Spanner Data Boost independent compute resources. 
/// /// If the field is set to `true` but the request does not set - /// `partition_token`, the API will return an `INVALID_ARGUMENT` error. + /// `partition_token`, the API returns an `INVALID_ARGUMENT` error. #[prost(bool, tag = "16")] pub data_boost_enabled: bool, } @@ -1827,17 +2021,17 @@ pub struct ExecuteBatchDmlRequest { /// transaction. #[prost(message, optional, tag = "2")] pub transaction: ::core::option::Option, - /// Required. The list of statements to execute in this batch. Statements are executed - /// serially, such that the effects of statement `i` are visible to statement - /// `i+1`. Each statement must be a DML statement. Execution stops at the - /// first failed statement; the remaining statements are not executed. + /// Required. The list of statements to execute in this batch. Statements are + /// executed serially, such that the effects of statement `i` are visible to + /// statement `i+1`. Each statement must be a DML statement. Execution stops at + /// the first failed statement; the remaining statements are not executed. /// /// Callers must provide at least one statement. #[prost(message, repeated, tag = "3")] pub statements: ::prost::alloc::vec::Vec, - /// Required. A per-transaction sequence number used to identify this request. This field - /// makes each request idempotent such that if the request is received multiple - /// times, at most one will succeed. + /// Required. A per-transaction sequence number used to identify this request. + /// This field makes each request idempotent such that if the request is + /// received multiple times, at most one will succeed. /// /// The sequence number must be monotonically increasing within the /// transaction. If a request arrives for the first time with an out-of-order @@ -1874,7 +2068,9 @@ pub mod execute_batch_dml_request { pub params: ::core::option::Option<::prost_types::Struct>, /// It is not always possible for Cloud Spanner to infer the right SQL type /// from a JSON value. 
For example, values of type `BYTES` and values - /// of type `STRING` both appear in [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as JSON strings. + /// of type `STRING` both appear in + /// [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as + /// JSON strings. /// /// In these cases, `param_types` can be used to specify the exact /// SQL type for some or all of the SQL statement parameters. See the @@ -1887,40 +2083,49 @@ pub mod execute_batch_dml_request { >, } } -/// The response for [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. Contains a list -/// of [ResultSet][google.spanner.v1.ResultSet] messages, one for each DML statement that has successfully -/// executed, in the same order as the statements in the request. If a statement -/// fails, the status in the response body identifies the cause of the failure. +/// The response for +/// [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. Contains a list +/// of [ResultSet][google.spanner.v1.ResultSet] messages, one for each DML +/// statement that has successfully executed, in the same order as the statements +/// in the request. If a statement fails, the status in the response body +/// identifies the cause of the failure. /// /// To check for DML statements that failed, use the following approach: /// -/// 1. Check the status in the response message. The [google.rpc.Code][google.rpc.Code] enum +/// 1. Check the status in the response message. The +/// [google.rpc.Code][google.rpc.Code] enum /// value `OK` indicates that all statements were executed successfully. /// 2. If the status was not `OK`, check the number of result sets in the -/// response. If the response contains `N` [ResultSet][google.spanner.v1.ResultSet] messages, then -/// statement `N+1` in the request failed. +/// response. If the response contains `N` +/// [ResultSet][google.spanner.v1.ResultSet] messages, then statement `N+1` in +/// the request failed. 
/// /// Example 1: /// /// * Request: 5 DML statements, all executed successfully. -/// * Response: 5 [ResultSet][google.spanner.v1.ResultSet] messages, with the status `OK`. +/// * Response: 5 [ResultSet][google.spanner.v1.ResultSet] messages, with the +/// status `OK`. /// /// Example 2: /// /// * Request: 5 DML statements. The third statement has a syntax error. -/// * Response: 2 [ResultSet][google.spanner.v1.ResultSet] messages, and a syntax error (`INVALID_ARGUMENT`) -/// status. The number of [ResultSet][google.spanner.v1.ResultSet] messages indicates that the third -/// statement failed, and the fourth and fifth statements were not executed. +/// * Response: 2 [ResultSet][google.spanner.v1.ResultSet] messages, and a syntax +/// error (`INVALID_ARGUMENT`) +/// status. The number of [ResultSet][google.spanner.v1.ResultSet] messages +/// indicates that the third statement failed, and the fourth and fifth +/// statements were not executed. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExecuteBatchDmlResponse { - /// One [ResultSet][google.spanner.v1.ResultSet] for each statement in the request that ran successfully, - /// in the same order as the statements in the request. Each [ResultSet][google.spanner.v1.ResultSet] does - /// not contain any rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each [ResultSet][google.spanner.v1.ResultSet] contain - /// the number of rows modified by the statement. + /// One [ResultSet][google.spanner.v1.ResultSet] for each statement in the + /// request that ran successfully, in the same order as the statements in the + /// request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any + /// rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each + /// [ResultSet][google.spanner.v1.ResultSet] contain the number of rows + /// modified by the statement. 
/// - /// Only the first [ResultSet][google.spanner.v1.ResultSet] in the response contains valid - /// [ResultSetMetadata][google.spanner.v1.ResultSetMetadata]. + /// Only the first [ResultSet][google.spanner.v1.ResultSet] in the response + /// contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata]. #[prost(message, repeated, tag = "1")] pub result_sets: ::prost::alloc::vec::Vec, /// If all DML statements are executed successfully, the status is `OK`. @@ -1963,15 +2168,17 @@ pub struct PartitionQueryRequest { /// transactions are not. #[prost(message, optional, tag = "2")] pub transaction: ::core::option::Option, - /// Required. The query request to generate partitions for. The request will fail if - /// the query is not root partitionable. The query plan of a root - /// partitionable query has a single distributed union operator. A distributed - /// union operator conceptually divides one or more tables into multiple - /// splits, remotely evaluates a subquery independently on each split, and - /// then unions all results. - /// - /// This must not contain DML commands, such as INSERT, UPDATE, or - /// DELETE. Use [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] with a + /// Required. The query request to generate partitions for. The request will + /// fail if the query is not root partitionable. For a query to be root + /// partitionable, it needs to satisfy a few conditions. For example, if the + /// query execution plan contains a distributed union operator, then it must be + /// the first operator in the plan. For more information about other + /// conditions, see [Read data in + /// parallel](). + /// + /// The query request must not contain DML commands, such as INSERT, UPDATE, or + /// DELETE. Use + /// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] with a /// PartitionedDml transaction for large, partition-friendly DML operations. 
#[prost(string, tag = "3")] pub sql: ::prost::alloc::string::String, @@ -1991,7 +2198,8 @@ pub struct PartitionQueryRequest { pub params: ::core::option::Option<::prost_types::Struct>, /// It is not always possible for Cloud Spanner to infer the right SQL type /// from a JSON value. For example, values of type `BYTES` and values - /// of type `STRING` both appear in [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings. + /// of type `STRING` both appear in + /// [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings. /// /// In these cases, `param_types` can be used to specify the exact /// SQL type for some or all of the SQL query parameters. See the @@ -2017,18 +2225,24 @@ pub struct PartitionReadRequest { /// Required. The name of the table in the database to be read. #[prost(string, tag = "3")] pub table: ::prost::alloc::string::String, - /// If non-empty, the name of an index on [table][google.spanner.v1.PartitionReadRequest.table]. This index is - /// used instead of the table primary key when interpreting [key_set][google.spanner.v1.PartitionReadRequest.key_set] - /// and sorting result rows. See [key_set][google.spanner.v1.PartitionReadRequest.key_set] for further information. + /// If non-empty, the name of an index on + /// [table][google.spanner.v1.PartitionReadRequest.table]. This index is used + /// instead of the table primary key when interpreting + /// [key_set][google.spanner.v1.PartitionReadRequest.key_set] and sorting + /// result rows. See [key_set][google.spanner.v1.PartitionReadRequest.key_set] + /// for further information. #[prost(string, tag = "4")] pub index: ::prost::alloc::string::String, - /// The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be returned for each row matching - /// this request. + /// The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be + /// returned for each row matching this request. 
#[prost(string, repeated, tag = "5")] pub columns: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, /// Required. `key_set` identifies the rows to be yielded. `key_set` names the - /// primary keys of the rows in [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless [index][google.spanner.v1.PartitionReadRequest.index] - /// is present. If [index][google.spanner.v1.PartitionReadRequest.index] is present, then [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names + /// primary keys of the rows in + /// [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless + /// [index][google.spanner.v1.PartitionReadRequest.index] is present. If + /// [index][google.spanner.v1.PartitionReadRequest.index] is present, then + /// [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names /// index keys in [index][google.spanner.v1.PartitionReadRequest.index]. /// /// It is not an error for the `key_set` to name rows that do not @@ -2077,24 +2291,31 @@ pub struct ReadRequest { /// Required. The name of the table in the database to be read. #[prost(string, tag = "3")] pub table: ::prost::alloc::string::String, - /// If non-empty, the name of an index on [table][google.spanner.v1.ReadRequest.table]. This index is - /// used instead of the table primary key when interpreting [key_set][google.spanner.v1.ReadRequest.key_set] - /// and sorting result rows. See [key_set][google.spanner.v1.ReadRequest.key_set] for further information. + /// If non-empty, the name of an index on + /// [table][google.spanner.v1.ReadRequest.table]. This index is used instead of + /// the table primary key when interpreting + /// [key_set][google.spanner.v1.ReadRequest.key_set] and sorting result rows. + /// See [key_set][google.spanner.v1.ReadRequest.key_set] for further + /// information. #[prost(string, tag = "4")] pub index: ::prost::alloc::string::String, - /// Required. 
The columns of [table][google.spanner.v1.ReadRequest.table] to be returned for each row matching - /// this request. + /// Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be + /// returned for each row matching this request. #[prost(string, repeated, tag = "5")] pub columns: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, /// Required. `key_set` identifies the rows to be yielded. `key_set` names the - /// primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to be yielded, unless [index][google.spanner.v1.ReadRequest.index] - /// is present. If [index][google.spanner.v1.ReadRequest.index] is present, then [key_set][google.spanner.v1.ReadRequest.key_set] instead names - /// index keys in [index][google.spanner.v1.ReadRequest.index]. + /// primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to + /// be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present. + /// If [index][google.spanner.v1.ReadRequest.index] is present, then + /// [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys + /// in [index][google.spanner.v1.ReadRequest.index]. /// - /// If the [partition_token][google.spanner.v1.ReadRequest.partition_token] field is empty, rows are yielded - /// in table primary key order (if [index][google.spanner.v1.ReadRequest.index] is empty) or index key order - /// (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the [partition_token][google.spanner.v1.ReadRequest.partition_token] field is not - /// empty, rows will be yielded in an unspecified order. + /// If the [partition_token][google.spanner.v1.ReadRequest.partition_token] + /// field is empty, rows are yielded in table primary key order (if + /// [index][google.spanner.v1.ReadRequest.index] is empty) or index key order + /// (if [index][google.spanner.v1.ReadRequest.index] is non-empty). 
If the + /// [partition_token][google.spanner.v1.ReadRequest.partition_token] field is + /// not empty, rows will be yielded in an unspecified order. /// /// It is not an error for the `key_set` to name rows that do not /// exist in the database. Read yields nothing for nonexistent rows. @@ -2107,9 +2328,9 @@ pub struct ReadRequest { pub limit: i64, /// If this request is resuming a previously interrupted read, /// `resume_token` should be copied from the last - /// [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the interruption. Doing this - /// enables the new read to resume where the last read left off. The - /// rest of the request parameters must exactly match the request + /// [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the + /// interruption. Doing this enables the new read to resume where the last read + /// left off. The rest of the request parameters must exactly match the request /// that yielded this token. #[prost(bytes = "bytes", tag = "9")] pub resume_token: ::prost::bytes::Bytes, @@ -2122,15 +2343,162 @@ pub struct ReadRequest { /// Common options for this request. #[prost(message, optional, tag = "11")] pub request_options: ::core::option::Option, + /// Directed read options for this request. + #[prost(message, optional, tag = "14")] + pub directed_read_options: ::core::option::Option, /// If this is for a partitioned read and this field is set to `true`, the - /// request will be executed via Spanner independent compute resources. + /// request is executed with Spanner Data Boost independent compute resources. /// /// If the field is set to `true` but the request does not set - /// `partition_token`, the API will return an `INVALID_ARGUMENT` error. + /// `partition_token`, the API returns an `INVALID_ARGUMENT` error. #[prost(bool, tag = "15")] pub data_boost_enabled: bool, + /// Optional. Order for the returned rows. 
+ /// + /// By default, Spanner will return result rows in primary key order except for + /// PartitionRead requests. For applications that do not require rows to be + /// returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting + /// `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval, + /// resulting in lower latencies in certain cases (e.g. bulk point lookups). + #[prost(enumeration = "read_request::OrderBy", tag = "16")] + pub order_by: i32, + /// Optional. Lock Hint for the request, it can only be used with read-write + /// transactions. + #[prost(enumeration = "read_request::LockHint", tag = "17")] + pub lock_hint: i32, +} +/// Nested message and enum types in `ReadRequest`. +pub mod read_request { + /// An option to control the order in which rows are returned from a read. + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum OrderBy { + /// Default value. + /// + /// ORDER_BY_UNSPECIFIED is equivalent to ORDER_BY_PRIMARY_KEY. + Unspecified = 0, + /// Read rows are returned in primary key order. + /// + /// In the event that this option is used in conjunction with the + /// `partition_token` field, the API will return an `INVALID_ARGUMENT` error. + PrimaryKey = 1, + /// Read rows are returned in any order. + NoOrder = 2, + } + impl OrderBy { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + OrderBy::Unspecified => "ORDER_BY_UNSPECIFIED", + OrderBy::PrimaryKey => "ORDER_BY_PRIMARY_KEY", + OrderBy::NoOrder => "ORDER_BY_NO_ORDER", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "ORDER_BY_UNSPECIFIED" => Some(Self::Unspecified), + "ORDER_BY_PRIMARY_KEY" => Some(Self::PrimaryKey), + "ORDER_BY_NO_ORDER" => Some(Self::NoOrder), + _ => None, + } + } + } + /// A lock hint mechanism for reads done within a transaction. + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum LockHint { + /// Default value. + /// + /// LOCK_HINT_UNSPECIFIED is equivalent to LOCK_HINT_SHARED. + Unspecified = 0, + /// Acquire shared locks. + /// + /// By default when you perform a read as part of a read-write transaction, + /// Spanner acquires shared read locks, which allows other reads to still + /// access the data until your transaction is ready to commit. When your + /// transaction is committing and writes are being applied, the transaction + /// attempts to upgrade to an exclusive lock for any data you are writing. + /// For more information about locks, see [Lock + /// modes](). + Shared = 1, + /// Acquire exclusive locks. + /// + /// Requesting exclusive locks is beneficial if you observe high write + /// contention, which means you notice that multiple transactions are + /// concurrently trying to read and write to the same data, resulting in a + /// large number of aborts. This problem occurs when two transactions + /// initially acquire shared locks and then both try to upgrade to exclusive + /// locks at the same time. In this situation both transactions are waiting + /// for the other to give up their lock, resulting in a deadlocked situation. + /// Spanner is able to detect this occurring and force one of the + /// transactions to abort. However, this is a slow and expensive operation + /// and results in lower performance. 
In this case it makes sense to acquire + /// exclusive locks at the start of the transaction because then when + /// multiple transactions try to act on the same data, they automatically get + /// serialized. Each transaction waits its turn to acquire the lock and + /// avoids getting into deadlock situations. + /// + /// Because the exclusive lock hint is just a hint, it should not be + /// considered equivalent to a mutex. In other words, you should not use + /// Spanner exclusive locks as a mutual exclusion mechanism for the execution + /// of code outside of Spanner. + /// + /// **Note:** Request exclusive locks judiciously because they block others + /// from reading that data for the entire transaction, rather than just when + /// the writes are being performed. Unless you observe high write contention, + /// you should use the default of shared read locks so you don't prematurely + /// block other clients from reading the data that you're writing to. + Exclusive = 2, + } + impl LockHint { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + LockHint::Unspecified => "LOCK_HINT_UNSPECIFIED", + LockHint::Shared => "LOCK_HINT_SHARED", + LockHint::Exclusive => "LOCK_HINT_EXCLUSIVE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "LOCK_HINT_UNSPECIFIED" => Some(Self::Unspecified), + "LOCK_HINT_SHARED" => Some(Self::Shared), + "LOCK_HINT_EXCLUSIVE" => Some(Self::Exclusive), + _ => None, + } + } + } } -/// The request for [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. +/// The request for +/// [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BeginTransactionRequest { @@ -2161,10 +2529,17 @@ pub struct CommitRequest { #[prost(message, repeated, tag = "4")] pub mutations: ::prost::alloc::vec::Vec, /// If `true`, then statistics related to the transaction will be included in - /// the [CommitResponse][google.spanner.v1.CommitResponse.commit_stats]. Default value is - /// `false`. + /// the [CommitResponse][google.spanner.v1.CommitResponse.commit_stats]. + /// Default value is `false`. #[prost(bool, tag = "5")] pub return_commit_stats: bool, + /// Optional. The amount of latency this request is willing to incur in order + /// to improve throughput. If this field is not set, Spanner assumes requests + /// are relatively latency sensitive and automatically determines an + /// appropriate delay time. You can specify a batching delay value between 0 + /// and 500 ms. + #[prost(message, optional, tag = "8")] + pub max_commit_delay: ::core::option::Option<::prost_types::Duration>, /// Common options for this request. #[prost(message, optional, tag = "6")] pub request_options: ::core::option::Option, @@ -2205,6 +2580,63 @@ pub struct RollbackRequest { #[prost(bytes = "bytes", tag = "2")] pub transaction_id: ::prost::bytes::Bytes, } +/// The request for [BatchWrite][google.spanner.v1.Spanner.BatchWrite]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BatchWriteRequest { + /// Required. The session in which the batch request is to be run. + #[prost(string, tag = "1")] + pub session: ::prost::alloc::string::String, + /// Common options for this request. + #[prost(message, optional, tag = "3")] + pub request_options: ::core::option::Option, + /// Required. The groups of mutations to be applied. + #[prost(message, repeated, tag = "4")] + pub mutation_groups: ::prost::alloc::vec::Vec, + /// Optional. 
When `exclude_txn_from_change_streams` is set to `true`: + /// * Mutations from all transactions in this batch write operation will not + /// be recorded in change streams with DDL option `allow_txn_exclusion=true` + /// that are tracking columns modified by these transactions. + /// * Mutations from all transactions in this batch write operation will be + /// recorded in change streams with DDL option `allow_txn_exclusion=false or + /// not set` that are tracking columns modified by these transactions. + /// + /// When `exclude_txn_from_change_streams` is set to `false` or not set, + /// mutations from all transactions in this batch write operation will be + /// recorded in all change streams that are tracking columns modified by these + /// transactions. + #[prost(bool, tag = "5")] + pub exclude_txn_from_change_streams: bool, +} +/// Nested message and enum types in `BatchWriteRequest`. +pub mod batch_write_request { + /// A group of mutations to be committed together. Related mutations should be + /// placed in a group. For example, two mutations inserting rows with the same + /// primary key prefix in both parent and child tables are related. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct MutationGroup { + /// Required. The mutations in this group. + #[prost(message, repeated, tag = "1")] + pub mutations: ::prost::alloc::vec::Vec, + } +} +/// The result of applying a batch of mutations. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BatchWriteResponse { + /// The mutation groups applied in this batch. The values index into the + /// `mutation_groups` field in the corresponding `BatchWriteRequest`. + #[prost(int32, repeated, tag = "1")] + pub indexes: ::prost::alloc::vec::Vec, + /// An `OK` status indicates success. Any other status indicates a failure. 
+ #[prost(message, optional, tag = "2")] + pub status: ::core::option::Option, + /// The commit timestamp of the transaction that applied this batch. + /// Present if `status` is `OK`, absent otherwise. + #[prost(message, optional, tag = "3")] + pub commit_timestamp: ::core::option::Option<::prost_types::Timestamp>, +} /// Generated client implementations. pub mod spanner_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] @@ -2449,10 +2881,12 @@ pub mod spanner_client { /// /// Operations inside read-write transactions might return `ABORTED`. If /// this occurs, the application should restart the transaction from - /// the beginning. See [Transaction][google.spanner.v1.Transaction] for more details. + /// the beginning. See [Transaction][google.spanner.v1.Transaction] for more + /// details. /// /// Larger result sets can be fetched in streaming fashion by calling - /// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] instead. + /// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] + /// instead. pub async fn execute_sql( &mut self, request: impl tonic::IntoRequest, @@ -2475,11 +2909,11 @@ pub mod spanner_client { .insert(GrpcMethod::new("google.spanner.v1.Spanner", "ExecuteSql")); self.inner.unary(req, path, codec).await } - /// Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the result - /// set as a stream. Unlike [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there - /// is no limit on the size of the returned result set. However, no - /// individual row in the result set can exceed 100 MiB, and no - /// column value can exceed 10 MiB. + /// Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the + /// result set as a stream. Unlike + /// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on + /// the size of the returned result set. 
However, no individual row in the + /// result set can exceed 100 MiB, and no column value can exceed 10 MiB. pub async fn execute_streaming_sql( &mut self, request: impl tonic::IntoRequest, @@ -2512,9 +2946,10 @@ pub mod spanner_client { /// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. /// /// Statements are executed in sequential order. A request can succeed even if - /// a statement fails. The [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] field in the - /// response provides information about the statement that failed. Clients must - /// inspect this field to determine whether an error occurred. + /// a statement fails. The + /// [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] + /// field in the response provides information about the statement that failed. + /// Clients must inspect this field to determine whether an error occurred. /// /// Execution stops after the first failed statement; the remaining statements /// are not executed. @@ -2545,14 +2980,15 @@ pub mod spanner_client { } /// Reads rows from the database using key lookups and scans, as a /// simple key/value style alternative to - /// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be used to - /// return a result set larger than 10 MiB; if the read matches more + /// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be + /// used to return a result set larger than 10 MiB; if the read matches more /// data than that, the read fails with a `FAILED_PRECONDITION` /// error. /// /// Reads inside read-write transactions might return `ABORTED`. If /// this occurs, the application should restart the transaction from - /// the beginning. See [Transaction][google.spanner.v1.Transaction] for more details. + /// the beginning. See [Transaction][google.spanner.v1.Transaction] for more + /// details. 
/// /// Larger result sets can be yielded in streaming fashion by calling /// [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead. @@ -2578,9 +3014,9 @@ pub mod spanner_client { .insert(GrpcMethod::new("google.spanner.v1.Spanner", "Read")); self.inner.unary(req, path, codec).await } - /// Like [Read][google.spanner.v1.Spanner.Read], except returns the result set as a - /// stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no limit on the - /// size of the returned result set. However, no individual row in + /// Like [Read][google.spanner.v1.Spanner.Read], except returns the result set + /// as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no + /// limit on the size of the returned result set. However, no individual row in /// the result set can exceed 100 MiB, and no column value can exceed /// 10 MiB. pub async fn streaming_read( @@ -2609,7 +3045,8 @@ pub mod spanner_client { self.inner.server_streaming(req, path, codec).await } /// Begins a new transaction. This step can often be skipped: - /// [Read][google.spanner.v1.Spanner.Read], [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and + /// [Read][google.spanner.v1.Spanner.Read], + /// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and /// [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a /// side-effect. pub async fn begin_transaction( @@ -2674,8 +3111,9 @@ pub mod spanner_client { } /// Rolls back a transaction, releasing any locks it holds. It is a good /// idea to call this for any transaction that includes one or more - /// [Read][google.spanner.v1.Spanner.Read] or [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and - /// ultimately decides not to commit. + /// [Read][google.spanner.v1.Spanner.Read] or + /// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately + /// decides not to commit. 
/// /// `Rollback` returns `OK` if it successfully aborts the transaction, the /// transaction was already aborted, or the transaction is not @@ -2704,10 +3142,11 @@ pub mod spanner_client { } /// Creates a set of partition tokens that can be used to execute a query /// operation in parallel. Each of the returned partition tokens can be used - /// by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to specify a subset - /// of the query result to read. The same session and read-only transaction - /// must be used by the PartitionQueryRequest used to create the - /// partition tokens and the ExecuteSqlRequests that use the partition tokens. + /// by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to + /// specify a subset of the query result to read. The same session and + /// read-only transaction must be used by the PartitionQueryRequest used to + /// create the partition tokens and the ExecuteSqlRequests that use the + /// partition tokens. /// /// Partition tokens become invalid when the session used to create them /// is deleted, is idle for too long, begins a new transaction, or becomes too @@ -2740,12 +3179,13 @@ pub mod spanner_client { } /// Creates a set of partition tokens that can be used to execute a read /// operation in parallel. Each of the returned partition tokens can be used - /// by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a subset of the read - /// result to read. The same session and read-only transaction must be used by - /// the PartitionReadRequest used to create the partition tokens and the - /// ReadRequests that use the partition tokens. There are no ordering - /// guarantees on rows returned among the returned partition tokens, or even - /// within each individual StreamingRead call issued with a partition_token. + /// by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a + /// subset of the read result to read. 
The same session and read-only + /// transaction must be used by the PartitionReadRequest used to create the + /// partition tokens and the ReadRequests that use the partition tokens. There + /// are no ordering guarantees on rows returned among the returned partition + /// tokens, or even within each individual StreamingRead call issued with a + /// partition_token. /// /// Partition tokens become invalid when the session used to create them /// is deleted, is idle for too long, begins a new transaction, or becomes too @@ -2776,5 +3216,45 @@ pub mod spanner_client { .insert(GrpcMethod::new("google.spanner.v1.Spanner", "PartitionRead")); self.inner.unary(req, path, codec).await } + /// Batches the supplied mutation groups in a collection of efficient + /// transactions. All mutations in a group are committed atomically. However, + /// mutations across groups can be committed non-atomically in an unspecified + /// order and thus, they must be independent of each other. Partial failure is + /// possible, i.e., some groups may have been committed successfully, while + /// some may have failed. The results of individual batches are streamed into + /// the response as the batches are applied. + /// + /// BatchWrite requests are not replay protected, meaning that each mutation + /// group may be applied more than once. Replays of non-idempotent mutations + /// may have undesirable effects. For example, replays of an insert mutation + /// may produce an already exists error or if you use generated or commit + /// timestamp-based keys, it may result in additional rows being added to the + /// mutation's table. We recommend structuring your mutation groups to be + /// idempotent to avoid this issue. 
+ pub async fn batch_write( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.v1.Spanner/BatchWrite", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("google.spanner.v1.Spanner", "BatchWrite")); + self.inner.server_streaming(req, path, codec).await + } } } diff --git a/googleapis/src/bytes/google.storage.v2.rs b/googleapis/src/bytes/google.storage.v2.rs index 6158a387..f164e142 100644 --- a/googleapis/src/bytes/google.storage.v2.rs +++ b/googleapis/src/bytes/google.storage.v2.rs @@ -44,7 +44,7 @@ pub struct CreateBucketRequest { pub parent: ::prost::alloc::string::String, /// Properties of the new bucket being inserted. /// The name of the bucket is specified in the `bucket_id` field. Populating - /// `bucket.name` field will be ignored. + /// `bucket.name` field will result in an error. /// The project of the bucket must be specified in the `bucket.project` field. /// This field must be in `projects/{projectIdentifier}` format, /// {projectIdentifier} can be the project ID or project number. The `parent` @@ -155,67 +155,6 @@ pub struct UpdateBucketRequest { #[prost(message, optional, tag = "6")] pub update_mask: ::core::option::Option<::prost_types::FieldMask>, } -/// Request message for DeleteNotificationConfig. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DeleteNotificationConfigRequest { - /// Required. The parent bucket of the NotificationConfig. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, -} -/// Request message for GetNotificationConfig. 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetNotificationConfigRequest { - /// Required. The parent bucket of the NotificationConfig. - /// Format: - /// `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}` - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, -} -/// Request message for CreateNotificationConfig. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateNotificationConfigRequest { - /// Required. The bucket to which this NotificationConfig belongs. - #[prost(string, tag = "1")] - pub parent: ::prost::alloc::string::String, - /// Required. Properties of the NotificationConfig to be inserted. - #[prost(message, optional, tag = "2")] - pub notification_config: ::core::option::Option, -} -/// Request message for ListNotifications. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListNotificationConfigsRequest { - /// Required. Name of a Google Cloud Storage bucket. - #[prost(string, tag = "1")] - pub parent: ::prost::alloc::string::String, - /// The maximum number of NotificationConfigs to return. The service may - /// return fewer than this value. The default value is 100. Specifying a value - /// above 100 will result in a page_size of 100. - #[prost(int32, tag = "2")] - pub page_size: i32, - /// A page token, received from a previous `ListNotificationConfigs` call. - /// Provide this to retrieve the subsequent page. - /// - /// When paginating, all other parameters provided to `ListNotificationConfigs` - /// must match the call that provided the page token. 
- #[prost(string, tag = "3")] - pub page_token: ::prost::alloc::string::String, -} -/// The result of a call to ListNotificationConfigs -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListNotificationConfigsResponse { - /// The list of items. - #[prost(message, repeated, tag = "1")] - pub notification_configs: ::prost::alloc::vec::Vec, - /// A token, which can be sent as `page_token` to retrieve the next page. - /// If this field is omitted, there are no subsequent pages. - #[prost(string, tag = "2")] - pub next_page_token: ::prost::alloc::string::String, -} /// Request message for ComposeObject. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -327,6 +266,48 @@ pub struct DeleteObjectRequest { #[prost(message, optional, tag = "10")] pub common_object_request_params: ::core::option::Option, } +/// Message for restoring an object. +/// `bucket`, `object`, and `generation` **must** be set. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RestoreObjectRequest { + /// Required. Name of the bucket in which the object resides. + #[prost(string, tag = "1")] + pub bucket: ::prost::alloc::string::String, + /// Required. The name of the object to restore. + #[prost(string, tag = "2")] + pub object: ::prost::alloc::string::String, + /// Required. The specific revision of the object to restore. + #[prost(int64, tag = "3")] + pub generation: i64, + /// Makes the operation conditional on whether the object's current generation + /// matches the given value. Setting to 0 makes the operation succeed only if + /// there are no live versions of the object. + #[prost(int64, optional, tag = "4")] + pub if_generation_match: ::core::option::Option, + /// Makes the operation conditional on whether the object's live generation + /// does not match the given value. If no live object exists, the precondition + /// fails. 
Setting to 0 makes the operation succeed only if there is a live + /// version of the object. + #[prost(int64, optional, tag = "5")] + pub if_generation_not_match: ::core::option::Option, + /// Makes the operation conditional on whether the object's current + /// metageneration matches the given value. + #[prost(int64, optional, tag = "6")] + pub if_metageneration_match: ::core::option::Option, + /// Makes the operation conditional on whether the object's current + /// metageneration does not match the given value. + #[prost(int64, optional, tag = "7")] + pub if_metageneration_not_match: ::core::option::Option, + /// If false or unset, the bucket's default object ACL will be used. + /// If true, copy the source object's access controls. + /// Return an error if bucket has UBLA enabled. + #[prost(bool, optional, tag = "9")] + pub copy_source_acl: ::core::option::Option, + /// A set of parameters common to Storage API requests concerning an object. + #[prost(message, optional, tag = "8")] + pub common_object_request_params: ::core::option::Option, +} /// Message for canceling an in-progress resumable upload. /// `upload_id` **must** be set. #[allow(clippy::derive_partial_eq_without_eq)] @@ -420,6 +401,9 @@ pub struct GetObjectRequest { /// latest version, the default). #[prost(int64, tag = "3")] pub generation: i64, + /// If true, return the soft-deleted version of this object. + #[prost(bool, optional, tag = "11")] + pub soft_deleted: ::core::option::Option, /// Makes the operation conditional on whether the object's current generation /// matches the given value. Setting to 0 makes the operation succeed only if /// there are no live versions of the object. @@ -607,6 +591,115 @@ pub mod write_object_response { Resource(super::Object), } } +/// Request message for BidiWriteObject. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BidiWriteObjectRequest { + /// Required. 
The offset from the beginning of the object at which the data + /// should be written. + /// + /// In the first `WriteObjectRequest` of a `WriteObject()` action, it + /// indicates the initial offset for the `Write()` call. The value **must** be + /// equal to the `persisted_size` that a call to `QueryWriteStatus()` would + /// return (0 if this is the first write to the object). + /// + /// On subsequent calls, this value **must** be no larger than the sum of the + /// first `write_offset` and the sizes of all `data` chunks sent previously on + /// this stream. + /// + /// An invalid value will cause an error. + #[prost(int64, tag = "3")] + pub write_offset: i64, + /// Checksums for the complete object. If the checksums computed by the service + /// don't match the specified checksums the call will fail. May only be + /// provided in last request (with finish_write set). + #[prost(message, optional, tag = "6")] + pub object_checksums: ::core::option::Option, + /// For each BidiWriteObjectRequest where state_lookup is `true` or the client + /// closes the stream, the service will send a BidiWriteObjectResponse + /// containing the current persisted size. The persisted size sent in responses + /// covers all the bytes the server has persisted thus far and can be used to + /// decide what data is safe for the client to drop. Note that the object's + /// current size reported by the BidiWriteObjectResponse may lag behind the + /// number of bytes written by the client. This field is ignored if + /// `finish_write` is set to true. + #[prost(bool, tag = "7")] + pub state_lookup: bool, + /// Persists data written on the stream, up to and including the current + /// message, to permanent storage. This option should be used sparingly as it + /// may reduce performance. Ongoing writes will periodically be persisted on + /// the server even when `flush` is not set. 
This field is ignored if + /// `finish_write` is set to true since there's no need to checkpoint or flush + /// if this message completes the write. + #[prost(bool, tag = "8")] + pub flush: bool, + /// If `true`, this indicates that the write is complete. Sending any + /// `WriteObjectRequest`s subsequent to one in which `finish_write` is `true` + /// will cause an error. + /// For a non-resumable write (where the upload_id was not set in the first + /// message), it is an error not to set this field in the final message of the + /// stream. + #[prost(bool, tag = "9")] + pub finish_write: bool, + /// A set of parameters common to Storage API requests concerning an object. + #[prost(message, optional, tag = "10")] + pub common_object_request_params: ::core::option::Option, + /// The first message of each stream should set one of the following. + #[prost(oneof = "bidi_write_object_request::FirstMessage", tags = "1, 2")] + pub first_message: ::core::option::Option, + /// A portion of the data for the object. + #[prost(oneof = "bidi_write_object_request::Data", tags = "4")] + pub data: ::core::option::Option, +} +/// Nested message and enum types in `BidiWriteObjectRequest`. +pub mod bidi_write_object_request { + /// The first message of each stream should set one of the following. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum FirstMessage { + /// For resumable uploads. This should be the `upload_id` returned from a + /// call to `StartResumableWriteResponse`. + #[prost(string, tag = "1")] + UploadId(::prost::alloc::string::String), + /// For non-resumable uploads. Describes the overall upload, including the + /// destination bucket and object name, preconditions, etc. + #[prost(message, tag = "2")] + WriteObjectSpec(super::WriteObjectSpec), + } + /// A portion of the data for the object. 
+ #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Data { + /// The data to insert. If a crc32c checksum is provided that doesn't match + /// the checksum computed by the service, the request will fail. + #[prost(message, tag = "4")] + ChecksummedData(super::ChecksummedData), + } +} +/// Response message for BidiWriteObject. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BidiWriteObjectResponse { + /// The response will set one of the following. + #[prost(oneof = "bidi_write_object_response::WriteStatus", tags = "1, 2")] + pub write_status: ::core::option::Option, +} +/// Nested message and enum types in `BidiWriteObjectResponse`. +pub mod bidi_write_object_response { + /// The response will set one of the following. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum WriteStatus { + /// The total number of bytes that have been processed for the given object + /// from all `WriteObject` calls. Only set if the upload has not finalized. + #[prost(int64, tag = "1")] + PersistedSize(i64), + /// A resource containing the metadata for the uploaded object. Only set if + /// the upload has finalized. + #[prost(message, tag = "2")] + Resource(super::Object), + } +} /// Request message for ListObjects. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -664,6 +757,20 @@ pub struct ListObjectsRequest { /// lexicographic_end (exclusive). #[prost(string, tag = "11")] pub lexicographic_end: ::prost::alloc::string::String, + /// Optional. If true, only list all soft-deleted versions of the object. + /// Soft delete policy is required to set this option. + #[prost(bool, tag = "12")] + pub soft_deleted: bool, + /// Optional. If true, will also include folders and managed folders (besides + /// objects) in the returned `prefixes`. Requires `delimiter` to be set to '/'. 
+ #[prost(bool, tag = "13")] + pub include_folders_as_prefixes: bool, + /// Optional. Filter results to objects and prefixes that match this glob + /// pattern. See [List Objects Using + /// Glob]() + /// for the full syntax. + #[prost(string, tag = "14")] + pub match_glob: ::prost::alloc::string::String, } /// Request object for `QueryWriteStatus`. #[allow(clippy::derive_partial_eq_without_eq)] @@ -942,6 +1049,16 @@ pub struct GetServiceAccountRequest { #[prost(string, tag = "1")] pub project: ::prost::alloc::string::String, } +/// A service account, owned by Cloud Storage, which may be used when taking +/// action on behalf of a given project, for example to publish Pub/Sub +/// notifications or to retrieve security keys. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ServiceAccount { + /// The ID of the notification. + #[prost(string, tag = "1")] + pub email_address: ::prost::alloc::string::String, +} /// Request message for CreateHmacKey. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -1046,6 +1163,40 @@ pub struct UpdateHmacKeyRequest { #[prost(message, optional, tag = "3")] pub update_mask: ::core::option::Option<::prost_types::FieldMask>, } +/// Hmac Key Metadata, which includes all information other than the secret. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct HmacKeyMetadata { + /// Immutable. Resource name ID of the key in the format + /// {projectIdentifier}/{accessId}. + /// {projectIdentifier} can be the project ID or project number. + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Immutable. Globally unique id for keys. + #[prost(string, tag = "2")] + pub access_id: ::prost::alloc::string::String, + /// Immutable. Identifies the project that owns the service account of the + /// specified HMAC key, in the format "projects/{projectIdentifier}". 
+ /// {projectIdentifier} can be the project ID or project number. + #[prost(string, tag = "3")] + pub project: ::prost::alloc::string::String, + /// Output only. Email of the service account the key authenticates as. + #[prost(string, tag = "4")] + pub service_account_email: ::prost::alloc::string::String, + /// Optional. State of the key. One of ACTIVE, INACTIVE, or DELETED. + /// Writable, can be updated by UpdateHmacKey operation. + #[prost(string, tag = "5")] + pub state: ::prost::alloc::string::String, + /// Output only. The creation time of the HMAC key. + #[prost(message, optional, tag = "6")] + pub create_time: ::core::option::Option<::prost_types::Timestamp>, + /// Output only. The last modification time of the HMAC key metadata. + #[prost(message, optional, tag = "7")] + pub update_time: ::core::option::Option<::prost_types::Timestamp>, + /// Optional. The etag of the HMAC key. + #[prost(string, tag = "8")] + pub etag: ::prost::alloc::string::String, +} /// Parameters that can be passed to any object request. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -1235,8 +1386,6 @@ pub struct Bucket { #[prost(string, tag = "3")] pub project: ::prost::alloc::string::String, /// Output only. The metadata generation of this bucket. - /// Attempting to set or update this field will result in a - /// [FieldViolation][google.rpc.BadRequest.FieldViolation]. #[prost(int64, tag = "4")] pub metageneration: i64, /// Immutable. The location of the bucket. Object data for objects in the @@ -1264,7 +1413,7 @@ pub struct Bucket { /// replication. "ASYNC_TURBO" enables turbo replication, valid for dual-region /// buckets only. If rpo is not specified when the bucket is created, it /// defaults to "DEFAULT". For more information, see - /// + /// #[prost(string, tag = "27")] pub rpo: ::prost::alloc::string::String, /// Access controls on the bucket. 
@@ -1283,8 +1432,6 @@ pub struct Bucket { #[prost(message, optional, tag = "10")] pub lifecycle: ::core::option::Option, /// Output only. The creation time of the bucket. - /// Attempting to set or update this field will result in a - /// [FieldViolation][google.rpc.BadRequest.FieldViolation]. #[prost(message, optional, tag = "11")] pub create_time: ::core::option::Option<::prost_types::Timestamp>, /// The bucket's [ Resource Sharing] @@ -1292,8 +1439,6 @@ pub struct Bucket { #[prost(message, repeated, tag = "12")] pub cors: ::prost::alloc::vec::Vec, /// Output only. The modification time of the bucket. - /// Attempting to set or update this field will result in a - /// [FieldViolation][google.rpc.BadRequest.FieldViolation]. #[prost(message, optional, tag = "13")] pub update_time: ::core::option::Option<::prost_types::Timestamp>, /// The default value for event-based hold on newly created objects in this @@ -1358,13 +1503,23 @@ pub struct Bucket { #[prost(bool, tag = "25")] pub satisfies_pzs: bool, /// Configuration that, if present, specifies the data placement for a - /// [ Region]. + /// [ + /// dual-region]. #[prost(message, optional, tag = "26")] pub custom_placement_config: ::core::option::Option, /// The bucket's Autoclass configuration. If there is no configuration, the /// Autoclass feature will be disabled and have no effect on the bucket. #[prost(message, optional, tag = "28")] pub autoclass: ::core::option::Option, + /// Optional. The bucket's hierarchical namespace configuration. If there is no + /// configuration, the hierarchical namespace feature will be disabled and have + /// no effect on the bucket. + #[prost(message, optional, tag = "32")] + pub hierarchical_namespace: ::core::option::Option, + /// Optional. The bucket's soft delete policy. The soft delete policy prevents + /// soft-deleted objects from being permanently deleted. 
+ #[prost(message, optional, tag = "31")] + pub soft_delete_policy: ::core::option::Option, } /// Nested message and enum types in `Bucket`. pub mod bucket { @@ -1589,6 +1744,19 @@ pub mod bucket { #[prost(message, optional, tag = "4")] pub retention_duration: ::core::option::Option<::prost_types::Duration>, } + /// Soft delete policy properties of a bucket. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct SoftDeletePolicy { + /// The period of time that soft-deleted objects in the bucket must be + /// retained and cannot be permanently deleted. The duration must be greater + /// than or equal to 7 days and less than 1 year. + #[prost(message, optional, tag = "1")] + pub retention_duration: ::core::option::Option<::prost_types::Duration>, + /// Time from which the policy was effective. This is service-provided. + #[prost(message, optional, tag = "2")] + pub effective_time: ::core::option::Option<::prost_types::Timestamp>, + } /// Properties of a bucket related to versioning. /// For more on Cloud Storage versioning, see /// @@ -1631,7 +1799,7 @@ pub mod bucket { } /// Configuration for a bucket's Autoclass feature. #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, Copy, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct Autoclass { /// Enables Autoclass. #[prost(bool, tag = "1")] @@ -1642,6 +1810,27 @@ pub mod bucket { /// to the bucket creation time. #[prost(message, optional, tag = "2")] pub toggle_time: ::core::option::Option<::prost_types::Timestamp>, + /// An object in an Autoclass bucket will eventually cool down to the + /// terminal storage class if there is no access to the object. + /// The only valid values are NEARLINE and ARCHIVE. + #[prost(string, optional, tag = "3")] + pub terminal_storage_class: ::core::option::Option< + ::prost::alloc::string::String, + >, + /// Output only. 
Latest instant at which the autoclass terminal storage class + /// was updated. + #[prost(message, optional, tag = "4")] + pub terminal_storage_class_update_time: ::core::option::Option< + ::prost_types::Timestamp, + >, + } + /// Configuration for a bucket's hierarchical namespace feature. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct HierarchicalNamespace { + /// Optional. Enables the hierarchical namespace feature. + #[prost(bool, tag = "1")] + pub enabled: bool, } } /// An access-control entry. @@ -1702,7 +1891,7 @@ pub struct BucketAccessControl { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ChecksummedData { - /// The data. + /// Optional. The data. #[prost(bytes = "bytes", tag = "1")] pub content: ::prost::bytes::Bytes, /// If set, the CRC32C digest of the content field. @@ -1724,111 +1913,38 @@ pub struct ObjectChecksums { /// [ and /// ETags: Best Practices]. /// Not all objects will provide an MD5 hash. For example, composite objects - /// provide only crc32c hashes. - /// This value is equivalent to running `cat object.txt | openssl md5 -binary` + /// provide only crc32c hashes. This value is equivalent to running `cat + /// object.txt | openssl md5 -binary` #[prost(bytes = "bytes", tag = "2")] pub md5_hash: ::prost::bytes::Bytes, } -/// Hmac Key Metadata, which includes all information other than the secret. +/// Describes the Customer-Supplied Encryption Key mechanism used to store an +/// Object's data at rest. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] -pub struct HmacKeyMetadata { - /// Immutable. Resource name ID of the key in the format - /// {projectIdentifier}/{accessId}. - /// {projectIdentifier} can be the project ID or project number. +pub struct CustomerEncryption { + /// The encryption algorithm. 
#[prost(string, tag = "1")] - pub id: ::prost::alloc::string::String, - /// Immutable. Globally unique id for keys. - #[prost(string, tag = "2")] - pub access_id: ::prost::alloc::string::String, - /// Immutable. Identifies the project that owns the service account of the - /// specified HMAC key, in the format "projects/{projectIdentifier}". - /// {projectIdentifier} can be the project ID or project number. - #[prost(string, tag = "3")] - pub project: ::prost::alloc::string::String, - /// Output only. Email of the service account the key authenticates as. - #[prost(string, tag = "4")] - pub service_account_email: ::prost::alloc::string::String, - /// State of the key. One of ACTIVE, INACTIVE, or DELETED. - /// Writable, can be updated by UpdateHmacKey operation. - #[prost(string, tag = "5")] - pub state: ::prost::alloc::string::String, - /// Output only. The creation time of the HMAC key. - #[prost(message, optional, tag = "6")] - pub create_time: ::core::option::Option<::prost_types::Timestamp>, - /// Output only. The last modification time of the HMAC key metadata. - #[prost(message, optional, tag = "7")] - pub update_time: ::core::option::Option<::prost_types::Timestamp>, - /// The etag of the HMAC key. - #[prost(string, tag = "8")] - pub etag: ::prost::alloc::string::String, -} -/// A directive to publish Pub/Sub notifications upon changes to a bucket. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NotificationConfig { - /// Required. The resource name of this NotificationConfig. - /// Format: - /// `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}` - /// The `{project}` portion may be `_` for globally unique buckets. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// Required. The Pub/Sub topic to which this subscription publishes. 
Formatted - /// as: - /// '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}' - #[prost(string, tag = "2")] - pub topic: ::prost::alloc::string::String, - /// The etag of the NotificationConfig. - /// If included in the metadata of GetNotificationConfigRequest, the operation - /// will only be performed if the etag matches that of the NotificationConfig. - #[prost(string, tag = "7")] - pub etag: ::prost::alloc::string::String, - /// If present, only send notifications about listed event types. If - /// empty, sent notifications for all event types. - #[prost(string, repeated, tag = "3")] - pub event_types: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// A list of additional attributes to attach to each Pub/Sub - /// message published for this NotificationConfig. - #[prost(map = "string, string", tag = "4")] - pub custom_attributes: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, - /// If present, only apply this NotificationConfig to object names that - /// begin with this prefix. - #[prost(string, tag = "5")] - pub object_name_prefix: ::prost::alloc::string::String, - /// Required. The desired content of the Payload. - #[prost(string, tag = "6")] - pub payload_format: ::prost::alloc::string::String, -} -/// Describes the Customer-Supplied Encryption Key mechanism used to store an -/// Object's data at rest. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CustomerEncryption { - /// The encryption algorithm. - #[prost(string, tag = "1")] - pub encryption_algorithm: ::prost::alloc::string::String, - /// SHA256 hash value of the encryption key. - /// In raw bytes format (not base64-encoded). - #[prost(bytes = "bytes", tag = "3")] - pub key_sha256_bytes: ::prost::bytes::Bytes, -} -/// An object. 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Object { - /// Immutable. The name of this object. Nearly any sequence of unicode - /// characters is valid. See - /// [Guidelines](). - /// Example: `test.txt` - /// The `name` field by itself does not uniquely identify a Cloud Storage - /// object. A Cloud Storage object is uniquely identified by the tuple of - /// (bucket, object, generation). - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// Immutable. The name of the bucket containing this object. + pub encryption_algorithm: ::prost::alloc::string::String, + /// SHA256 hash value of the encryption key. + /// In raw bytes format (not base64-encoded). + #[prost(bytes = "bytes", tag = "3")] + pub key_sha256_bytes: ::prost::bytes::Bytes, +} +/// An object. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Object { + /// Immutable. The name of this object. Nearly any sequence of unicode + /// characters is valid. See + /// [Guidelines](). + /// Example: `test.txt` + /// The `name` field by itself does not uniquely identify a Cloud Storage + /// object. A Cloud Storage object is uniquely identified by the tuple of + /// (bucket, object, generation). + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// Immutable. The name of the bucket containing this object. #[prost(string, tag = "2")] pub bucket: ::prost::alloc::string::String, /// The etag of the object. @@ -1838,15 +1954,13 @@ pub struct Object { #[prost(string, tag = "27")] pub etag: ::prost::alloc::string::String, /// Immutable. The content generation of this object. Used for object - /// versioning. Attempting to set or update this field will result in a - /// [FieldViolation][google.rpc.BadRequest.FieldViolation]. + /// versioning. #[prost(int64, tag = "3")] pub generation: i64, /// Output only. 
The version of the metadata for this generation of this /// object. Used for preconditions and for detecting changes in metadata. A /// metageneration number is only meaningful in the context of a particular - /// generation of a particular object. Attempting to set or update this field - /// will result in a [FieldViolation][google.rpc.BadRequest.FieldViolation]. + /// generation of a particular object. #[prost(int64, tag = "4")] pub metageneration: i64, /// Storage class of the object. @@ -1854,8 +1968,6 @@ pub struct Object { pub storage_class: ::prost::alloc::string::String, /// Output only. Content-Length of the object data in bytes, matching /// [ 7230 ยง3.3.2]. - /// Attempting to set or update this field will result in a - /// [FieldViolation][google.rpc.BadRequest.FieldViolation]. #[prost(int64, tag = "6")] pub size: i64, /// Content-Encoding of the object data, matching @@ -1882,8 +1994,7 @@ pub struct Object { #[prost(string, tag = "11")] pub content_language: ::prost::alloc::string::String, /// Output only. If this object is noncurrent, this is the time when the object - /// became noncurrent. Attempting to set or update this field will result in a - /// [FieldViolation][google.rpc.BadRequest.FieldViolation]. + /// became noncurrent. #[prost(message, optional, tag = "12")] pub delete_time: ::core::option::Option<::prost_types::Timestamp>, /// Content-Type of the object data, matching @@ -1893,18 +2004,17 @@ pub struct Object { #[prost(string, tag = "13")] pub content_type: ::prost::alloc::string::String, /// Output only. The creation time of the object. - /// Attempting to set or update this field will result in a - /// [FieldViolation][google.rpc.BadRequest.FieldViolation]. #[prost(message, optional, tag = "14")] pub create_time: ::core::option::Option<::prost_types::Timestamp>, /// Output only. Number of underlying components that make up this object. - /// Components are accumulated by compose operations. 
Attempting to set or - /// update this field will result in a - /// [FieldViolation][google.rpc.BadRequest.FieldViolation]. + /// Components are accumulated by compose operations. #[prost(int32, tag = "15")] pub component_count: i32, /// Output only. Hashes for the data part of this object. This field is used - /// for output only and will be silently ignored if provided in requests. + /// for output only and will be silently ignored if provided in requests. The + /// checksums of the complete object regardless of data range. If the object is + /// downloaded in full, the client should compute one of these checksums over + /// the downloaded object and compare it against the value provided here. #[prost(message, optional, tag = "16")] pub checksums: ::core::option::Option, /// Output only. The modification time of the object metadata. @@ -1913,8 +2023,6 @@ pub struct Object { /// such as modifying custom metadata, as well as changes made by Cloud Storage /// on behalf of a requester, such as changing the storage class based on an /// Object Lifecycle Configuration. - /// Attempting to set or update this field will result in a - /// [FieldViolation][google.rpc.BadRequest.FieldViolation]. #[prost(message, optional, tag = "17")] pub update_time: ::core::option::Option<::prost_types::Timestamp>, /// Cloud KMS Key used to encrypt this object, if the object is encrypted by @@ -1923,8 +2031,6 @@ pub struct Object { pub kms_key: ::prost::alloc::string::String, /// Output only. The time at which the object's storage class was last changed. /// When the object is initially created, it will be set to time_created. - /// Attempting to set or update this field will result in a - /// [FieldViolation][google.rpc.BadRequest.FieldViolation]. #[prost(message, optional, tag = "19")] pub update_storage_class_time: ::core::option::Option<::prost_types::Timestamp>, /// Whether an object is under temporary hold. 
While this flag is set to true, @@ -1963,8 +2069,7 @@ pub struct Object { #[prost(bool, optional, tag = "23")] pub event_based_hold: ::core::option::Option, /// Output only. The owner of the object. This will always be the uploader of - /// the object. Attempting to set or update this field will result in a - /// [FieldViolation][google.rpc.BadRequest.FieldViolation]. + /// the object. #[prost(message, optional, tag = "24")] pub owner: ::core::option::Option, /// Metadata of Customer-Supplied Encryption Key, if the object is encrypted by @@ -1974,12 +2079,27 @@ pub struct Object { /// A user-specified timestamp set on an object. #[prost(message, optional, tag = "26")] pub custom_time: ::core::option::Option<::prost_types::Timestamp>, + /// Output only. This is the time when the object became soft-deleted. + /// + /// Soft-deleted objects are only accessible if a soft_delete_policy is + /// enabled. Also see hard_delete_time. + #[prost(message, optional, tag = "28")] + pub soft_delete_time: ::core::option::Option<::prost_types::Timestamp>, + /// Output only. The time when the object will be permanently deleted. + /// + /// Only set when an object becomes soft-deleted with a soft_delete_policy. + /// Otherwise, the object will not be accessible. + #[prost(message, optional, tag = "29")] + pub hard_delete_time: ::core::option::Option<::prost_types::Timestamp>, } /// An access-control entry. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ObjectAccessControl { - /// The access permission for the entity. + /// The access permission for the entity. One of the following values: + /// * `READER` + /// * `WRITER` + /// * `OWNER` #[prost(string, tag = "1")] pub role: ::prost::alloc::string::String, /// The ID of the access-control entry. 
@@ -2055,16 +2175,6 @@ pub struct ProjectTeam { #[prost(string, tag = "2")] pub team: ::prost::alloc::string::String, } -/// A service account, owned by Cloud Storage, which may be used when taking -/// action on behalf of a given project, for example to publish Pub/Sub -/// notifications or to retrieve security keys. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ServiceAccount { - /// The ID of the notification. - #[prost(string, tag = "1")] - pub email_address: ::prost::alloc::string::String, -} /// The owner of a specific resource. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2080,16 +2190,116 @@ pub struct Owner { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ContentRange { - /// The starting offset of the object data. + /// The starting offset of the object data. This value is inclusive. #[prost(int64, tag = "1")] pub start: i64, - /// The ending offset of the object data. + /// The ending offset of the object data. This value is exclusive. #[prost(int64, tag = "2")] pub end: i64, /// The complete length of the object data. #[prost(int64, tag = "3")] pub complete_length: i64, } +/// Request message for DeleteNotificationConfig. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteNotificationConfigRequest { + /// Required. The parent bucket of the NotificationConfig. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, +} +/// Request message for GetNotificationConfig. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetNotificationConfigRequest { + /// Required. The parent bucket of the NotificationConfig. 
+ /// Format: + /// `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}` + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, +} +/// Request message for CreateNotificationConfig. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateNotificationConfigRequest { + /// Required. The bucket to which this NotificationConfig belongs. + #[prost(string, tag = "1")] + pub parent: ::prost::alloc::string::String, + /// Required. Properties of the NotificationConfig to be inserted. + #[prost(message, optional, tag = "2")] + pub notification_config: ::core::option::Option, +} +/// Request message for ListNotifications. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListNotificationConfigsRequest { + /// Required. Name of a Google Cloud Storage bucket. + #[prost(string, tag = "1")] + pub parent: ::prost::alloc::string::String, + /// Optional. The maximum number of NotificationConfigs to return. The service + /// may return fewer than this value. The default value is 100. Specifying a + /// value above 100 will result in a page_size of 100. + #[prost(int32, tag = "2")] + pub page_size: i32, + /// Optional. A page token, received from a previous `ListNotificationConfigs` + /// call. Provide this to retrieve the subsequent page. + /// + /// When paginating, all other parameters provided to `ListNotificationConfigs` + /// must match the call that provided the page token. + #[prost(string, tag = "3")] + pub page_token: ::prost::alloc::string::String, +} +/// The result of a call to ListNotificationConfigs +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListNotificationConfigsResponse { + /// The list of items. 
+ #[prost(message, repeated, tag = "1")] + pub notification_configs: ::prost::alloc::vec::Vec, + /// A token, which can be sent as `page_token` to retrieve the next page. + /// If this field is omitted, there are no subsequent pages. + #[prost(string, tag = "2")] + pub next_page_token: ::prost::alloc::string::String, +} +/// A directive to publish Pub/Sub notifications upon changes to a bucket. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NotificationConfig { + /// Required. The resource name of this NotificationConfig. + /// Format: + /// `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}` + /// The `{project}` portion may be `_` for globally unique buckets. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// Required. The Pub/Sub topic to which this subscription publishes. Formatted + /// as: + /// '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}' + #[prost(string, tag = "2")] + pub topic: ::prost::alloc::string::String, + /// Optional. The etag of the NotificationConfig. + /// If included in the metadata of GetNotificationConfigRequest, the operation + /// will only be performed if the etag matches that of the NotificationConfig. + #[prost(string, tag = "7")] + pub etag: ::prost::alloc::string::String, + /// Optional. If present, only send notifications about listed event types. If + /// empty, sent notifications for all event types. + #[prost(string, repeated, tag = "3")] + pub event_types: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Optional. A list of additional attributes to attach to each Pub/Sub + /// message published for this NotificationConfig. + #[prost(map = "string, string", tag = "4")] + pub custom_attributes: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, + /// Optional. 
If present, only apply this NotificationConfig to object names + /// that begin with this prefix. + #[prost(string, tag = "5")] + pub object_name_prefix: ::prost::alloc::string::String, + /// Required. The desired content of the Payload. + #[prost(string, tag = "6")] + pub payload_format: ::prost::alloc::string::String, +} /// Generated client implementations. pub mod storage_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] @@ -2319,10 +2529,9 @@ pub mod storage_client { ); self.inner.unary(req, path, codec).await } - /// Gets the IAM policy for a specified bucket or object. + /// Gets the IAM policy for a specified bucket. /// The `resource` field in the request should be - /// projects/_/buckets/ for a bucket or - /// projects/_/buckets//objects/ for an object. + /// `projects/_/buckets/{bucket}`. pub async fn get_iam_policy( &mut self, request: impl tonic::IntoRequest< @@ -2350,10 +2559,9 @@ pub mod storage_client { .insert(GrpcMethod::new("google.storage.v2.Storage", "GetIamPolicy")); self.inner.unary(req, path, codec).await } - /// Updates an IAM policy for the specified bucket or object. + /// Updates an IAM policy for the specified bucket. /// The `resource` field in the request should be - /// projects/_/buckets/ for a bucket or - /// projects/_/buckets//objects/ for an object. + /// `projects/_/buckets/{bucket}`. pub async fn set_iam_policy( &mut self, request: impl tonic::IntoRequest< @@ -2381,11 +2589,13 @@ pub mod storage_client { .insert(GrpcMethod::new("google.storage.v2.Storage", "SetIamPolicy")); self.inner.unary(req, path, codec).await } - /// Tests a set of permissions on the given bucket or object to see which, if - /// any, are held by the caller. + /// Tests a set of permissions on the given bucket, object, or managed folder + /// to see which, if any, are held by the caller. 
/// The `resource` field in the request should be - /// projects/_/buckets/ for a bucket or - /// projects/_/buckets//objects/ for an object. + /// `projects/_/buckets/{bucket}` for a bucket, + /// `projects/_/buckets/{bucket}/objects/{object}` for an object, or + /// `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` + /// for a managed folder. pub async fn test_iam_permissions( &mut self, request: impl tonic::IntoRequest< @@ -2438,127 +2648,6 @@ pub mod storage_client { .insert(GrpcMethod::new("google.storage.v2.Storage", "UpdateBucket")); self.inner.unary(req, path, codec).await } - /// Permanently deletes a NotificationConfig. - pub async fn delete_notification_config( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.storage.v2.Storage/DeleteNotificationConfig", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "google.storage.v2.Storage", - "DeleteNotificationConfig", - ), - ); - self.inner.unary(req, path, codec).await - } - /// View a NotificationConfig. 
- pub async fn get_notification_config( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.storage.v2.Storage/GetNotificationConfig", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("google.storage.v2.Storage", "GetNotificationConfig"), - ); - self.inner.unary(req, path, codec).await - } - /// Creates a NotificationConfig for a given bucket. - /// These NotificationConfigs, when triggered, publish messages to the - /// specified Pub/Sub topics. See - /// https://cloud.google.com/storage/docs/pubsub-notifications. - pub async fn create_notification_config( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.storage.v2.Storage/CreateNotificationConfig", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "google.storage.v2.Storage", - "CreateNotificationConfig", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Retrieves a list of NotificationConfigs for a given bucket. 
- pub async fn list_notification_configs( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/google.storage.v2.Storage/ListNotificationConfigs", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "google.storage.v2.Storage", - "ListNotificationConfigs", - ), - ); - self.inner.unary(req, path, codec).await - } /// Concatenates a list of existing objects into a new object in the same /// bucket. pub async fn compose_object( @@ -2611,6 +2700,29 @@ pub mod storage_client { .insert(GrpcMethod::new("google.storage.v2.Storage", "DeleteObject")); self.inner.unary(req, path, codec).await } + /// Restores a soft-deleted object. + pub async fn restore_object( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.storage.v2.Storage/RestoreObject", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("google.storage.v2.Storage", "RestoreObject")); + self.inner.unary(req, path, codec).await + } /// Cancels an in-progress resumable upload. /// /// Any attempts to write to the resumable upload after cancelling the upload @@ -2775,6 +2887,9 @@ pub mod storage_client { /// status, with a WriteObjectResponse containing the finalized object's /// metadata. 
/// + /// Alternatively, the BidiWriteObject operation may be used to write an + /// object with controls over flushing and the ability to fetch the ability to + /// determine the current persisted size. pub async fn write_object( &mut self, request: impl tonic::IntoStreamingRequest< @@ -2802,6 +2917,48 @@ pub mod storage_client { .insert(GrpcMethod::new("google.storage.v2.Storage", "WriteObject")); self.inner.client_streaming(req, path, codec).await } + /// Stores a new object and metadata. + /// + /// This is similar to the WriteObject call with the added support for + /// manual flushing of persisted state, and the ability to determine current + /// persisted size without closing the stream. + /// + /// The client may specify one or both of the `state_lookup` and `flush` fields + /// in each BidiWriteObjectRequest. If `flush` is specified, the data written + /// so far will be persisted to storage. If `state_lookup` is specified, the + /// service will respond with a BidiWriteObjectResponse that contains the + /// persisted size. If both `flush` and `state_lookup` are specified, the flush + /// will always occur before a `state_lookup`, so that both may be set in the + /// same request and the returned state will be the state of the object + /// post-flush. When the stream is closed, a BidiWriteObjectResponse will + /// always be sent to the client, regardless of the value of `state_lookup`. 
+ pub async fn bidi_write_object( + &mut self, + request: impl tonic::IntoStreamingRequest< + Message = super::BidiWriteObjectRequest, + >, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.storage.v2.Storage/BidiWriteObject", + ); + let mut req = request.into_streaming_request(); + req.extensions_mut() + .insert(GrpcMethod::new("google.storage.v2.Storage", "BidiWriteObject")); + self.inner.streaming(req, path, codec).await + } /// Retrieves a list of objects matching the criteria. pub async fn list_objects( &mut self, @@ -3077,5 +3234,126 @@ pub mod storage_client { .insert(GrpcMethod::new("google.storage.v2.Storage", "UpdateHmacKey")); self.inner.unary(req, path, codec).await } + /// Permanently deletes a NotificationConfig. + pub async fn delete_notification_config( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.storage.v2.Storage/DeleteNotificationConfig", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.storage.v2.Storage", + "DeleteNotificationConfig", + ), + ); + self.inner.unary(req, path, codec).await + } + /// View a NotificationConfig. 
+ pub async fn get_notification_config( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.storage.v2.Storage/GetNotificationConfig", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("google.storage.v2.Storage", "GetNotificationConfig"), + ); + self.inner.unary(req, path, codec).await + } + /// Creates a NotificationConfig for a given bucket. + /// These NotificationConfigs, when triggered, publish messages to the + /// specified Pub/Sub topics. See + /// https://cloud.google.com/storage/docs/pubsub-notifications. + pub async fn create_notification_config( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.storage.v2.Storage/CreateNotificationConfig", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.storage.v2.Storage", + "CreateNotificationConfig", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Retrieves a list of NotificationConfigs for a given bucket. 
+ pub async fn list_notification_configs( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.storage.v2.Storage/ListNotificationConfigs", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "google.storage.v2.Storage", + "ListNotificationConfigs", + ), + ); + self.inner.unary(req, path, codec).await + } } } diff --git a/googleapis/src/google.api.rs b/googleapis/src/google.api.rs index c96f9f7a..cd38789a 100644 --- a/googleapis/src/google.api.rs +++ b/googleapis/src/google.api.rs @@ -19,7 +19,7 @@ pub struct Http { #[prost(bool, tag = "2")] pub fully_decode_reserved_expansion: bool, } -/// # gRPC Transcoding +/// gRPC Transcoding /// /// gRPC Transcoding is a feature for mapping between a gRPC method and one or /// more HTTP REST endpoints. It allows developers to build a single API service @@ -60,9 +60,8 @@ pub struct Http { /// /// This enables an HTTP REST to gRPC mapping as below: /// -/// HTTP | gRPC -/// -----|----- -/// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +/// - HTTP: `GET /v1/messages/123456` +/// - gRPC: `GetMessage(name: "messages/123456")` /// /// Any fields in the request message which are not bound by the path template /// automatically become HTTP query parameters if there is no HTTP request body. 
@@ -86,11 +85,9 @@ pub struct Http { /// /// This enables a HTTP JSON to RPC mapping as below: /// -/// HTTP | gRPC -/// -----|----- -/// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | -/// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: -/// "foo"))` +/// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo` +/// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub: +/// SubMessage(subfield: "foo"))` /// /// Note that fields which are mapped to URL query parameters must have a /// primitive type or a repeated primitive type or a non-repeated message type. @@ -120,10 +117,8 @@ pub struct Http { /// representation of the JSON in the request body is determined by /// protos JSON encoding: /// -/// HTTP | gRPC -/// -----|----- -/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -/// "123456" message { text: "Hi!" })` +/// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +/// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })` /// /// The special name `*` can be used in the body mapping to define that /// every field not bound by the path template should be mapped to the @@ -146,10 +141,8 @@ pub struct Http { /// /// The following HTTP JSON to RPC mapping is enabled: /// -/// HTTP | gRPC -/// -----|----- -/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -/// "123456" text: "Hi!")` +/// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" 
}` +/// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")` /// /// Note that when using `*` in the body mapping, it is not possible to /// have HTTP parameters, as all fields not bound by the path end in @@ -177,13 +170,13 @@ pub struct Http { /// /// This enables the following two alternative HTTP JSON to RPC mappings: /// -/// HTTP | gRPC -/// -----|----- -/// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -/// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: -/// "123456")` +/// - HTTP: `GET /v1/messages/123456` +/// - gRPC: `GetMessage(message_id: "123456")` /// -/// ## Rules for HTTP mapping +/// - HTTP: `GET /v1/users/me/messages/123456` +/// - gRPC: `GetMessage(user_id: "me" message_id: "123456")` +/// +/// Rules for HTTP mapping /// /// 1. Leaf request fields (recursive expansion nested messages in the request /// message) are classified into three categories: @@ -202,7 +195,7 @@ pub struct Http { /// request body, all /// fields are passed via URL path and URL query parameters. /// -/// ### Path template syntax +/// Path template syntax /// /// Template = "/" Segments \[ Verb \] ; /// Segments = Segment { "/" Segment } ; @@ -241,7 +234,7 @@ pub struct Http { /// Document]() as /// `{+var}`. /// -/// ## Using gRPC API Service Configuration +/// Using gRPC API Service Configuration /// /// gRPC API Service Configuration (service config) is a configuration language /// for configuring a gRPC service to become a user-facing product. The @@ -256,15 +249,14 @@ pub struct Http { /// specified in the service config will override any matching transcoding /// configuration in the proto. /// -/// Example: +/// The following example selects a gRPC method and applies an `HttpRule` to it: /// /// http: /// rules: -/// # Selects a gRPC method and applies HttpRule to it. 
/// - selector: example.v1.Messaging.GetMessage /// get: /v1/messages/{message_id}/{sub.subfield} /// -/// ## Special notes +/// Special notes /// /// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the /// proto to JSON conversion must follow the [proto3 @@ -545,6 +537,10 @@ pub struct Publishing { /// #[prost(string, tag = "110")] pub proto_reference_documentation_uri: ::prost::alloc::string::String, + /// Optional link to REST reference documentation. Example: + /// + #[prost(string, tag = "111")] + pub rest_reference_documentation_uri: ::prost::alloc::string::String, } /// Settings for Java client libraries. #[allow(clippy::derive_partial_eq_without_eq)] @@ -609,6 +605,27 @@ pub struct PythonSettings { /// Some settings. #[prost(message, optional, tag = "1")] pub common: ::core::option::Option, + /// Experimental features to be included during client library generation. + #[prost(message, optional, tag = "2")] + pub experimental_features: ::core::option::Option< + python_settings::ExperimentalFeatures, + >, +} +/// Nested message and enum types in `PythonSettings`. +pub mod python_settings { + /// Experimental features to be included during client library generation. + /// These fields will be deprecated once the feature graduates and is enabled + /// by default. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct ExperimentalFeatures { + /// Enables generation of asynchronous REST clients if `rest` transport is + /// enabled. By default, asynchronous REST clients will not be generated. + /// This feature will be enabled by default 1 month after launching the + /// feature in preview packages. + #[prost(bool, tag = "1")] + pub rest_async_io_enabled: bool, + } } /// Settings for Node client libraries. 
#[allow(clippy::derive_partial_eq_without_eq)] @@ -686,6 +703,13 @@ pub struct GoSettings { pub struct MethodSettings { /// The fully qualified name of the method, for which the options below apply. /// This is used to find the method to apply the options. + /// + /// Example: + /// + /// publishing: + /// method_settings: + /// - selector: google.storage.control.v2.StorageControl.CreateFolder + /// # method settings for CreateFolder... #[prost(string, tag = "1")] pub selector: ::prost::alloc::string::String, /// Describes settings to use for long-running operations when generating @@ -694,19 +718,29 @@ pub struct MethodSettings { /// /// Example of a YAML configuration:: /// - /// publishing: - /// method_settings: + /// publishing: + /// method_settings: /// - selector: google.cloud.speech.v2.Speech.BatchRecognize /// long_running: - /// initial_poll_delay: - /// seconds: 60 # 1 minute + /// initial_poll_delay: 60s # 1 minute /// poll_delay_multiplier: 1.5 - /// max_poll_delay: - /// seconds: 360 # 6 minutes - /// total_poll_timeout: - /// seconds: 54000 # 90 minutes + /// max_poll_delay: 360s # 6 minutes + /// total_poll_timeout: 54000s # 90 minutes #[prost(message, optional, tag = "2")] pub long_running: ::core::option::Option, + /// List of top-level fields of the request message, that should be + /// automatically populated by the client libraries based on their + /// (google.api.field_info).format. Currently supported format: UUID4. + /// + /// Example of a YAML configuration: + /// + /// publishing: + /// method_settings: + /// - selector: google.example.v1.ExampleService.CreateExample + /// auto_populated_fields: + /// - request_id + #[prost(string, repeated, tag = "3")] + pub auto_populated_fields: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// Nested message and enum types in `MethodSettings`. pub mod method_settings { @@ -872,6 +906,19 @@ pub enum FieldBehavior { /// a non-empty value will be returned. 
The user will not be aware of what /// non-empty value to expect. NonEmptyDefault = 7, + /// Denotes that the field in a resource (a message annotated with + /// google.api.resource) is used in the resource name to uniquely identify the + /// resource. For AIP-compliant APIs, this should only be applied to the + /// `name` field on the resource. + /// + /// This behavior should not be applied to references to other resources within + /// the message. + /// + /// The identifier field of resources often have different field behavior + /// depending on the request it is embedded in (e.g. for Create methods name + /// is optional and unused, while for Update methods it is required). Instead + /// of method-specific annotations, only `IDENTIFIER` is required. + Identifier = 8, } impl FieldBehavior { /// String value of the enum field names used in the ProtoBuf definition. @@ -888,6 +935,7 @@ impl FieldBehavior { FieldBehavior::Immutable => "IMMUTABLE", FieldBehavior::UnorderedList => "UNORDERED_LIST", FieldBehavior::NonEmptyDefault => "NON_EMPTY_DEFAULT", + FieldBehavior::Identifier => "IDENTIFIER", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -901,6 +949,7 @@ impl FieldBehavior { "IMMUTABLE" => Some(Self::Immutable), "UNORDERED_LIST" => Some(Self::UnorderedList), "NON_EMPTY_DEFAULT" => Some(Self::NonEmptyDefault), + "IDENTIFIER" => Some(Self::Identifier), _ => None, } } @@ -1011,8 +1060,13 @@ pub struct ResourceDescriptor { pub history: i32, /// The plural name used in the resource name and permission names, such as /// 'projects' for the resource name of 'projects/{project}' and the permission - /// name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same - /// concept of the `plural` field in k8s CRD spec + /// name of 'cloudresourcemanager.googleapis.com/projects.get'. 
One exception + /// to this is for Nested Collections that have stuttering names, as defined + /// in [AIP-122](), where the + /// collection ID in the resource name pattern does not necessarily directly + /// match the `plural` value. + /// + /// It is the same concept of the `plural` field in k8s CRD spec /// /// /// Note: The plural form is required even for singleton resources. See diff --git a/googleapis/src/google.cloud.bigquery.storage.v1.rs b/googleapis/src/google.cloud.bigquery.storage.v1.rs index 9601314a..d0842b97 100644 --- a/googleapis/src/google.cloud.bigquery.storage.v1.rs +++ b/googleapis/src/google.cloud.bigquery.storage.v1.rs @@ -222,9 +222,25 @@ pub struct TableFieldSchema { /// () for this field. #[prost(string, tag = "10")] pub default_value_expression: ::prost::alloc::string::String, + /// Optional. The subtype of the RANGE, if the type of this field is RANGE. If + /// the type is RANGE, this field is required. Possible values for the field + /// element type of a RANGE include: + /// * DATE + /// * DATETIME + /// * TIMESTAMP + #[prost(message, optional, tag = "11")] + pub range_element_type: ::core::option::Option, } /// Nested message and enum types in `TableFieldSchema`. pub mod table_field_schema { + /// Represents the type of a field element. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct FieldElementType { + /// Required. The type of a field element. + #[prost(enumeration = "Type", tag = "1")] + pub r#type: i32, + } #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum Type { @@ -260,6 +276,8 @@ pub mod table_field_schema { Interval = 14, /// JSON, String Json = 15, + /// RANGE + Range = 16, } impl Type { /// String value of the enum field names used in the ProtoBuf definition. 
@@ -284,6 +302,7 @@ pub mod table_field_schema { Type::Bignumeric => "BIGNUMERIC", Type::Interval => "INTERVAL", Type::Json => "JSON", + Type::Range => "RANGE", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -305,6 +324,7 @@ pub mod table_field_schema { "BIGNUMERIC" => Some(Self::Bignumeric), "INTERVAL" => Some(Self::Interval), "JSON" => Some(Self::Json), + "RANGE" => Some(Self::Range), _ => None, } } @@ -494,12 +514,51 @@ pub mod read_session { /// ) #[prost(double, optional, tag = "5")] pub sample_percentage: ::core::option::Option, + /// Optional. Set response_compression_codec when creating a read session to + /// enable application-level compression of ReadRows responses. + #[prost(enumeration = "table_read_options::ResponseCompressionCodec", optional, tag = "6")] + pub response_compression_codec: ::core::option::Option, #[prost(oneof = "table_read_options::OutputFormatSerializationOptions", tags = "3, 4")] pub output_format_serialization_options: ::core::option::Option, } /// Nested message and enum types in `TableReadOptions`. pub mod table_read_options { + /// Specifies which compression codec to attempt on the entire serialized + /// response payload (either Arrow record batch or Avro rows). This is + /// not to be confused with the Apache Arrow native compression codecs + /// specified in ArrowSerializationOptions. For performance reasons, when + /// creating a read session requesting Arrow responses, setting both native + /// Arrow compression and application-level response compression will not be + /// allowed - choose, at most, one kind of compression. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum ResponseCompressionCodec { + /// Default is no compression. + Unspecified = 0, + /// Use raw LZ4 compression. + Lz4 = 2, + } + impl ResponseCompressionCodec { + /// String value of the enum field names used in the ProtoBuf definition. 
+ /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + ResponseCompressionCodec::Unspecified => "RESPONSE_COMPRESSION_CODEC_UNSPECIFIED", + ResponseCompressionCodec::Lz4 => "RESPONSE_COMPRESSION_CODEC_LZ4", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "RESPONSE_COMPRESSION_CODEC_UNSPECIFIED" => Some(Self::Unspecified), + "RESPONSE_COMPRESSION_CODEC_LZ4" => Some(Self::Lz4), + _ => None, + } + } + } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum OutputFormatSerializationOptions { @@ -820,6 +879,22 @@ pub struct ReadRowsResponse { /// the current throttling status. #[prost(message, optional, tag = "5")] pub throttle_state: ::core::option::Option, + /// Optional. If the row data in this ReadRowsResponse is compressed, then + /// uncompressed byte size is the original size of the uncompressed row data. + /// If it is set to a value greater than 0, then decompress into a buffer of + /// size uncompressed_byte_size using the compression codec that was requested + /// during session creation time and which is specified in + /// TableReadOptions.response_compression_codec in ReadSession. + /// This value is not set if no response_compression_codec was not requested + /// and it is -1 if the requested compression would not have reduced the size + /// of this ReadRowsResponse's row data. This attempts to match Apache Arrow's + /// behavior described here where + /// the uncompressed length may be set to -1 to indicate that the data that + /// follows is not compressed, which can be useful for cases where compression + /// does not yield appreciable savings. 
When uncompressed_byte_size is not + /// greater than 0, the client should skip decompression. + #[prost(int64, optional, tag = "9")] + pub uncompressed_byte_size: ::core::option::Option, /// Row data is returned in format specified during session creation. #[prost(oneof = "read_rows_response::Rows", tags = "3, 4")] pub rows: ::core::option::Option, @@ -921,8 +996,8 @@ pub struct AppendRowsRequest { /// * In the first request to an AppendRows connection. /// /// * In all subsequent requests to an AppendRows connection, if you use the - /// same connection to write to multiple tables or change the input schema for - /// default streams. + /// same connection to write to multiple tables or change the input schema for + /// default streams. /// /// For explicitly created write streams, the format is: /// @@ -982,6 +1057,17 @@ pub struct AppendRowsRequest { tag = "7" )] pub missing_value_interpretations: ::std::collections::HashMap<::prost::alloc::string::String, i32>, + /// Optional. Default missing value interpretation for all columns in the + /// table. When a value is specified on an `AppendRowsRequest`, it is applied + /// to all requests on the connection from that point forward, until a + /// subsequent `AppendRowsRequest` sets it to a different value. + /// `missing_value_interpretation` can override + /// `default_missing_value_interpretation`. For example, if you want to write + /// `NULL` instead of using default values for some columns, you can set + /// `default_missing_value_interpretation` to `DEFAULT_VALUE` and at the same + /// time, set `missing_value_interpretations` to `NULL_VALUE` on those columns. + #[prost(enumeration = "append_rows_request::MissingValueInterpretation", tag = "8")] + pub default_missing_value_interpretation: i32, /// Input rows. The `writer_schema` field must be specified at the initial /// request and currently, it will be ignored if specified in following /// requests. 
Following requests must have data in the same format as the @@ -1257,7 +1343,8 @@ pub mod storage_error { InvalidCmekProvided = 11, /// There is an encryption error while using customer-managed encryption key. CmekEncryptionError = 12, - /// Key Management Service (KMS) service returned an error. + /// Key Management Service (KMS) service returned an error, which can be + /// retried. KmsServiceError = 13, /// Permission denied while using customer-managed encryption key. KmsPermissionDenied = 14, @@ -1657,14 +1744,14 @@ pub mod big_query_write_client { /// table are governed by the type of stream: /// /// * For COMMITTED streams (which includes the default stream), data is - /// visible immediately upon successful append. + /// visible immediately upon successful append. /// /// * For BUFFERED streams, data is made visible via a subsequent `FlushRows` - /// rpc which advances a cursor to a newer offset in the stream. + /// rpc which advances a cursor to a newer offset in the stream. /// /// * For PENDING streams, data is not made visible until the stream itself is - /// finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly - /// committed via the `BatchCommitWriteStreams` rpc. + /// finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly + /// committed via the `BatchCommitWriteStreams` rpc. pub async fn append_rows( &mut self, request: impl tonic::IntoStreamingRequest, diff --git a/googleapis/src/google.cloud.kms.v1.rs b/googleapis/src/google.cloud.kms.v1.rs index 42f1ea63..c1216f1b 100644 --- a/googleapis/src/google.cloud.kms.v1.rs +++ b/googleapis/src/google.cloud.kms.v1.rs @@ -88,7 +88,7 @@ pub struct CryptoKey { /// [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED] /// state before transitioning to /// [DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED]. - /// If not specified at creation time, the default duration is 24 hours. 
+ /// If not specified at creation time, the default duration is 30 days. #[prost(message, optional, tag = "14")] pub destroy_scheduled_duration: ::core::option::Option<::prost_types::Duration>, /// Immutable. The resource name of the backend environment where the key @@ -103,6 +103,16 @@ pub struct CryptoKey { /// [ProtectionLevels][google.cloud.kms.v1.ProtectionLevel] in the future. #[prost(string, tag = "15")] pub crypto_key_backend: ::prost::alloc::string::String, + /// Optional. The policy used for Key Access Justifications Policy Enforcement. + /// If this field is present and this key is enrolled in Key Access + /// Justifications Policy Enforcement, the policy will be evaluated in encrypt, + /// decrypt, and sign operations, and the operation will fail if rejected by + /// the policy. The policy is defined by specifying zero or more allowed + /// justification codes. + /// + /// By default, this field is absent, and all justification codes are allowed. + #[prost(message, optional, tag = "17")] + pub key_access_justifications_policy: ::core::option::Option, /// Controls the rate of automatic rotation. #[prost(oneof = "crypto_key::RotationSchedule", tags = "8")] pub rotation_schedule: ::core::option::Option, @@ -411,11 +421,11 @@ pub mod crypto_key_version { /// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] /// [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. /// - /// Algorithms beginning with "RSA_SIGN_" are usable with + /// Algorithms beginning with `RSA_SIGN_` are usable with /// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] /// [ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN]. /// - /// The fields in the name after "RSA_SIGN_" correspond to the following + /// The fields in the name after `RSA_SIGN_` correspond to the following /// parameters: padding algorithm, modulus bit length, and digest algorithm. 
/// /// For PSS, the salt length used is equal to the length of digest @@ -423,25 +433,25 @@ pub mod crypto_key_version { /// [RSA_SIGN_PSS_2048_SHA256][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm.RSA_SIGN_PSS_2048_SHA256] /// will use PSS with a salt length of 256 bits or 32 bytes. /// - /// Algorithms beginning with "RSA_DECRYPT_" are usable with + /// Algorithms beginning with `RSA_DECRYPT_` are usable with /// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] /// [ASYMMETRIC_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_DECRYPT]. /// - /// The fields in the name after "RSA_DECRYPT_" correspond to the following + /// The fields in the name after `RSA_DECRYPT_` correspond to the following /// parameters: padding algorithm, modulus bit length, and digest algorithm. /// - /// Algorithms beginning with "EC_SIGN_" are usable with + /// Algorithms beginning with `EC_SIGN_` are usable with /// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] /// [ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN]. /// - /// The fields in the name after "EC_SIGN_" correspond to the following + /// The fields in the name after `EC_SIGN_` correspond to the following /// parameters: elliptic curve, digest algorithm. /// - /// Algorithms beginning with "HMAC_" are usable with + /// Algorithms beginning with `HMAC_` are usable with /// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] /// [MAC][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.MAC]. /// - /// The suffix following "HMAC_" corresponds to the hash algorithm being used + /// The suffix following `HMAC_` corresponds to the hash algorithm being used /// (eg. SHA256). /// /// For more information, see \[Key purposes and algorithms\] @@ -514,6 +524,8 @@ pub mod crypto_key_version { /// Other hash functions can also be used: /// EcSignSecp256k1Sha256 = 31, + /// EdDSA on the Curve25519 in pure mode (taking data as input). 
+ EcSignEd25519 = 40, /// HMAC-SHA256 signing with a 256 bit key. HmacSha256 = 32, /// HMAC-SHA1 signing with a 160 bit key. @@ -563,6 +575,7 @@ pub mod crypto_key_version { CryptoKeyVersionAlgorithm::EcSignP256Sha256 => "EC_SIGN_P256_SHA256", CryptoKeyVersionAlgorithm::EcSignP384Sha384 => "EC_SIGN_P384_SHA384", CryptoKeyVersionAlgorithm::EcSignSecp256k1Sha256 => "EC_SIGN_SECP256K1_SHA256", + CryptoKeyVersionAlgorithm::EcSignEd25519 => "EC_SIGN_ED25519", CryptoKeyVersionAlgorithm::HmacSha256 => "HMAC_SHA256", CryptoKeyVersionAlgorithm::HmacSha1 => "HMAC_SHA1", CryptoKeyVersionAlgorithm::HmacSha384 => "HMAC_SHA384", @@ -603,6 +616,7 @@ pub mod crypto_key_version { "EC_SIGN_P256_SHA256" => Some(Self::EcSignP256Sha256), "EC_SIGN_P384_SHA384" => Some(Self::EcSignP384Sha384), "EC_SIGN_SECP256K1_SHA256" => Some(Self::EcSignSecp256k1Sha256), + "EC_SIGN_ED25519" => Some(Self::EcSignEd25519), "HMAC_SHA256" => Some(Self::HmacSha256), "HMAC_SHA1" => Some(Self::HmacSha1), "HMAC_SHA384" => Some(Self::HmacSha384), @@ -752,7 +766,7 @@ pub mod crypto_key_version { } } } -/// The public key for a given +/// The public keys for a given /// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. Obtained via /// [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]. #[allow(clippy::derive_partial_eq_without_eq)] @@ -1046,6 +1060,22 @@ pub struct ExternalProtectionLevelOptions { #[prost(string, tag = "2")] pub ekm_connection_key_path: ::prost::alloc::string::String, } +/// A +/// [KeyAccessJustificationsPolicy][google.cloud.kms.v1.KeyAccessJustificationsPolicy] +/// specifies zero or more allowed +/// [AccessReason][google.cloud.kms.v1.AccessReason] values for encrypt, decrypt, +/// and sign operations on a [CryptoKey][google.cloud.kms.v1.CryptoKey]. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct KeyAccessJustificationsPolicy { + /// The list of allowed reasons for access to a + /// [CryptoKey][google.cloud.kms.v1.CryptoKey]. Zero allowed access reasons + /// means all encrypt, decrypt, and sign operations for the + /// [CryptoKey][google.cloud.kms.v1.CryptoKey] associated with this policy will + /// fail. + #[prost(enumeration = "AccessReason", repeated, tag = "1")] + pub allowed_access_reasons: ::prost::alloc::vec::Vec, +} /// [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] specifies how /// cryptographic operations are performed. For more information, see [Protection /// levels] (). @@ -1089,6 +1119,103 @@ impl ProtectionLevel { } } } +/// Describes the reason for a data access. Please refer to +/// +/// for the detailed semantic meaning of justification reason codes. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum AccessReason { + /// Unspecified access reason. + ReasonUnspecified = 0, + /// Customer-initiated support. + CustomerInitiatedSupport = 1, + /// Google-initiated access for system management and troubleshooting. + GoogleInitiatedService = 2, + /// Google-initiated access in response to a legal request or legal process. + ThirdPartyDataRequest = 3, + /// Google-initiated access for security, fraud, abuse, or compliance purposes. + GoogleInitiatedReview = 4, + /// Customer uses their account to perform any access to their own data which + /// their IAM policy authorizes. + CustomerInitiatedAccess = 5, + /// Google systems access customer data to help optimize the structure of the + /// data or quality for future uses by the customer. + GoogleInitiatedSystemOperation = 6, + /// No reason is expected for this key request. 
+ ReasonNotExpected = 7, + /// Customer uses their account to perform any access to their own data which + /// their IAM policy authorizes, and one of the following is true: + /// + /// * A Google administrator has reset the root-access account associated with + /// the user's organization within the past 7 days. + /// * A Google-initiated emergency access operation has interacted with a + /// resource in the same project or folder as the currently accessed resource + /// within the past 7 days. + ModifiedCustomerInitiatedAccess = 8, + /// Google systems access customer data to help optimize the structure of the + /// data or quality for future uses by the customer, and one of the following + /// is true: + /// + /// * A Google administrator has reset the root-access account associated with + /// the user's organization within the past 7 days. + /// * A Google-initiated emergency access operation has interacted with a + /// resource in the same project or folder as the currently accessed resource + /// within the past 7 days. + ModifiedGoogleInitiatedSystemOperation = 9, + /// Google-initiated access to maintain system reliability. + GoogleResponseToProductionAlert = 10, + /// One of the following operations is being executed while simultaneously + /// encountering an internal technical issue which prevented a more precise + /// justification code from being generated: + /// + /// * Your account has been used to perform any access to your own data which + /// your IAM policy authorizes. + /// * An automated Google system operates on encrypted customer data which your + /// IAM policy authorizes. + /// * Customer-initiated Google support access. + /// * Google-initiated support access to protect system reliability. + CustomerAuthorizedWorkflowServicing = 11, +} +impl AccessReason { + /// String value of the enum field names used in the ProtoBuf definition. 
+ /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + AccessReason::ReasonUnspecified => "REASON_UNSPECIFIED", + AccessReason::CustomerInitiatedSupport => "CUSTOMER_INITIATED_SUPPORT", + AccessReason::GoogleInitiatedService => "GOOGLE_INITIATED_SERVICE", + AccessReason::ThirdPartyDataRequest => "THIRD_PARTY_DATA_REQUEST", + AccessReason::GoogleInitiatedReview => "GOOGLE_INITIATED_REVIEW", + AccessReason::CustomerInitiatedAccess => "CUSTOMER_INITIATED_ACCESS", + AccessReason::GoogleInitiatedSystemOperation => "GOOGLE_INITIATED_SYSTEM_OPERATION", + AccessReason::ReasonNotExpected => "REASON_NOT_EXPECTED", + AccessReason::ModifiedCustomerInitiatedAccess => "MODIFIED_CUSTOMER_INITIATED_ACCESS", + AccessReason::ModifiedGoogleInitiatedSystemOperation => "MODIFIED_GOOGLE_INITIATED_SYSTEM_OPERATION", + AccessReason::GoogleResponseToProductionAlert => "GOOGLE_RESPONSE_TO_PRODUCTION_ALERT", + AccessReason::CustomerAuthorizedWorkflowServicing => "CUSTOMER_AUTHORIZED_WORKFLOW_SERVICING", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "REASON_UNSPECIFIED" => Some(Self::ReasonUnspecified), + "CUSTOMER_INITIATED_SUPPORT" => Some(Self::CustomerInitiatedSupport), + "GOOGLE_INITIATED_SERVICE" => Some(Self::GoogleInitiatedService), + "THIRD_PARTY_DATA_REQUEST" => Some(Self::ThirdPartyDataRequest), + "GOOGLE_INITIATED_REVIEW" => Some(Self::GoogleInitiatedReview), + "CUSTOMER_INITIATED_ACCESS" => Some(Self::CustomerInitiatedAccess), + "GOOGLE_INITIATED_SYSTEM_OPERATION" => Some(Self::GoogleInitiatedSystemOperation), + "REASON_NOT_EXPECTED" => Some(Self::ReasonNotExpected), + "MODIFIED_CUSTOMER_INITIATED_ACCESS" => Some(Self::ModifiedCustomerInitiatedAccess), + "MODIFIED_GOOGLE_INITIATED_SYSTEM_OPERATION" => Some(Self::ModifiedGoogleInitiatedSystemOperation), + "GOOGLE_RESPONSE_TO_PRODUCTION_ALERT" => Some(Self::GoogleResponseToProductionAlert), + "CUSTOMER_AUTHORIZED_WORKFLOW_SERVICING" => Some(Self::CustomerAuthorizedWorkflowServicing), + _ => None, + } + } +} /// Request message for /// [KeyManagementService.ListKeyRings][google.cloud.kms.v1.KeyManagementService.ListKeyRings]. #[allow(clippy::derive_partial_eq_without_eq)] @@ -3547,7 +3674,7 @@ pub struct EkmConnection { /// [EkmConnection][google.cloud.kms.v1.EkmConnection] was created. #[prost(message, optional, tag = "2")] pub create_time: ::core::option::Option<::prost_types::Timestamp>, - /// A list of + /// Optional. A list of /// [ServiceResolvers][google.cloud.kms.v1.EkmConnection.ServiceResolver] where /// the EKM can be reached. There should be one ServiceResolver per EKM /// replica. Currently, only a single @@ -3615,8 +3742,8 @@ pub mod ekm_connection { /// [EkmConnection][google.cloud.kms.v1.EkmConnection] must be initiated from /// the EKM directly and cannot be performed from Cloud KMS. 
This means that: /// * When creating a - /// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] associated with - /// this + /// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] associated with + /// this /// [EkmConnection][google.cloud.kms.v1.EkmConnection], the caller must /// supply the key path of pre-existing external key material that will be /// linked to the [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. @@ -3628,8 +3755,8 @@ pub mod ekm_connection { /// [EkmConnection][google.cloud.kms.v1.EkmConnection] use EKM-side key /// management operations initiated from Cloud KMS. This means that: /// * When a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] - /// associated with this [EkmConnection][google.cloud.kms.v1.EkmConnection] - /// is + /// associated with this [EkmConnection][google.cloud.kms.v1.EkmConnection] + /// is /// created, the EKM automatically generates new key material and a new /// key path. The caller cannot supply the key path of pre-existing /// external key material. diff --git a/googleapis/src/google.devtools.artifactregistry.v1.rs b/googleapis/src/google.devtools.artifactregistry.v1.rs index e2255ad2..dd9d13ea 100644 --- a/googleapis/src/google.devtools.artifactregistry.v1.rs +++ b/googleapis/src/google.devtools.artifactregistry.v1.rs @@ -719,9 +719,9 @@ pub struct ListFilesRequest { /// An example of using a filter: /// /// * `name="projects/p1/locations/us-central1/repositories/repo1/files/a/b/*"` --> Files with an - /// ID starting with "a/b/". + /// ID starting with "a/b/". /// * `owner="projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/1.0"` --> - /// Files owned by the version `1.0` in package `pkg1`. + /// Files owned by the version `1.0` in package `pkg1`. #[prost(string, tag = "4")] pub filter: ::prost::alloc::string::String, /// The maximum number of files to return. 
@@ -759,7 +759,7 @@ pub struct GetFileRequest { #[derive(Clone, PartialEq, ::prost::Message)] pub struct Package { /// The name of the package, for example: - /// "projects/p1/locations/us-central1/repositories/repo1/packages/pkg1". + /// `projects/p1/locations/us-central1/repositories/repo1/packages/pkg1`. /// If the package ID part contains slashes, the slashes are escaped. #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, @@ -816,15 +816,596 @@ pub struct DeletePackageRequest { #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, } +/// Artifact policy configuration for the repository contents. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UpstreamPolicy { + /// The user-provided ID of the upstream policy. + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// A reference to the repository resource, for example: + /// `projects/p1/locations/us-central1/repositories/repo1`. + #[prost(string, tag = "2")] + pub repository: ::prost::alloc::string::String, + /// Entries with a greater priority value take precedence in the pull order. + #[prost(int32, tag = "3")] + pub priority: i32, +} +/// CleanupPolicyCondition is a set of conditions attached to a CleanupPolicy. +/// If multiple entries are set, all must be satisfied for the condition to be +/// satisfied. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CleanupPolicyCondition { + /// Match versions by tag status. + #[prost(enumeration = "cleanup_policy_condition::TagState", optional, tag = "2")] + pub tag_state: ::core::option::Option, + /// Match versions by tag prefix. Applied on any prefix match. + #[prost(string, repeated, tag = "3")] + pub tag_prefixes: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Match versions by version name prefix. Applied on any prefix match. 
+ #[prost(string, repeated, tag = "4")] + pub version_name_prefixes: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Match versions by package prefix. Applied on any prefix match. + #[prost(string, repeated, tag = "5")] + pub package_name_prefixes: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Match versions older than a duration. + #[prost(message, optional, tag = "6")] + pub older_than: ::core::option::Option<::prost_types::Duration>, + /// Match versions newer than a duration. + #[prost(message, optional, tag = "7")] + pub newer_than: ::core::option::Option<::prost_types::Duration>, +} +/// Nested message and enum types in `CleanupPolicyCondition`. +pub mod cleanup_policy_condition { + /// Statuses applying to versions. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum TagState { + /// Tag status not specified. + Unspecified = 0, + /// Applies to tagged versions only. + Tagged = 1, + /// Applies to untagged versions only. + Untagged = 2, + /// Applies to all versions. + Any = 3, + } + impl TagState { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + TagState::Unspecified => "TAG_STATE_UNSPECIFIED", + TagState::Tagged => "TAGGED", + TagState::Untagged => "UNTAGGED", + TagState::Any => "ANY", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "TAG_STATE_UNSPECIFIED" => Some(Self::Unspecified), + "TAGGED" => Some(Self::Tagged), + "UNTAGGED" => Some(Self::Untagged), + "ANY" => Some(Self::Any), + _ => None, + } + } + } +} +/// CleanupPolicyMostRecentVersions is an alternate condition of a CleanupPolicy +/// for retaining a minimum number of versions. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CleanupPolicyMostRecentVersions { + /// List of package name prefixes that will apply this rule. + #[prost(string, repeated, tag = "1")] + pub package_name_prefixes: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Minimum number of versions to keep. + #[prost(int32, optional, tag = "2")] + pub keep_count: ::core::option::Option, +} +/// Artifact policy configuration for repository cleanup policies. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CleanupPolicy { + /// The user-provided ID of the cleanup policy. + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Policy action. + #[prost(enumeration = "cleanup_policy::Action", tag = "3")] + pub action: i32, + #[prost(oneof = "cleanup_policy::ConditionType", tags = "2, 4")] + pub condition_type: ::core::option::Option, +} +/// Nested message and enum types in `CleanupPolicy`. +pub mod cleanup_policy { + /// Action type for a cleanup policy. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum Action { + /// Action not specified. + Unspecified = 0, + /// Delete action. + Delete = 1, + /// Keep action. + Keep = 2, + } + impl Action { + /// String value of the enum field names used in the ProtoBuf definition. 
+ /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Action::Unspecified => "ACTION_UNSPECIFIED", + Action::Delete => "DELETE", + Action::Keep => "KEEP", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "ACTION_UNSPECIFIED" => Some(Self::Unspecified), + "DELETE" => Some(Self::Delete), + "KEEP" => Some(Self::Keep), + _ => None, + } + } + } + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum ConditionType { + /// Policy condition for matching versions. + #[prost(message, tag = "2")] + Condition(super::CleanupPolicyCondition), + /// Policy condition for retaining a minimum number of versions. May only be + /// specified with a Keep action. + #[prost(message, tag = "4")] + MostRecentVersions(super::CleanupPolicyMostRecentVersions), + } +} +/// Virtual repository configuration. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct VirtualRepositoryConfig { + /// Policies that configure the upstream artifacts distributed by the Virtual + /// Repository. Upstream policies cannot be set on a standard repository. + #[prost(message, repeated, tag = "1")] + pub upstream_policies: ::prost::alloc::vec::Vec, +} +/// Remote repository configuration. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RemoteRepositoryConfig { + /// The description of the remote source. + #[prost(string, tag = "1")] + pub description: ::prost::alloc::string::String, + /// Optional. The credentials used to access the remote repository. 
+ #[prost(message, optional, tag = "9")] + pub upstream_credentials: ::core::option::Option, + /// Settings specific to the remote repository. + #[prost(oneof = "remote_repository_config::RemoteSource", tags = "2, 3, 4, 5, 6, 7")] + pub remote_source: ::core::option::Option, +} +/// Nested message and enum types in `RemoteRepositoryConfig`. +pub mod remote_repository_config { + /// The credentials to access the remote repository. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct UpstreamCredentials { + #[prost(oneof = "upstream_credentials::Credentials", tags = "1")] + pub credentials: ::core::option::Option, + } + /// Nested message and enum types in `UpstreamCredentials`. + pub mod upstream_credentials { + /// Username and password credentials. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct UsernamePasswordCredentials { + /// The username to access the remote repository. + #[prost(string, tag = "1")] + pub username: ::prost::alloc::string::String, + /// The Secret Manager key version that holds the password to access the + /// remote repository. Must be in the format of + /// `projects/{project}/secrets/{secret}/versions/{version}`. + #[prost(string, tag = "2")] + pub password_secret_version: ::prost::alloc::string::String, + } + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Credentials { + /// Use username and password to access the remote repository. + #[prost(message, tag = "1")] + UsernamePasswordCredentials(UsernamePasswordCredentials), + } + } + /// Configuration for a Docker remote repository. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct DockerRepository { + /// Address of the remote repository. 
+ #[prost(oneof = "docker_repository::Upstream", tags = "1")] + pub upstream: ::core::option::Option, + } + /// Nested message and enum types in `DockerRepository`. + pub mod docker_repository { + /// Predefined list of publicly available Docker repositories like Docker + /// Hub. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum PublicRepository { + /// Unspecified repository. + Unspecified = 0, + /// Docker Hub. + DockerHub = 1, + } + impl PublicRepository { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + PublicRepository::Unspecified => "PUBLIC_REPOSITORY_UNSPECIFIED", + PublicRepository::DockerHub => "DOCKER_HUB", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "PUBLIC_REPOSITORY_UNSPECIFIED" => Some(Self::Unspecified), + "DOCKER_HUB" => Some(Self::DockerHub), + _ => None, + } + } + } + /// Address of the remote repository. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] + pub enum Upstream { + /// One of the publicly available Docker repositories supported by Artifact + /// Registry. + #[prost(enumeration = "PublicRepository", tag = "1")] + PublicRepository(i32), + } + } + /// Configuration for a Maven remote repository. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct MavenRepository { + /// Address of the remote repository. + #[prost(oneof = "maven_repository::Upstream", tags = "1")] + pub upstream: ::core::option::Option, + } + /// Nested message and enum types in `MavenRepository`. 
+ pub mod maven_repository { + /// Predefined list of publicly available Maven repositories like Maven + /// Central. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum PublicRepository { + /// Unspecified repository. + Unspecified = 0, + /// Maven Central. + MavenCentral = 1, + } + impl PublicRepository { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + PublicRepository::Unspecified => "PUBLIC_REPOSITORY_UNSPECIFIED", + PublicRepository::MavenCentral => "MAVEN_CENTRAL", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "PUBLIC_REPOSITORY_UNSPECIFIED" => Some(Self::Unspecified), + "MAVEN_CENTRAL" => Some(Self::MavenCentral), + _ => None, + } + } + } + /// Address of the remote repository. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] + pub enum Upstream { + /// One of the publicly available Maven repositories supported by Artifact + /// Registry. + #[prost(enumeration = "PublicRepository", tag = "1")] + PublicRepository(i32), + } + } + /// Configuration for a Npm remote repository. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct NpmRepository { + /// Address of the remote repository + #[prost(oneof = "npm_repository::Upstream", tags = "1")] + pub upstream: ::core::option::Option, + } + /// Nested message and enum types in `NpmRepository`. + pub mod npm_repository { + /// Predefined list of publicly available NPM repositories like npmjs. 
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum PublicRepository { + /// Unspecified repository. + Unspecified = 0, + /// npmjs. + Npmjs = 1, + } + impl PublicRepository { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + PublicRepository::Unspecified => "PUBLIC_REPOSITORY_UNSPECIFIED", + PublicRepository::Npmjs => "NPMJS", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "PUBLIC_REPOSITORY_UNSPECIFIED" => Some(Self::Unspecified), + "NPMJS" => Some(Self::Npmjs), + _ => None, + } + } + } + /// Address of the remote repository + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] + pub enum Upstream { + /// One of the publicly available Npm repositories supported by Artifact + /// Registry. + #[prost(enumeration = "PublicRepository", tag = "1")] + PublicRepository(i32), + } + } + /// Configuration for a Python remote repository. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct PythonRepository { + /// Address of the remote repository. + #[prost(oneof = "python_repository::Upstream", tags = "1")] + pub upstream: ::core::option::Option, + } + /// Nested message and enum types in `PythonRepository`. + pub mod python_repository { + /// Predefined list of publicly available Python repositories like PyPI.org. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum PublicRepository { + /// Unspecified repository. + Unspecified = 0, + /// PyPI. 
+ Pypi = 1, + } + impl PublicRepository { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + PublicRepository::Unspecified => "PUBLIC_REPOSITORY_UNSPECIFIED", + PublicRepository::Pypi => "PYPI", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "PUBLIC_REPOSITORY_UNSPECIFIED" => Some(Self::Unspecified), + "PYPI" => Some(Self::Pypi), + _ => None, + } + } + } + /// Address of the remote repository. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] + pub enum Upstream { + /// One of the publicly available Python repositories supported by Artifact + /// Registry. + #[prost(enumeration = "PublicRepository", tag = "1")] + PublicRepository(i32), + } + } + /// Configuration for an Apt remote repository. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct AptRepository { + /// Address of the remote repository. + #[prost(oneof = "apt_repository::Upstream", tags = "1")] + pub upstream: ::core::option::Option, + } + /// Nested message and enum types in `AptRepository`. + pub mod apt_repository { + /// Publicly available Apt repositories constructed from a common repository + /// base and a custom repository path. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct PublicRepository { + /// A common public repository base for Apt. + #[prost(enumeration = "public_repository::RepositoryBase", tag = "1")] + pub repository_base: i32, + /// A custom field to define a path to a specific repository from the base. 
+ #[prost(string, tag = "2")] + pub repository_path: ::prost::alloc::string::String, + } + /// Nested message and enum types in `PublicRepository`. + pub mod public_repository { + /// Predefined list of publicly available repository bases for Apt. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum RepositoryBase { + /// Unspecified repository base. + Unspecified = 0, + /// Debian. + Debian = 1, + /// Ubuntu LTS/Pro. + Ubuntu = 2, + } + impl RepositoryBase { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + RepositoryBase::Unspecified => "REPOSITORY_BASE_UNSPECIFIED", + RepositoryBase::Debian => "DEBIAN", + RepositoryBase::Ubuntu => "UBUNTU", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "REPOSITORY_BASE_UNSPECIFIED" => Some(Self::Unspecified), + "DEBIAN" => Some(Self::Debian), + "UBUNTU" => Some(Self::Ubuntu), + _ => None, + } + } + } + } + /// Address of the remote repository. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Upstream { + /// One of the publicly available Apt repositories supported by Artifact + /// Registry. + #[prost(message, tag = "1")] + PublicRepository(PublicRepository), + } + } + /// Configuration for a Yum remote repository. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct YumRepository { + /// Address of the remote repository. + #[prost(oneof = "yum_repository::Upstream", tags = "1")] + pub upstream: ::core::option::Option, + } + /// Nested message and enum types in `YumRepository`. 
+ pub mod yum_repository { + /// Publicly available Yum repositories constructed from a common repository + /// base and a custom repository path. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct PublicRepository { + /// A common public repository base for Yum. + #[prost(enumeration = "public_repository::RepositoryBase", tag = "1")] + pub repository_base: i32, + /// A custom field to define a path to a specific repository from the base. + #[prost(string, tag = "2")] + pub repository_path: ::prost::alloc::string::String, + } + /// Nested message and enum types in `PublicRepository`. + pub mod public_repository { + /// Predefined list of publicly available repository bases for Yum. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum RepositoryBase { + /// Unspecified repository base. + Unspecified = 0, + /// CentOS. + Centos = 1, + /// CentOS Debug. + CentosDebug = 2, + /// CentOS Vault. + CentosVault = 3, + /// CentOS Stream. + CentosStream = 4, + /// Rocky. + Rocky = 5, + /// Fedora Extra Packages for Enterprise Linux (EPEL). + Epel = 6, + } + impl RepositoryBase { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + RepositoryBase::Unspecified => "REPOSITORY_BASE_UNSPECIFIED", + RepositoryBase::Centos => "CENTOS", + RepositoryBase::CentosDebug => "CENTOS_DEBUG", + RepositoryBase::CentosVault => "CENTOS_VAULT", + RepositoryBase::CentosStream => "CENTOS_STREAM", + RepositoryBase::Rocky => "ROCKY", + RepositoryBase::Epel => "EPEL", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "REPOSITORY_BASE_UNSPECIFIED" => Some(Self::Unspecified), + "CENTOS" => Some(Self::Centos), + "CENTOS_DEBUG" => Some(Self::CentosDebug), + "CENTOS_VAULT" => Some(Self::CentosVault), + "CENTOS_STREAM" => Some(Self::CentosStream), + "ROCKY" => Some(Self::Rocky), + "EPEL" => Some(Self::Epel), + _ => None, + } + } + } + } + /// Address of the remote repository. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Upstream { + /// One of the publicly available Yum repositories supported by Artifact + /// Registry. + #[prost(message, tag = "1")] + PublicRepository(PublicRepository), + } + } + /// Settings specific to the remote repository. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum RemoteSource { + /// Specific settings for a Docker remote repository. + #[prost(message, tag = "2")] + DockerRepository(DockerRepository), + /// Specific settings for a Maven remote repository. + #[prost(message, tag = "3")] + MavenRepository(MavenRepository), + /// Specific settings for an Npm remote repository. + #[prost(message, tag = "4")] + NpmRepository(NpmRepository), + /// Specific settings for a Python remote repository. + #[prost(message, tag = "5")] + PythonRepository(PythonRepository), + /// Specific settings for an Apt remote repository. + #[prost(message, tag = "6")] + AptRepository(AptRepository), + /// Specific settings for a Yum remote repository. + #[prost(message, tag = "7")] + YumRepository(YumRepository), + } +} /// A Repository for storing artifacts with a specific format. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Repository { /// The name of the repository, for example: - /// "projects/p1/locations/us-central1/repositories/repo1". + /// `projects/p1/locations/us-central1/repositories/repo1`. 
#[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, - /// The format of packages that are stored in the repository. + /// Optional. The format of packages that are stored in the repository. #[prost(enumeration = "repository::Format", tag = "2")] pub format: i32, /// The user-provided description of the repository. @@ -837,10 +1418,10 @@ pub struct Repository { /// and dashes. #[prost(map = "string, string", tag = "4")] pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, - /// The time when the repository was created. + /// Output only. The time when the repository was created. #[prost(message, optional, tag = "5")] pub create_time: ::core::option::Option<::prost_types::Timestamp>, - /// The time when the repository was last updated. + /// Output only. The time when the repository was last updated. #[prost(message, optional, tag = "6")] pub update_time: ::core::option::Option<::prost_types::Timestamp>, /// The Cloud KMS resource name of the customer managed encryption key that's @@ -849,9 +1430,34 @@ pub struct Repository { /// This value may not be changed after the Repository has been created. #[prost(string, tag = "8")] pub kms_key_name: ::prost::alloc::string::String, + /// Optional. The mode of the repository. + #[prost(enumeration = "repository::Mode", tag = "10")] + pub mode: i32, + /// Optional. Cleanup policies for this repository. Cleanup policies indicate + /// when certain package versions can be automatically deleted. Map keys are + /// policy IDs supplied by users during policy creation. They must unique + /// within a repository and be under 128 characters in length. + #[prost(map = "string, message", tag = "12")] + pub cleanup_policies: ::std::collections::HashMap<::prost::alloc::string::String, CleanupPolicy>, + /// Output only. The size, in bytes, of all artifact storage in this + /// repository. 
Repositories that are generally available or in public preview + /// use this to calculate storage costs. + #[prost(int64, tag = "13")] + pub size_bytes: i64, + /// Output only. If set, the repository satisfies physical zone separation. + #[prost(bool, tag = "16")] + pub satisfies_pzs: bool, + /// Optional. If true, the cleanup pipeline is prevented from deleting versions + /// in this repository. + #[prost(bool, tag = "18")] + pub cleanup_policy_dry_run: bool, /// Repository-specific configurations. - #[prost(oneof = "repository::FormatConfig", tags = "9")] + #[prost(oneof = "repository::FormatConfig", tags = "9, 17")] pub format_config: ::core::option::Option, + /// Repository configuration specific to the Mode value being selected (Remote + /// or Virtual) + #[prost(oneof = "repository::ModeConfig", tags = "14, 15")] + pub mode_config: ::core::option::Option, } /// Nested message and enum types in `Repository`. pub mod repository { @@ -907,6 +1513,18 @@ pub mod repository { } } } + /// DockerRepositoryConfig is docker related repository details. + /// Provides additional configuration details for repositories of the docker + /// format type. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct DockerRepositoryConfig { + /// The repository which enabled this flag prevents all tags from being + /// modified, moved or deleted. This does not prevent tags from being + /// created. + #[prost(bool, tag = "1")] + pub immutable_tags: bool, + } /// A package format. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] @@ -925,6 +1543,10 @@ pub mod repository { Yum = 6, /// Python package format. Python = 8, + /// Kubeflow Pipelines package format. + Kfp = 9, + /// Go package format. + Go = 10, } impl Format { /// String value of the enum field names used in the ProtoBuf definition. 
@@ -940,6 +1562,8 @@ pub mod repository { Format::Apt => "APT", Format::Yum => "YUM", Format::Python => "PYTHON", + Format::Kfp => "KFP", + Format::Go => "GO", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -952,6 +1576,46 @@ pub mod repository { "APT" => Some(Self::Apt), "YUM" => Some(Self::Yum), "PYTHON" => Some(Self::Python), + "KFP" => Some(Self::Kfp), + "GO" => Some(Self::Go), + _ => None, + } + } + } + /// The mode configures the repository to serve artifacts from different + /// sources. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum Mode { + /// Unspecified mode. + Unspecified = 0, + /// A standard repository storing artifacts. + StandardRepository = 1, + /// A virtual repository to serve artifacts from one or more sources. + VirtualRepository = 2, + /// A remote repository to serve artifacts from a remote source. + RemoteRepository = 3, + } + impl Mode { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Mode::Unspecified => "MODE_UNSPECIFIED", + Mode::StandardRepository => "STANDARD_REPOSITORY", + Mode::VirtualRepository => "VIRTUAL_REPOSITORY", + Mode::RemoteRepository => "REMOTE_REPOSITORY", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "MODE_UNSPECIFIED" => Some(Self::Unspecified), + "STANDARD_REPOSITORY" => Some(Self::StandardRepository), + "VIRTUAL_REPOSITORY" => Some(Self::VirtualRepository), + "REMOTE_REPOSITORY" => Some(Self::RemoteRepository), _ => None, } } @@ -964,13 +1628,30 @@ pub mod repository { /// for the repositories of maven type. 
#[prost(message, tag = "9")] MavenConfig(MavenRepositoryConfig), + /// Docker repository config contains repository level configuration + /// for the repositories of docker type. + #[prost(message, tag = "17")] + DockerConfig(DockerRepositoryConfig), + } + /// Repository configuration specific to the Mode value being selected (Remote + /// or Virtual) + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum ModeConfig { + /// Configuration specific for a Virtual Repository. + #[prost(message, tag = "14")] + VirtualRepositoryConfig(super::VirtualRepositoryConfig), + /// Configuration specific for a Remote Repository. + #[prost(message, tag = "15")] + RemoteRepositoryConfig(super::RemoteRepositoryConfig), } } /// The request to list repositories. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListRepositoriesRequest { - /// Required. The name of the parent resource whose repositories will be listed. + /// Required. The name of the parent resource whose repositories will be + /// listed. #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, /// The maximum number of repositories to return. Maximum page size is 1,000. @@ -1004,13 +1685,14 @@ pub struct GetRepositoryRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CreateRepositoryRequest { - /// Required. The name of the parent resource where the repository will be created. + /// Required. The name of the parent resource where the repository will be + /// created. #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, - /// The repository id to use for this repository. + /// Required. The repository id to use for this repository. #[prost(string, tag = "2")] pub repository_id: ::prost::alloc::string::String, - /// The repository to be created. + /// Required. The repository to be created. 
#[prost(message, optional, tag = "3")] pub repository: ::core::option::Option, } @@ -1134,7 +1816,9 @@ pub struct Tag { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListTagsRequest { - /// The name of the parent resource whose tags will be listed. + /// The name of the parent package whose tags will be listed. + /// For example: + /// `projects/p1/locations/us-central1/repositories/repo1/packages/pkg1`. #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, /// An expression for filtering the results of the request. Filter rules are @@ -1145,7 +1829,7 @@ pub struct ListTagsRequest { /// An example of using a filter: /// /// * `version="projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/1.0"` - /// --> Tags that are applied to the version `1.0` in package `pkg1`. + /// --> Tags that are applied to the version `1.0` in package `pkg1`. #[prost(string, tag = "4")] pub filter: ::prost::alloc::string::String, /// The maximum number of tags to return. Maximum page size is 10,000. @@ -1298,6 +1982,21 @@ pub struct DeleteVersionRequest { #[prost(bool, tag = "2")] pub force: bool, } +/// The request to delete multiple versions across a repository. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BatchDeleteVersionsRequest { + /// The name of the repository holding all requested versions. + #[prost(string, tag = "1")] + pub parent: ::prost::alloc::string::String, + /// Required. The names of the versions to delete. + /// A maximum of 10000 versions can be deleted in a batch. + #[prost(string, repeated, tag = "2")] + pub names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// If true, the request is performed without deleting data, following AIP-163. + #[prost(bool, tag = "3")] + pub validate_only: bool, +} /// The metadata of an LRO from deleting multiple versions. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -1852,6 +2551,27 @@ pub mod artifact_registry_client { )); self.inner.unary(req, path, codec).await } + /// Deletes multiple versions across a repository. The returned operation will + /// complete once the versions have been deleted. + pub async fn batch_delete_versions( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.devtools.artifactregistry.v1.ArtifactRegistry/BatchDeleteVersions", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.devtools.artifactregistry.v1.ArtifactRegistry", + "BatchDeleteVersions", + )); + self.inner.unary(req, path, codec).await + } /// Lists files. pub async fn list_files( &mut self, diff --git a/googleapis/src/google.iam.v1.rs b/googleapis/src/google.iam.v1.rs index 43f8cefc..e6457016 100644 --- a/googleapis/src/google.iam.v1.rs +++ b/googleapis/src/google.iam.v1.rs @@ -44,6 +44,7 @@ pub struct GetPolicyOptions { /// /// **JSON example:** /// +/// ``` /// { /// "bindings": [ /// { @@ -71,9 +72,11 @@ pub struct GetPolicyOptions { /// "etag": "BwWWja0YfJA=", /// "version": 3 /// } +/// ``` /// /// **YAML example:** /// +/// ``` /// bindings: /// - members: /// - user:mike@example.com @@ -90,6 +93,7 @@ pub struct GetPolicyOptions { /// expression: request.time < timestamp('2020-10-01T00:00:00.000Z') /// etag: BwWWja0YfJA= /// version: 3 +/// ``` /// /// For a description of IAM and its features, see the /// [IAM documentation](). @@ -161,7 +165,7 @@ pub struct Binding { /// For example, `roles/viewer`, `roles/editor`, or `roles/owner`. 
#[prost(string, tag = "1")] pub role: ::prost::alloc::string::String, - /// Specifies the principals requesting access for a Cloud Platform resource. + /// Specifies the principals requesting access for a Google Cloud resource. /// `members` can have the following values: /// /// * `allUsers`: A special identifier that represents anyone who is @@ -271,8 +275,8 @@ pub struct Binding { /// } /// /// For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ -/// logging. It also exempts jose@example.com from DATA_READ logging, and -/// aliya@example.com from DATA_WRITE logging. +/// logging. It also exempts `jose@example.com` from DATA_READ logging, and +/// `aliya@example.com` from DATA_WRITE logging. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AuditConfig { @@ -383,7 +387,7 @@ pub struct BindingDelta { /// Required #[prost(string, tag = "2")] pub role: ::prost::alloc::string::String, - /// A single identity requesting access for a Cloud Platform resource. + /// A single identity requesting access for a Google Cloud resource. /// Follows the same format of Binding.members. /// Required #[prost(string, tag = "3")] diff --git a/googleapis/src/google.pubsub.v1.rs b/googleapis/src/google.pubsub.v1.rs index 6b11e4b1..d24df08a 100644 --- a/googleapis/src/google.pubsub.v1.rs +++ b/googleapis/src/google.pubsub.v1.rs @@ -76,8 +76,8 @@ pub struct CreateSchemaRequest { /// The ID to use for the schema, which will become the final component of /// the schema's resource name. /// - /// See for resource - /// name constraints. + /// See for + /// resource name constraints. 
#[prost(string, tag = "3")] pub schema_id: ::prost::alloc::string::String, } @@ -569,13 +569,21 @@ pub mod schema_service_client { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MessageStoragePolicy { - /// A list of IDs of GCP regions where messages that are published to the topic - /// may be persisted in storage. Messages published by publishers running in - /// non-allowed GCP regions (or running outside of GCP altogether) will be - /// routed for storage in one of the allowed regions. An empty list means that - /// no regions are allowed, and is not a valid configuration. + /// Optional. A list of IDs of Google Cloud regions where messages that are + /// published to the topic may be persisted in storage. Messages published by + /// publishers running in non-allowed Google Cloud regions (or running outside + /// of Google Cloud altogether) are routed for storage in one of the allowed + /// regions. An empty list means that no regions are allowed, and is not a + /// valid configuration. #[prost(string, repeated, tag = "1")] pub allowed_persistence_regions: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Optional. If true, `allowed_persistence_regions` is also used to enforce + /// in-transit guarantees for messages. That is, Pub/Sub will fail + /// Publish operations on this topic and subscribe operations + /// on any subscription attached to this topic in any region that is + /// not in `allowed_persistence_regions`. + #[prost(bool, tag = "2")] + pub enforce_in_transit: bool, } /// Settings for validating messages published against a schema. #[allow(clippy::derive_partial_eq_without_eq)] @@ -587,20 +595,309 @@ pub struct SchemaSettings { /// deleted. #[prost(string, tag = "1")] pub schema: ::prost::alloc::string::String, - /// The encoding of messages validated against `schema`. + /// Optional. The encoding of messages validated against `schema`. 
#[prost(enumeration = "Encoding", tag = "2")] pub encoding: i32, - /// The minimum (inclusive) revision allowed for validating messages. If empty - /// or not present, allow any revision to be validated against last_revision or - /// any revision created before. + /// Optional. The minimum (inclusive) revision allowed for validating messages. + /// If empty or not present, allow any revision to be validated against + /// last_revision or any revision created before. #[prost(string, tag = "3")] pub first_revision_id: ::prost::alloc::string::String, - /// The maximum (inclusive) revision allowed for validating messages. If empty - /// or not present, allow any revision to be validated against first_revision - /// or any revision created after. + /// Optional. The maximum (inclusive) revision allowed for validating messages. + /// If empty or not present, allow any revision to be validated against + /// first_revision or any revision created after. #[prost(string, tag = "4")] pub last_revision_id: ::prost::alloc::string::String, } +/// Settings for an ingestion data source on a topic. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct IngestionDataSourceSettings { + /// Optional. Platform Logs settings. If unset, no Platform Logs will be + /// generated. + #[prost(message, optional, tag = "4")] + pub platform_logs_settings: ::core::option::Option, + /// Only one source type can have settings set. + #[prost(oneof = "ingestion_data_source_settings::Source", tags = "1, 2")] + pub source: ::core::option::Option, +} +/// Nested message and enum types in `IngestionDataSourceSettings`. +pub mod ingestion_data_source_settings { + /// Ingestion settings for Amazon Kinesis Data Streams. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct AwsKinesis { + /// Output only. An output-only field that indicates the state of the Kinesis + /// ingestion source. 
+ #[prost(enumeration = "aws_kinesis::State", tag = "1")] + pub state: i32, + /// Required. The Kinesis stream ARN to ingest data from. + #[prost(string, tag = "2")] + pub stream_arn: ::prost::alloc::string::String, + /// Required. The Kinesis consumer ARN to used for ingestion in Enhanced + /// Fan-Out mode. The consumer must be already created and ready to be used. + #[prost(string, tag = "3")] + pub consumer_arn: ::prost::alloc::string::String, + /// Required. AWS role ARN to be used for Federated Identity authentication + /// with Kinesis. Check the Pub/Sub docs for how to set up this role and the + /// required permissions that need to be attached to it. + #[prost(string, tag = "4")] + pub aws_role_arn: ::prost::alloc::string::String, + /// Required. The GCP service account to be used for Federated Identity + /// authentication with Kinesis (via a `AssumeRoleWithWebIdentity` call for + /// the provided role). The `aws_role_arn` must be set up with + /// `accounts.google.com:sub` equals to this service account number. + #[prost(string, tag = "5")] + pub gcp_service_account: ::prost::alloc::string::String, + } + /// Nested message and enum types in `AwsKinesis`. + pub mod aws_kinesis { + /// Possible states for ingestion from Amazon Kinesis Data Streams. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum State { + /// Default value. This value is unused. + Unspecified = 0, + /// Ingestion is active. + Active = 1, + /// Permission denied encountered while consuming data from Kinesis. + /// This can happen if: + /// - The provided `aws_role_arn` does not exist or does not have the + /// appropriate permissions attached. + /// - The provided `aws_role_arn` is not set up properly for Identity + /// Federation using `gcp_service_account`. + /// - The Pub/Sub SA is not granted the + /// `iam.serviceAccounts.getOpenIdToken` permission on + /// `gcp_service_account`. 
+ KinesisPermissionDenied = 2, + /// Permission denied encountered while publishing to the topic. This can + /// happen if the Pub/Sub SA has not been granted the [appropriate publish + /// permissions]() + PublishPermissionDenied = 3, + /// The Kinesis stream does not exist. + StreamNotFound = 4, + /// The Kinesis consumer does not exist. + ConsumerNotFound = 5, + } + impl State { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + State::Unspecified => "STATE_UNSPECIFIED", + State::Active => "ACTIVE", + State::KinesisPermissionDenied => "KINESIS_PERMISSION_DENIED", + State::PublishPermissionDenied => "PUBLISH_PERMISSION_DENIED", + State::StreamNotFound => "STREAM_NOT_FOUND", + State::ConsumerNotFound => "CONSUMER_NOT_FOUND", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "STATE_UNSPECIFIED" => Some(Self::Unspecified), + "ACTIVE" => Some(Self::Active), + "KINESIS_PERMISSION_DENIED" => Some(Self::KinesisPermissionDenied), + "PUBLISH_PERMISSION_DENIED" => Some(Self::PublishPermissionDenied), + "STREAM_NOT_FOUND" => Some(Self::StreamNotFound), + "CONSUMER_NOT_FOUND" => Some(Self::ConsumerNotFound), + _ => None, + } + } + } + } + /// Ingestion settings for Cloud Storage. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct CloudStorage { + /// Output only. An output-only field that indicates the state of the Cloud + /// Storage ingestion source. + #[prost(enumeration = "cloud_storage::State", tag = "1")] + pub state: i32, + /// Optional. Cloud Storage bucket. The bucket name must be without any + /// prefix like "gs://". 
See the \[bucket naming requirements\] + /// (). + #[prost(string, tag = "2")] + pub bucket: ::prost::alloc::string::String, + /// Optional. Only objects with a larger or equal creation timestamp will be + /// ingested. + #[prost(message, optional, tag = "6")] + pub minimum_object_create_time: ::core::option::Option<::prost_types::Timestamp>, + /// Optional. Glob pattern used to match objects that will be ingested. If + /// unset, all objects will be ingested. See the [supported + /// patterns](). + #[prost(string, tag = "9")] + pub match_glob: ::prost::alloc::string::String, + /// Defaults to text format. + #[prost(oneof = "cloud_storage::InputFormat", tags = "3, 4, 5")] + pub input_format: ::core::option::Option, + } + /// Nested message and enum types in `CloudStorage`. + pub mod cloud_storage { + /// Configuration for reading Cloud Storage data in text format. Each line of + /// text as specified by the delimiter will be set to the `data` field of a + /// Pub/Sub message. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct TextFormat { + /// Optional. When unset, '\n' is used. + #[prost(string, optional, tag = "1")] + pub delimiter: ::core::option::Option<::prost::alloc::string::String>, + } + /// Configuration for reading Cloud Storage data in Avro binary format. The + /// bytes of each object will be set to the `data` field of a Pub/Sub + /// message. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct AvroFormat {} + /// Configuration for reading Cloud Storage data written via [Cloud Storage + /// subscriptions](). The + /// data and attributes fields of the originally exported Pub/Sub message + /// will be restored when publishing. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct PubSubAvroFormat {} + /// Possible states for ingestion from Cloud Storage. 
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum State { + /// Default value. This value is unused. + Unspecified = 0, + /// Ingestion is active. + Active = 1, + /// Permission denied encountered while calling the Cloud Storage API. This + /// can happen if the Pub/Sub SA has not been granted the + /// [appropriate + /// permissions](): + /// - storage.objects.list: to list the objects in a bucket. + /// - storage.objects.get: to read the objects in a bucket. + /// - storage.buckets.get: to verify the bucket exists. + CloudStoragePermissionDenied = 2, + /// Permission denied encountered while publishing to the topic. This can + /// happen if the Pub/Sub SA has not been granted the [appropriate publish + /// permissions]() + PublishPermissionDenied = 3, + /// The provided Cloud Storage bucket doesn't exist. + BucketNotFound = 4, + /// The Cloud Storage bucket has too many objects, ingestion will be + /// paused. + TooManyObjects = 5, + } + impl State { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + State::Unspecified => "STATE_UNSPECIFIED", + State::Active => "ACTIVE", + State::CloudStoragePermissionDenied => "CLOUD_STORAGE_PERMISSION_DENIED", + State::PublishPermissionDenied => "PUBLISH_PERMISSION_DENIED", + State::BucketNotFound => "BUCKET_NOT_FOUND", + State::TooManyObjects => "TOO_MANY_OBJECTS", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "STATE_UNSPECIFIED" => Some(Self::Unspecified), + "ACTIVE" => Some(Self::Active), + "CLOUD_STORAGE_PERMISSION_DENIED" => Some(Self::CloudStoragePermissionDenied), + "PUBLISH_PERMISSION_DENIED" => Some(Self::PublishPermissionDenied), + "BUCKET_NOT_FOUND" => Some(Self::BucketNotFound), + "TOO_MANY_OBJECTS" => Some(Self::TooManyObjects), + _ => None, + } + } + } + /// Defaults to text format. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum InputFormat { + /// Optional. Data from Cloud Storage will be interpreted as text. + #[prost(message, tag = "3")] + TextFormat(TextFormat), + /// Optional. Data from Cloud Storage will be interpreted in Avro format. + #[prost(message, tag = "4")] + AvroFormat(AvroFormat), + /// Optional. It will be assumed data from Cloud Storage was written via + /// [Cloud Storage + /// subscriptions](). + #[prost(message, tag = "5")] + PubsubAvroFormat(PubSubAvroFormat), + } + } + /// Only one source type can have settings set. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Source { + /// Optional. Amazon Kinesis Data Streams. + #[prost(message, tag = "1")] + AwsKinesis(AwsKinesis), + /// Optional. Cloud Storage. + #[prost(message, tag = "2")] + CloudStorage(CloudStorage), + } +} +/// Settings for Platform Logs produced by Pub/Sub. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct PlatformLogsSettings { + /// Optional. The minimum severity level of Platform Logs that will be written. + #[prost(enumeration = "platform_logs_settings::Severity", tag = "1")] + pub severity: i32, +} +/// Nested message and enum types in `PlatformLogsSettings`. +pub mod platform_logs_settings { + /// Severity levels of Platform Logs. 
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum Severity { + /// Default value. Logs level is unspecified. Logs will be disabled. + Unspecified = 0, + /// Logs will be disabled. + Disabled = 1, + /// Debug logs and higher-severity logs will be written. + Debug = 2, + /// Info logs and higher-severity logs will be written. + Info = 3, + /// Warning logs and higher-severity logs will be written. + Warning = 4, + /// Only error logs will be written. + Error = 5, + } + impl Severity { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Severity::Unspecified => "SEVERITY_UNSPECIFIED", + Severity::Disabled => "DISABLED", + Severity::Debug => "DEBUG", + Severity::Info => "INFO", + Severity::Warning => "WARNING", + Severity::Error => "ERROR", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "SEVERITY_UNSPECIFIED" => Some(Self::Unspecified), + "DISABLED" => Some(Self::Disabled), + "DEBUG" => Some(Self::Debug), + "INFO" => Some(Self::Info), + "WARNING" => Some(Self::Warning), + "ERROR" => Some(Self::Error), + _ => None, + } + } + } +} /// A topic resource. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -613,38 +910,83 @@ pub struct Topic { /// must not start with `"goog"`. #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, - /// See \[Creating and managing labels\] + /// Optional. See \[Creating and managing labels\] /// (). 
#[prost(map = "string, string", tag = "2")] pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, - /// Policy constraining the set of Google Cloud Platform regions where messages - /// published to the topic may be stored. If not present, then no constraints - /// are in effect. + /// Optional. Policy constraining the set of Google Cloud Platform regions + /// where messages published to the topic may be stored. If not present, then + /// no constraints are in effect. #[prost(message, optional, tag = "3")] pub message_storage_policy: ::core::option::Option, - /// The resource name of the Cloud KMS CryptoKey to be used to protect access - /// to messages published on this topic. + /// Optional. The resource name of the Cloud KMS CryptoKey to be used to + /// protect access to messages published on this topic. /// /// The expected format is `projects/*/locations/*/keyRings/*/cryptoKeys/*`. #[prost(string, tag = "5")] pub kms_key_name: ::prost::alloc::string::String, - /// Settings for validating messages published against a schema. + /// Optional. Settings for validating messages published against a schema. #[prost(message, optional, tag = "6")] pub schema_settings: ::core::option::Option, - /// Reserved for future use. This field is set only in responses from the - /// server; it is ignored if it is set in any requests. + /// Optional. Reserved for future use. This field is set only in responses from + /// the server; it is ignored if it is set in any requests. #[prost(bool, tag = "7")] pub satisfies_pzs: bool, - /// Indicates the minimum duration to retain a message after it is published to - /// the topic. If this field is set, messages published to the topic in the - /// last `message_retention_duration` are always available to subscribers. For - /// instance, it allows any attached subscription to [seek to a + /// Optional. 
Indicates the minimum duration to retain a message after it is + /// published to the topic. If this field is set, messages published to the + /// topic in the last `message_retention_duration` are always available to + /// subscribers. For instance, it allows any attached subscription to [seek to + /// a /// timestamp]() /// that is up to `message_retention_duration` in the past. If this field is /// not set, message retention is controlled by settings on individual /// subscriptions. Cannot be more than 31 days or less than 10 minutes. #[prost(message, optional, tag = "8")] pub message_retention_duration: ::core::option::Option<::prost_types::Duration>, + /// Output only. An output-only field indicating the state of the topic. + #[prost(enumeration = "topic::State", tag = "9")] + pub state: i32, + /// Optional. Settings for ingestion from a data source into this topic. + #[prost(message, optional, tag = "10")] + pub ingestion_data_source_settings: ::core::option::Option, +} +/// Nested message and enum types in `Topic`. +pub mod topic { + /// The state of the topic. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum State { + /// Default value. This value is unused. + Unspecified = 0, + /// The topic does not have any persistent errors. + Active = 1, + /// Ingestion from the data source has encountered a permanent error. + /// See the more detailed error state in the corresponding ingestion + /// source configuration. + IngestionResourceError = 2, + } + impl State { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + State::Unspecified => "STATE_UNSPECIFIED", + State::Active => "ACTIVE", + State::IngestionResourceError => "INGESTION_RESOURCE_ERROR", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "STATE_UNSPECIFIED" => Some(Self::Unspecified), + "ACTIVE" => Some(Self::Active), + "INGESTION_RESOURCE_ERROR" => Some(Self::IngestionResourceError), + _ => None, + } + } + } } /// A message that is published by publishers and consumed by subscribers. The /// message must contain either a non-empty data field or at least one attribute. @@ -657,12 +999,12 @@ pub struct Topic { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct PubsubMessage { - /// The message data field. If this field is empty, the message must contain - /// at least one attribute. + /// Optional. The message data field. If this field is empty, the message must + /// contain at least one attribute. #[prost(bytes = "vec", tag = "1")] pub data: ::prost::alloc::vec::Vec, - /// Attributes for this message. If this field is empty, the message must - /// contain non-empty data. This can be used to filter messages on the + /// Optional. Attributes for this message. If this field is empty, the message + /// must contain non-empty data. This can be used to filter messages on the /// subscription. #[prost(map = "string, string", tag = "2")] pub attributes: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, @@ -677,13 +1019,13 @@ pub struct PubsubMessage { /// publisher in a `Publish` call. #[prost(message, optional, tag = "4")] pub publish_time: ::core::option::Option<::prost_types::Timestamp>, - /// If non-empty, identifies related messages for which publish order should be - /// respected. 
If a `Subscription` has `enable_message_ordering` set to `true`, - /// messages published with the same non-empty `ordering_key` value will be - /// delivered to subscribers in the order in which they are received by the - /// Pub/Sub system. All `PubsubMessage`s published in a given `PublishRequest` - /// must specify the same `ordering_key` value. - /// For more information, see [ordering + /// Optional. If non-empty, identifies related messages for which publish order + /// should be respected. If a `Subscription` has `enable_message_ordering` set + /// to `true`, messages published with the same non-empty `ordering_key` value + /// will be delivered to subscribers in the order in which they are received by + /// the Pub/Sub system. All `PubsubMessage`s published in a given + /// `PublishRequest` must specify the same `ordering_key` value. For more + /// information, see [ordering /// messages](). #[prost(string, tag = "5")] pub ordering_key: ::prost::alloc::string::String, @@ -728,9 +1070,9 @@ pub struct PublishRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct PublishResponse { - /// The server-assigned ID of each published message, in the same order as - /// the messages in the request. IDs are guaranteed to be unique within - /// the topic. + /// Optional. The server-assigned ID of each published message, in the same + /// order as the messages in the request. IDs are guaranteed to be unique + /// within the topic. #[prost(string, repeated, tag = "1")] pub message_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } @@ -742,12 +1084,12 @@ pub struct ListTopicsRequest { /// Format is `projects/{project-id}`. #[prost(string, tag = "1")] pub project: ::prost::alloc::string::String, - /// Maximum number of topics to return. + /// Optional. Maximum number of topics to return. 
#[prost(int32, tag = "2")] pub page_size: i32, - /// The value returned by the last `ListTopicsResponse`; indicates that this is - /// a continuation of a prior `ListTopics` call, and that the system should - /// return the next page of data. + /// Optional. The value returned by the last `ListTopicsResponse`; indicates + /// that this is a continuation of a prior `ListTopics` call, and that the + /// system should return the next page of data. #[prost(string, tag = "3")] pub page_token: ::prost::alloc::string::String, } @@ -755,11 +1097,11 @@ pub struct ListTopicsRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListTopicsResponse { - /// The resulting topics. + /// Optional. The resulting topics. #[prost(message, repeated, tag = "1")] pub topics: ::prost::alloc::vec::Vec, - /// If not empty, indicates that there may be more topics that match the - /// request; this value should be passed in a new `ListTopicsRequest`. + /// Optional. If not empty, indicates that there may be more topics that match + /// the request; this value should be passed in a new `ListTopicsRequest`. #[prost(string, tag = "2")] pub next_page_token: ::prost::alloc::string::String, } @@ -771,12 +1113,12 @@ pub struct ListTopicSubscriptionsRequest { /// Format is `projects/{project}/topics/{topic}`. #[prost(string, tag = "1")] pub topic: ::prost::alloc::string::String, - /// Maximum number of subscription names to return. + /// Optional. Maximum number of subscription names to return. #[prost(int32, tag = "2")] pub page_size: i32, - /// The value returned by the last `ListTopicSubscriptionsResponse`; indicates - /// that this is a continuation of a prior `ListTopicSubscriptions` call, and - /// that the system should return the next page of data. + /// Optional. 
The value returned by the last `ListTopicSubscriptionsResponse`; + /// indicates that this is a continuation of a prior `ListTopicSubscriptions` + /// call, and that the system should return the next page of data. #[prost(string, tag = "3")] pub page_token: ::prost::alloc::string::String, } @@ -784,11 +1126,12 @@ pub struct ListTopicSubscriptionsRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListTopicSubscriptionsResponse { - /// The names of subscriptions attached to the topic specified in the request. + /// Optional. The names of subscriptions attached to the topic specified in the + /// request. #[prost(string, repeated, tag = "1")] pub subscriptions: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// If not empty, indicates that there may be more subscriptions that match - /// the request; this value should be passed in a new + /// Optional. If not empty, indicates that there may be more subscriptions that + /// match the request; this value should be passed in a new /// `ListTopicSubscriptionsRequest` to get more subscriptions. #[prost(string, tag = "2")] pub next_page_token: ::prost::alloc::string::String, @@ -801,12 +1144,12 @@ pub struct ListTopicSnapshotsRequest { /// Format is `projects/{project}/topics/{topic}`. #[prost(string, tag = "1")] pub topic: ::prost::alloc::string::String, - /// Maximum number of snapshot names to return. + /// Optional. Maximum number of snapshot names to return. #[prost(int32, tag = "2")] pub page_size: i32, - /// The value returned by the last `ListTopicSnapshotsResponse`; indicates - /// that this is a continuation of a prior `ListTopicSnapshots` call, and - /// that the system should return the next page of data. + /// Optional. The value returned by the last `ListTopicSnapshotsResponse`; + /// indicates that this is a continuation of a prior `ListTopicSnapshots` call, + /// and that the system should return the next page of data. 
#[prost(string, tag = "3")] pub page_token: ::prost::alloc::string::String, } @@ -814,11 +1157,11 @@ pub struct ListTopicSnapshotsRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListTopicSnapshotsResponse { - /// The names of the snapshots that match the request. + /// Optional. The names of the snapshots that match the request. #[prost(string, repeated, tag = "1")] pub snapshots: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// If not empty, indicates that there may be more snapshots that match - /// the request; this value should be passed in a new + /// Optional. If not empty, indicates that there may be more snapshots that + /// match the request; this value should be passed in a new /// `ListTopicSnapshotsRequest` to get more snapshots. #[prost(string, tag = "2")] pub next_page_token: ::prost::alloc::string::String, @@ -865,23 +1208,23 @@ pub struct Subscription { /// field will be `_deleted-topic_` if the topic has been deleted. #[prost(string, tag = "2")] pub topic: ::prost::alloc::string::String, - /// If push delivery is used with this subscription, this field is + /// Optional. If push delivery is used with this subscription, this field is /// used to configure it. #[prost(message, optional, tag = "4")] pub push_config: ::core::option::Option, - /// If delivery to BigQuery is used with this subscription, this field is - /// used to configure it. + /// Optional. If delivery to BigQuery is used with this subscription, this + /// field is used to configure it. #[prost(message, optional, tag = "18")] pub bigquery_config: ::core::option::Option, - /// If delivery to Google Cloud Storage is used with this subscription, this - /// field is used to configure it. + /// Optional. If delivery to Google Cloud Storage is used with this + /// subscription, this field is used to configure it. 
#[prost(message, optional, tag = "22")] pub cloud_storage_config: ::core::option::Option, - /// The approximate amount of time (on a best-effort basis) Pub/Sub waits for - /// the subscriber to acknowledge receipt before resending the message. In the - /// interval after the message is delivered and before it is acknowledged, it - /// is considered to be _outstanding_. During that time period, the - /// message will not be redelivered (on a best-effort basis). + /// Optional. The approximate amount of time (on a best-effort basis) Pub/Sub + /// waits for the subscriber to acknowledge receipt before resending the + /// message. In the interval after the message is delivered and before it is + /// acknowledged, it is considered to be _outstanding_. During that time + /// period, the message will not be redelivered (on a best-effort basis). /// /// For pull subscriptions, this value is used as the initial value for the ack /// deadline. To override this value for a given message, call @@ -899,7 +1242,7 @@ pub struct Subscription { /// system will eventually redeliver the message. #[prost(int32, tag = "5")] pub ack_deadline_seconds: i32, - /// Indicates whether to retain acknowledged messages. If true, then + /// Optional. Indicates whether to retain acknowledged messages. If true, then /// messages are not expunged from the subscription's backlog, even if they are /// acknowledged, until they fall out of the `message_retention_duration` /// window. This must be true if you would like to \[`Seek` to a timestamp\] @@ -907,52 +1250,51 @@ pub struct Subscription { /// the past to replay previously-acknowledged messages. #[prost(bool, tag = "7")] pub retain_acked_messages: bool, - /// How long to retain unacknowledged messages in the subscription's backlog, - /// from the moment a message is published. 
- /// If `retain_acked_messages` is true, then this also configures the retention - /// of acknowledged messages, and thus configures how far back in time a `Seek` - /// can be done. Defaults to 7 days. Cannot be more than 7 days or less than 10 - /// minutes. + /// Optional. How long to retain unacknowledged messages in the subscription's + /// backlog, from the moment a message is published. If `retain_acked_messages` + /// is true, then this also configures the retention of acknowledged messages, + /// and thus configures how far back in time a `Seek` can be done. Defaults to + /// 7 days. Cannot be more than 31 days or less than 10 minutes. #[prost(message, optional, tag = "8")] pub message_retention_duration: ::core::option::Option<::prost_types::Duration>, - /// See [Creating and managing + /// Optional. See [Creating and managing /// labels](). #[prost(map = "string, string", tag = "9")] pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, - /// If true, messages published with the same `ordering_key` in `PubsubMessage` - /// will be delivered to the subscribers in the order in which they - /// are received by the Pub/Sub system. Otherwise, they may be delivered in - /// any order. + /// Optional. If true, messages published with the same `ordering_key` in + /// `PubsubMessage` will be delivered to the subscribers in the order in which + /// they are received by the Pub/Sub system. Otherwise, they may be delivered + /// in any order. #[prost(bool, tag = "10")] pub enable_message_ordering: bool, - /// A policy that specifies the conditions for this subscription's expiration. - /// A subscription is considered active as long as any connected subscriber is - /// successfully consuming messages from the subscription or is issuing - /// operations on the subscription. If `expiration_policy` is not set, a - /// *default policy* with `ttl` of 31 days will be used. The minimum allowed + /// Optional. 
A policy that specifies the conditions for this subscription's + /// expiration. A subscription is considered active as long as any connected + /// subscriber is successfully consuming messages from the subscription or is + /// issuing operations on the subscription. If `expiration_policy` is not set, + /// a *default policy* with `ttl` of 31 days will be used. The minimum allowed /// value for `expiration_policy.ttl` is 1 day. If `expiration_policy` is set, /// but `expiration_policy.ttl` is not set, the subscription never expires. #[prost(message, optional, tag = "11")] pub expiration_policy: ::core::option::Option, - /// An expression written in the Pub/Sub [filter + /// Optional. An expression written in the Pub/Sub [filter /// language](). If non-empty, /// then only `PubsubMessage`s whose `attributes` field matches the filter are /// delivered on this subscription. If empty, then no messages are filtered /// out. #[prost(string, tag = "12")] pub filter: ::prost::alloc::string::String, - /// A policy that specifies the conditions for dead lettering messages in - /// this subscription. If dead_letter_policy is not set, dead lettering - /// is disabled. + /// Optional. A policy that specifies the conditions for dead lettering + /// messages in this subscription. If dead_letter_policy is not set, dead + /// lettering is disabled. /// - /// The Cloud Pub/Sub service account associated with this subscriptions's + /// The Pub/Sub service account associated with this subscriptions's /// parent project (i.e., /// service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have /// permission to Acknowledge() messages on this subscription. #[prost(message, optional, tag = "13")] pub dead_letter_policy: ::core::option::Option, - /// A policy that specifies how Pub/Sub retries message delivery for this - /// subscription. + /// Optional. A policy that specifies how Pub/Sub retries message delivery for + /// this subscription. 
/// /// If not set, the default retry policy is applied. This generally implies /// that messages will be retried as soon as possible for healthy subscribers. @@ -960,18 +1302,19 @@ pub struct Subscription { /// exceeded events for a given message. #[prost(message, optional, tag = "14")] pub retry_policy: ::core::option::Option, - /// Indicates whether the subscription is detached from its topic. Detached - /// subscriptions don't receive messages from their topic and don't retain any - /// backlog. `Pull` and `StreamingPull` requests will return + /// Optional. Indicates whether the subscription is detached from its topic. + /// Detached subscriptions don't receive messages from their topic and don't + /// retain any backlog. `Pull` and `StreamingPull` requests will return /// FAILED_PRECONDITION. If the subscription is a push subscription, pushes to /// the endpoint will not be made. #[prost(bool, tag = "15")] pub detached: bool, - /// If true, Pub/Sub provides the following guarantees for the delivery of - /// a message with a given value of `message_id` on this subscription: + /// Optional. If true, Pub/Sub provides the following guarantees for the + /// delivery of a message with a given value of `message_id` on this + /// subscription: /// /// * The message sent to a subscriber is guaranteed not to be resent - /// before the message's acknowledgement deadline expires. + /// before the message's acknowledgement deadline expires. /// * An acknowledged message will not be resent to a subscriber. /// /// Note that subscribers may still receive multiple copies of a message @@ -992,9 +1335,29 @@ pub struct Subscription { /// subscription can receive messages. #[prost(enumeration = "subscription::State", tag = "19")] pub state: i32, + /// Output only. Information about the associated Analytics Hub subscription. + /// Only set if the subscritpion is created by Analytics Hub. 
+ #[prost(message, optional, tag = "23")] + pub analytics_hub_subscription_info: ::core::option::Option, } /// Nested message and enum types in `Subscription`. pub mod subscription { + /// Information about an associated Analytics Hub subscription + /// (). + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct AnalyticsHubSubscriptionInfo { + /// Optional. The name of the associated Analytics Hub listing resource. + /// Pattern: + /// "projects/{project}/locations/{location}/dataExchanges/{data_exchange}/listings/{listing}" + #[prost(string, tag = "1")] + pub listing: ::prost::alloc::string::String, + /// Optional. The name of the associated Analytics Hub subscription resource. + /// Pattern: + /// "projects/{project}/locations/{location}/subscriptions/{subscription}" + #[prost(string, tag = "2")] + pub subscription: ::prost::alloc::string::String, + } /// Possible states for a subscription. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] @@ -1031,7 +1394,7 @@ pub mod subscription { } } } -/// A policy that specifies how Cloud Pub/Sub retries message delivery. +/// A policy that specifies how Pub/Sub retries message delivery. /// /// Retry delay will be exponential based on provided minimum and maximum /// backoffs. @@ -1045,12 +1408,13 @@ pub mod subscription { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct RetryPolicy { - /// The minimum delay between consecutive deliveries of a given message. - /// Value should be between 0 and 600 seconds. Defaults to 10 seconds. + /// Optional. The minimum delay between consecutive deliveries of a given + /// message. Value should be between 0 and 600 seconds. Defaults to 10 seconds. 
#[prost(message, optional, tag = "1")] pub minimum_backoff: ::core::option::Option<::prost_types::Duration>, - /// The maximum delay between consecutive deliveries of a given message. - /// Value should be between 0 and 600 seconds. Defaults to 600 seconds. + /// Optional. The maximum delay between consecutive deliveries of a given + /// message. Value should be between 0 and 600 seconds. Defaults to 600 + /// seconds. #[prost(message, optional, tag = "2")] pub maximum_backoff: ::core::option::Option<::prost_types::Duration>, } @@ -1062,19 +1426,19 @@ pub struct RetryPolicy { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeadLetterPolicy { - /// The name of the topic to which dead letter messages should be published. - /// Format is `projects/{project}/topics/{topic}`.The Cloud Pub/Sub service - /// account associated with the enclosing subscription's parent project (i.e., - /// service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have - /// permission to Publish() to this topic. + /// Optional. The name of the topic to which dead letter messages should be + /// published. Format is `projects/{project}/topics/{topic}`.The Pub/Sub + /// service account associated with the enclosing subscription's parent project + /// (i.e., service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must + /// have permission to Publish() to this topic. /// /// The operation will fail if the topic does not exist. /// Users should ensure that there is a subscription attached to this topic /// since messages published to a topic with no subscriptions are lost. #[prost(string, tag = "1")] pub dead_letter_topic: ::prost::alloc::string::String, - /// The maximum number of delivery attempts for any message. The value must be - /// between 5 and 100. + /// Optional. The maximum number of delivery attempts for any message. The + /// value must be between 5 and 100. 
/// /// The number of delivery attempts is defined as 1 + (the sum of number of /// NACKs and number of times the acknowledgement deadline has been exceeded @@ -1094,12 +1458,12 @@ pub struct DeadLetterPolicy { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ExpirationPolicy { - /// Specifies the "time-to-live" duration for an associated resource. The - /// resource expires if it is not active for a period of `ttl`. The definition - /// of "activity" depends on the type of the associated resource. The minimum - /// and maximum allowed values for `ttl` depend on the type of the associated - /// resource, as well. If `ttl` is not set, the associated resource never - /// expires. + /// Optional. Specifies the "time-to-live" duration for an associated resource. + /// The resource expires if it is not active for a period of `ttl`. The + /// definition of "activity" depends on the type of the associated resource. + /// The minimum and maximum allowed values for `ttl` depend on the type of the + /// associated resource, as well. If `ttl` is not set, the associated resource + /// never expires. #[prost(message, optional, tag = "1")] pub ttl: ::core::option::Option<::prost_types::Duration>, } @@ -1107,12 +1471,12 @@ pub struct ExpirationPolicy { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct PushConfig { - /// A URL locating the endpoint to which messages should be pushed. + /// Optional. A URL locating the endpoint to which messages should be pushed. /// For example, a Webhook endpoint might use ` #[prost(string, tag = "1")] pub push_endpoint: ::prost::alloc::string::String, - /// Endpoint configuration attributes that can be used to control different - /// aspects of the message delivery. + /// Optional. Endpoint configuration attributes that can be used to control + /// different aspects of the message delivery. 
/// /// The only currently supported attribute is `x-goog-version`, which you can /// use to change the format of the pushed message. This attribute @@ -1136,7 +1500,7 @@ pub struct PushConfig { pub attributes: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, /// An authentication method used by push endpoints to verify the source of /// push requests. This can be used with push endpoints that are private by - /// default to allow requests only from the Cloud Pub/Sub system, for example. + /// default to allow requests only from the Pub/Sub system, for example. /// This field is optional and should be set only by users interested in /// authenticated push. #[prost(oneof = "push_config::AuthenticationMethod", tags = "3")] @@ -1154,19 +1518,20 @@ pub mod push_config { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct OidcToken { - /// [Service account + /// Optional. [Service account /// email]() /// used for generating the OIDC token. For more information /// on setting up authentication, see /// [Push subscriptions](). #[prost(string, tag = "1")] pub service_account_email: ::prost::alloc::string::String, - /// Audience to be used when generating OIDC token. The audience claim - /// identifies the recipients that the JWT is intended for. The audience - /// value is a single case-sensitive string. Having multiple values (array) - /// for the audience field is not supported. More info about the OIDC JWT - /// token audience here: - /// Note: if not specified, the Push endpoint URL will be used. + /// Optional. Audience to be used when generating OIDC token. The audience + /// claim identifies the recipients that the JWT is intended for. The + /// audience value is a single case-sensitive string. Having multiple values + /// (array) for the audience field is not supported. 
More info about the OIDC + /// JWT token audience here: + /// Note: if not specified, + /// the Push endpoint URL will be used. #[prost(string, tag = "2")] pub audience: ::prost::alloc::string::String, } @@ -1180,7 +1545,7 @@ pub mod push_config { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct NoWrapper { - /// When true, writes the Pub/Sub message metadata to + /// Optional. When true, writes the Pub/Sub message metadata to /// `x-goog-pubsub-:` headers of the HTTP request. Writes the /// Pub/Sub message attributes to `:` headers of the HTTP request. #[prost(bool, tag = "1")] @@ -1188,14 +1553,15 @@ pub mod push_config { } /// An authentication method used by push endpoints to verify the source of /// push requests. This can be used with push endpoints that are private by - /// default to allow requests only from the Cloud Pub/Sub system, for example. + /// default to allow requests only from the Pub/Sub system, for example. /// This field is optional and should be set only by users interested in /// authenticated push. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum AuthenticationMethod { - /// If specified, Pub/Sub will generate and attach an OIDC JWT token as an - /// `Authorization` header in the HTTP request for every pushed message. + /// Optional. If specified, Pub/Sub will generate and attach an OIDC JWT + /// token as an `Authorization` header in the HTTP request for every pushed + /// message. #[prost(message, tag = "3")] OidcToken(OidcToken), } @@ -1204,12 +1570,12 @@ pub mod push_config { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum Wrapper { - /// When set, the payload to the push endpoint is in the form of the JSON - /// representation of a PubsubMessage + /// Optional. 
When set, the payload to the push endpoint is in the form of + /// the JSON representation of a PubsubMessage /// (). #[prost(message, tag = "4")] PubsubWrapper(PubsubWrapper), - /// When set, the payload to the push endpoint is not wrapped. + /// Optional. When set, the payload to the push endpoint is not wrapped. #[prost(message, tag = "5")] NoWrapper(NoWrapper), } @@ -1218,25 +1584,26 @@ pub mod push_config { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BigQueryConfig { - /// The name of the table to which to write data, of the form + /// Optional. The name of the table to which to write data, of the form /// {projectId}.{datasetId}.{tableId} #[prost(string, tag = "1")] pub table: ::prost::alloc::string::String, - /// When true, use the topic's schema as the columns to write to in BigQuery, - /// if it exists. + /// Optional. When true, use the topic's schema as the columns to write to in + /// BigQuery, if it exists. `use_topic_schema` and `use_table_schema` cannot be + /// enabled at the same time. #[prost(bool, tag = "2")] pub use_topic_schema: bool, - /// When true, write the subscription name, message_id, publish_time, + /// Optional. When true, write the subscription name, message_id, publish_time, /// attributes, and ordering_key to additional columns in the table. The /// subscription name, message_id, and publish_time fields are put in their own /// columns while all other message properties (other than data) are written to /// a JSON object in the attributes column. #[prost(bool, tag = "3")] pub write_metadata: bool, - /// When true and use_topic_schema is true, any fields that are a part of the - /// topic schema that are not part of the BigQuery table schema are dropped - /// when writing to BigQuery. Otherwise, the schemas must be kept in sync and - /// any messages with extra fields are not written and remain in the + /// Optional. 
When true and use_topic_schema is true, any fields that are a + /// part of the topic schema that are not part of the BigQuery table schema are + /// dropped when writing to BigQuery. Otherwise, the schemas must be kept in + /// sync and any messages with extra fields are not written and remain in the /// subscription's backlog. #[prost(bool, tag = "4")] pub drop_unknown_fields: bool, @@ -1244,6 +1611,19 @@ pub struct BigQueryConfig { /// subscription can receive messages. #[prost(enumeration = "big_query_config::State", tag = "5")] pub state: i32, + /// Optional. When true, use the BigQuery table's schema as the columns to + /// write to in BigQuery. `use_table_schema` and `use_topic_schema` cannot be + /// enabled at the same time. + #[prost(bool, tag = "6")] + pub use_table_schema: bool, + /// Optional. The service account to use to write to BigQuery. The subscription + /// creator or updater that specifies this field must have + /// `iam.serviceAccounts.actAs` permission on the service account. If not + /// specified, the Pub/Sub [service + /// agent](), + /// service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. + #[prost(string, tag = "7")] + pub service_account_email: ::prost::alloc::string::String, } /// Nested message and enum types in `BigQueryConfig`. pub mod big_query_config { @@ -1258,14 +1638,17 @@ pub mod big_query_config { /// Cannot write to the BigQuery table because of permission denied errors. /// This can happen if /// - Pub/Sub SA has not been granted the [appropriate BigQuery IAM - /// permissions]() + /// permissions]() /// - bigquery.googleapis.com API is not enabled for the project - /// ([instructions]()) + /// ([instructions]()) PermissionDenied = 2, /// Cannot write to the BigQuery table because it does not exist. NotFound = 3, /// Cannot write to the BigQuery table due to a schema mismatch. 
SchemaMismatch = 4, + /// Cannot write to the destination because enforce_in_transit is set to true + /// and the destination locations are not in the allowed regions. + InTransitLocationRestriction = 5, } impl State { /// String value of the enum field names used in the ProtoBuf definition. @@ -1279,6 +1662,7 @@ pub mod big_query_config { State::PermissionDenied => "PERMISSION_DENIED", State::NotFound => "NOT_FOUND", State::SchemaMismatch => "SCHEMA_MISMATCH", + State::InTransitLocationRestriction => "IN_TRANSIT_LOCATION_RESTRICTION", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -1289,6 +1673,7 @@ pub mod big_query_config { "PERMISSION_DENIED" => Some(Self::PermissionDenied), "NOT_FOUND" => Some(Self::NotFound), "SCHEMA_MISMATCH" => Some(Self::SchemaMismatch), + "IN_TRANSIT_LOCATION_RESTRICTION" => Some(Self::InTransitLocationRestriction), _ => None, } } @@ -1304,29 +1689,46 @@ pub struct CloudStorageConfig { /// requirements] (). #[prost(string, tag = "1")] pub bucket: ::prost::alloc::string::String, - /// User-provided prefix for Cloud Storage filename. See the [object naming - /// requirements](). + /// Optional. User-provided prefix for Cloud Storage filename. See the [object + /// naming requirements](). #[prost(string, tag = "2")] pub filename_prefix: ::prost::alloc::string::String, - /// User-provided suffix for Cloud Storage filename. See the [object naming - /// requirements](). Must - /// not end in "/". + /// Optional. User-provided suffix for Cloud Storage filename. See the [object + /// naming requirements](). + /// Must not end in "/". #[prost(string, tag = "3")] pub filename_suffix: ::prost::alloc::string::String, - /// The maximum duration that can elapse before a new Cloud Storage file is - /// created. Min 1 minute, max 10 minutes, default 5 minutes. May not exceed - /// the subscription's acknowledgement deadline. + /// Optional. 
User-provided format string specifying how to represent datetimes + /// in Cloud Storage filenames. See the [datetime format + /// guidance](). + #[prost(string, tag = "10")] + pub filename_datetime_format: ::prost::alloc::string::String, + /// Optional. The maximum duration that can elapse before a new Cloud Storage + /// file is created. Min 1 minute, max 10 minutes, default 5 minutes. May not + /// exceed the subscription's acknowledgement deadline. #[prost(message, optional, tag = "6")] pub max_duration: ::core::option::Option<::prost_types::Duration>, - /// The maximum bytes that can be written to a Cloud Storage file before a new - /// file is created. Min 1 KB, max 10 GiB. The max_bytes limit may be exceeded - /// in cases where messages are larger than the limit. + /// Optional. The maximum bytes that can be written to a Cloud Storage file + /// before a new file is created. Min 1 KB, max 10 GiB. The max_bytes limit may + /// be exceeded in cases where messages are larger than the limit. #[prost(int64, tag = "7")] pub max_bytes: i64, + /// Optional. The maximum number of messages that can be written to a Cloud + /// Storage file before a new file is created. Min 1000 messages. + #[prost(int64, tag = "8")] + pub max_messages: i64, /// Output only. An output-only field that indicates whether or not the /// subscription can receive messages. #[prost(enumeration = "cloud_storage_config::State", tag = "9")] pub state: i32, + /// Optional. The service account to use to write to Cloud Storage. The + /// subscription creator or updater that specifies this field must have + /// `iam.serviceAccounts.actAs` permission on the service account. If not + /// specified, the Pub/Sub + /// [service agent](), + /// service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. + #[prost(string, tag = "11")] + pub service_account_email: ::prost::alloc::string::String, /// Defaults to text format. 
#[prost(oneof = "cloud_storage_config::OutputFormat", tags = "4, 5")] pub output_format: ::core::option::Option, @@ -1344,14 +1746,18 @@ pub mod cloud_storage_config { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct AvroConfig { - /// When true, write the subscription name, message_id, publish_time, - /// attributes, and ordering_key as additional fields in the output. The - /// subscription name, message_id, and publish_time fields are put in their - /// own fields while all other message properties other than data (for - /// example, an ordering_key, if present) are added as entries in the - /// attributes map. + /// Optional. When true, write the subscription name, message_id, + /// publish_time, attributes, and ordering_key as additional fields in the + /// output. The subscription name, message_id, and publish_time fields are + /// put in their own fields while all other message properties other than + /// data (for example, an ordering_key, if present) are added as entries in + /// the attributes map. #[prost(bool, tag = "1")] pub write_metadata: bool, + /// Optional. When true, the output Cloud Storage file will be serialized + /// using the topic schema, if it exists. + #[prost(bool, tag = "2")] + pub use_topic_schema: bool, } /// Possible states for a Cloud Storage subscription. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] @@ -1366,6 +1772,12 @@ pub mod cloud_storage_config { PermissionDenied = 2, /// Cannot write to the Cloud Storage bucket because it does not exist. NotFound = 3, + /// Cannot write to the destination because enforce_in_transit is set to true + /// and the destination locations are not in the allowed regions. + InTransitLocationRestriction = 4, + /// Cannot write to the Cloud Storage bucket due to an incompatibility + /// between the topic schema and subscription settings. 
+ SchemaMismatch = 5, } impl State { /// String value of the enum field names used in the ProtoBuf definition. @@ -1378,6 +1790,8 @@ pub mod cloud_storage_config { State::Active => "ACTIVE", State::PermissionDenied => "PERMISSION_DENIED", State::NotFound => "NOT_FOUND", + State::InTransitLocationRestriction => "IN_TRANSIT_LOCATION_RESTRICTION", + State::SchemaMismatch => "SCHEMA_MISMATCH", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -1387,6 +1801,8 @@ pub mod cloud_storage_config { "ACTIVE" => Some(Self::Active), "PERMISSION_DENIED" => Some(Self::PermissionDenied), "NOT_FOUND" => Some(Self::NotFound), + "IN_TRANSIT_LOCATION_RESTRICTION" => Some(Self::InTransitLocationRestriction), + "SCHEMA_MISMATCH" => Some(Self::SchemaMismatch), _ => None, } } @@ -1395,10 +1811,12 @@ pub mod cloud_storage_config { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum OutputFormat { - /// If set, message data will be written to Cloud Storage in text format. + /// Optional. If set, message data will be written to Cloud Storage in text + /// format. #[prost(message, tag = "4")] TextConfig(TextConfig), - /// If set, message data will be written to Cloud Storage in Avro format. + /// Optional. If set, message data will be written to Cloud Storage in Avro + /// format. #[prost(message, tag = "5")] AvroConfig(AvroConfig), } @@ -1407,14 +1825,14 @@ pub mod cloud_storage_config { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReceivedMessage { - /// This ID can be used to acknowledge the received message. + /// Optional. This ID can be used to acknowledge the received message. #[prost(string, tag = "1")] pub ack_id: ::prost::alloc::string::String, - /// The message. + /// Optional. The message. 
#[prost(message, optional, tag = "2")] pub message: ::core::option::Option, - /// The approximate number of times that Cloud Pub/Sub has attempted to deliver - /// the associated message to a subscriber. + /// Optional. The approximate number of times that Pub/Sub has attempted to + /// deliver the associated message to a subscriber. /// /// More precisely, this is 1 + (number of NACKs) + /// (number of ack_deadline exceeds) for this message. @@ -1461,12 +1879,12 @@ pub struct ListSubscriptionsRequest { /// Format is `projects/{project-id}`. #[prost(string, tag = "1")] pub project: ::prost::alloc::string::String, - /// Maximum number of subscriptions to return. + /// Optional. Maximum number of subscriptions to return. #[prost(int32, tag = "2")] pub page_size: i32, - /// The value returned by the last `ListSubscriptionsResponse`; indicates that - /// this is a continuation of a prior `ListSubscriptions` call, and that the - /// system should return the next page of data. + /// Optional. The value returned by the last `ListSubscriptionsResponse`; + /// indicates that this is a continuation of a prior `ListSubscriptions` call, + /// and that the system should return the next page of data. #[prost(string, tag = "3")] pub page_token: ::prost::alloc::string::String, } @@ -1474,11 +1892,11 @@ pub struct ListSubscriptionsRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListSubscriptionsResponse { - /// The subscriptions that match the request. + /// Optional. The subscriptions that match the request. #[prost(message, repeated, tag = "1")] pub subscriptions: ::prost::alloc::vec::Vec, - /// If not empty, indicates that there may be more subscriptions that match - /// the request; this value should be passed in a new + /// Optional. 
If not empty, indicates that there may be more subscriptions that + /// match the request; this value should be passed in a new /// `ListSubscriptionsRequest` to get more subscriptions. #[prost(string, tag = "2")] pub next_page_token: ::prost::alloc::string::String, @@ -1537,8 +1955,8 @@ pub struct PullRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct PullResponse { - /// Received Pub/Sub messages. The list will be empty if there are no more - /// messages available in the backlog, or if no messages could be returned + /// Optional. Received Pub/Sub messages. The list will be empty if there are no + /// more messages available in the backlog, or if no messages could be returned /// before the request timeout. For JSON, the response can be entirely /// empty. The Pub/Sub system may return fewer than the `maxMessages` requested /// even if there are more messages available in the backlog. @@ -1563,7 +1981,8 @@ pub struct ModifyAckDeadlineRequest { /// delivery to another subscriber client. This typically results in an /// increase in the rate of message redeliveries (that is, duplicates). /// The minimum deadline you can specify is 0 seconds. - /// The maximum deadline you can specify is 600 seconds (10 minutes). + /// The maximum deadline you can specify in a single request is 600 seconds + /// (10 minutes). #[prost(int32, tag = "3")] pub ack_deadline_seconds: i32, } @@ -1593,14 +2012,15 @@ pub struct StreamingPullRequest { /// Format is `projects/{project}/subscriptions/{sub}`. #[prost(string, tag = "1")] pub subscription: ::prost::alloc::string::String, - /// List of acknowledgement IDs for acknowledging previously received messages - /// (received on this stream or a different stream). If an ack ID has expired, - /// the corresponding message may be redelivered later. Acknowledging a message - /// more than once will not result in an error. 
If the acknowledgement ID is - /// malformed, the stream will be aborted with status `INVALID_ARGUMENT`. + /// Optional. List of acknowledgement IDs for acknowledging previously received + /// messages (received on this stream or a different stream). If an ack ID has + /// expired, the corresponding message may be redelivered later. Acknowledging + /// a message more than once will not result in an error. If the + /// acknowledgement ID is malformed, the stream will be aborted with status + /// `INVALID_ARGUMENT`. #[prost(string, repeated, tag = "2")] pub ack_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// The list of new ack deadlines for the IDs listed in + /// Optional. The list of new ack deadlines for the IDs listed in /// `modify_deadline_ack_ids`. The size of this list must be the same as the /// size of `modify_deadline_ack_ids`. If it differs the stream will be aborted /// with `INVALID_ARGUMENT`. Each element in this list is applied to the @@ -1611,11 +2031,11 @@ pub struct StreamingPullRequest { /// the message is immediately made available for another streaming or /// non-streaming pull request. If the value is < 0 (an error), the stream will /// be aborted with status `INVALID_ARGUMENT`. - #[prost(int32, repeated, tag = "3")] + #[prost(int32, repeated, packed = "false", tag = "3")] pub modify_deadline_seconds: ::prost::alloc::vec::Vec, - /// List of acknowledgement IDs whose deadline will be modified based on the - /// corresponding element in `modify_deadline_seconds`. This field can be used - /// to indicate that more time is needed to process a message by the + /// Optional. List of acknowledgement IDs whose deadline will be modified based + /// on the corresponding element in `modify_deadline_seconds`. This field can + /// be used to indicate that more time is needed to process a message by the /// subscriber, or to make the message available for redelivery if the /// processing was interrupted. 
#[prost(string, repeated, tag = "4")] @@ -1626,16 +2046,16 @@ pub struct StreamingPullRequest { /// seconds. The maximum deadline you can specify is 600 seconds (10 minutes). #[prost(int32, tag = "5")] pub stream_ack_deadline_seconds: i32, - /// A unique identifier that is used to distinguish client instances from each - /// other. Only needs to be provided on the initial request. When a stream - /// disconnects and reconnects for the same stream, the client_id should be set - /// to the same value so that state associated with the old stream can be - /// transferred to the new stream. The same client_id should not be used for + /// Optional. A unique identifier that is used to distinguish client instances + /// from each other. Only needs to be provided on the initial request. When a + /// stream disconnects and reconnects for the same stream, the client_id should + /// be set to the same value so that state associated with the old stream can + /// be transferred to the new stream. The same client_id should not be used for /// different client instances. #[prost(string, tag = "6")] pub client_id: ::prost::alloc::string::String, - /// Flow control settings for the maximum number of outstanding messages. When - /// there are `max_outstanding_messages` or more currently sent to the + /// Optional. Flow control settings for the maximum number of outstanding + /// messages. When there are `max_outstanding_messages` currently sent to the /// streaming pull client that have not yet been acked or nacked, the server /// stops sending more messages. The sending of messages resumes once the /// number of outstanding messages is less than this value. If the value is @@ -1645,14 +2065,14 @@ pub struct StreamingPullRequest { /// `INVALID_ARGUMENT`. #[prost(int64, tag = "7")] pub max_outstanding_messages: i64, - /// Flow control settings for the maximum number of outstanding bytes. 
When - /// there are `max_outstanding_bytes` or more worth of messages currently sent - /// to the streaming pull client that have not yet been acked or nacked, the - /// server will stop sending more messages. The sending of messages resumes - /// once the number of outstanding bytes is less than this value. If the value - /// is <= 0, there is no limit to the number of outstanding bytes. This - /// property can only be set on the initial StreamingPullRequest. If it is set - /// on a subsequent request, the stream will be aborted with status + /// Optional. Flow control settings for the maximum number of outstanding + /// bytes. When there are `max_outstanding_bytes` or more worth of messages + /// currently sent to the streaming pull client that have not yet been acked or + /// nacked, the server will stop sending more messages. The sending of messages + /// resumes once the number of outstanding bytes is less than this value. If + /// the value is <= 0, there is no limit to the number of outstanding bytes. + /// This property can only be set on the initial StreamingPullRequest. If it is + /// set on a subsequent request, the stream will be aborted with status /// `INVALID_ARGUMENT`. #[prost(int64, tag = "8")] pub max_outstanding_bytes: i64, @@ -1662,19 +2082,19 @@ pub struct StreamingPullRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct StreamingPullResponse { - /// Received Pub/Sub messages. This will not be empty. + /// Optional. Received Pub/Sub messages. This will not be empty. #[prost(message, repeated, tag = "1")] pub received_messages: ::prost::alloc::vec::Vec, - /// This field will only be set if `enable_exactly_once_delivery` is set to - /// `true`. + /// Optional. This field will only be set if `enable_exactly_once_delivery` is + /// set to `true`. 
#[prost(message, optional, tag = "5")] pub acknowledge_confirmation: ::core::option::Option, - /// This field will only be set if `enable_exactly_once_delivery` is set to - /// `true`. + /// Optional. This field will only be set if `enable_exactly_once_delivery` is + /// set to `true`. #[prost(message, optional, tag = "3")] pub modify_ack_deadline_confirmation: ::core::option::Option, - /// Properties associated with this subscription. + /// Optional. Properties associated with this subscription. #[prost(message, optional, tag = "4")] pub subscription_properties: ::core::option::Option, } @@ -1685,17 +2105,18 @@ pub mod streaming_pull_response { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AcknowledgeConfirmation { - /// Successfully processed acknowledgement IDs. + /// Optional. Successfully processed acknowledgement IDs. #[prost(string, repeated, tag = "1")] pub ack_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// List of acknowledgement IDs that were malformed or whose acknowledgement - /// deadline has expired. + /// Optional. List of acknowledgement IDs that were malformed or whose + /// acknowledgement deadline has expired. #[prost(string, repeated, tag = "2")] pub invalid_ack_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// List of acknowledgement IDs that were out of order. + /// Optional. List of acknowledgement IDs that were out of order. #[prost(string, repeated, tag = "3")] pub unordered_ack_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// List of acknowledgement IDs that failed processing with temporary issues. + /// Optional. List of acknowledgement IDs that failed processing with + /// temporary issues. 
#[prost(string, repeated, tag = "4")] pub temporary_failed_ack_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } @@ -1704,14 +2125,15 @@ pub mod streaming_pull_response { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ModifyAckDeadlineConfirmation { - /// Successfully processed acknowledgement IDs. + /// Optional. Successfully processed acknowledgement IDs. #[prost(string, repeated, tag = "1")] pub ack_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// List of acknowledgement IDs that were malformed or whose acknowledgement - /// deadline has expired. + /// Optional. List of acknowledgement IDs that were malformed or whose + /// acknowledgement deadline has expired. #[prost(string, repeated, tag = "2")] pub invalid_ack_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// List of acknowledgement IDs that failed processing with temporary issues. + /// Optional. List of acknowledgement IDs that failed processing with + /// temporary issues. #[prost(string, repeated, tag = "3")] pub temporary_failed_ack_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } @@ -1719,10 +2141,11 @@ pub mod streaming_pull_response { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct SubscriptionProperties { - /// True iff exactly once delivery is enabled for this subscription. + /// Optional. True iff exactly once delivery is enabled for this + /// subscription. #[prost(bool, tag = "1")] pub exactly_once_delivery_enabled: bool, - /// True iff message ordering is enabled for this subscription. + /// Optional. True iff message ordering is enabled for this subscription. #[prost(bool, tag = "2")] pub message_ordering_enabled: bool, } @@ -1735,8 +2158,8 @@ pub struct CreateSnapshotRequest { /// in the request, the server will assign a random name for this snapshot on /// the same project as the subscription. 
Note that for REST API requests, you /// must specify a name. See the [resource name - /// rules](). Format - /// is `projects/{project}/snapshots/{snap}`. + /// rules](). + /// Format is `projects/{project}/snapshots/{snap}`. #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, /// Required. The subscription whose backlog the snapshot retains. @@ -1750,7 +2173,7 @@ pub struct CreateSnapshotRequest { /// Format is `projects/{project}/subscriptions/{sub}`. #[prost(string, tag = "2")] pub subscription: ::prost::alloc::string::String, - /// See [Creating and managing + /// Optional. See [Creating and managing /// labels](). #[prost(map = "string, string", tag = "3")] pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, @@ -1775,13 +2198,14 @@ pub struct UpdateSnapshotRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Snapshot { - /// The name of the snapshot. + /// Optional. The name of the snapshot. #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, - /// The name of the topic from which this snapshot is retaining messages. + /// Optional. The name of the topic from which this snapshot is retaining + /// messages. #[prost(string, tag = "2")] pub topic: ::prost::alloc::string::String, - /// The snapshot is guaranteed to exist up until this time. + /// Optional. The snapshot is guaranteed to exist up until this time. /// A newly-created snapshot expires no later than 7 days from the time of its /// creation. Its exact lifetime is determined at creation by the existing /// backlog in the source subscription. Specifically, the lifetime of the @@ -1793,7 +2217,7 @@ pub struct Snapshot { /// snapshot that would expire in less than 1 hour after creation. #[prost(message, optional, tag = "3")] pub expire_time: ::core::option::Option<::prost_types::Timestamp>, - /// See \[Creating and managing labels\] + /// Optional. 
See \[Creating and managing labels\] /// (). #[prost(map = "string, string", tag = "4")] pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, @@ -1815,12 +2239,12 @@ pub struct ListSnapshotsRequest { /// Format is `projects/{project-id}`. #[prost(string, tag = "1")] pub project: ::prost::alloc::string::String, - /// Maximum number of snapshots to return. + /// Optional. Maximum number of snapshots to return. #[prost(int32, tag = "2")] pub page_size: i32, - /// The value returned by the last `ListSnapshotsResponse`; indicates that this - /// is a continuation of a prior `ListSnapshots` call, and that the system - /// should return the next page of data. + /// Optional. The value returned by the last `ListSnapshotsResponse`; indicates + /// that this is a continuation of a prior `ListSnapshots` call, and that the + /// system should return the next page of data. #[prost(string, tag = "3")] pub page_token: ::prost::alloc::string::String, } @@ -1828,11 +2252,12 @@ pub struct ListSnapshotsRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListSnapshotsResponse { - /// The resulting snapshots. + /// Optional. The resulting snapshots. #[prost(message, repeated, tag = "1")] pub snapshots: ::prost::alloc::vec::Vec, - /// If not empty, indicates that there may be more snapshot that match the - /// request; this value should be passed in a new `ListSnapshotsRequest`. + /// Optional. If not empty, indicates that there may be more snapshot that + /// match the request; this value should be passed in a new + /// `ListSnapshotsRequest`. #[prost(string, tag = "2")] pub next_page_token: ::prost::alloc::string::String, } @@ -1860,7 +2285,7 @@ pub mod seek_request { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Target { - /// The time to seek to. + /// Optional. The time to seek to. 
/// Messages retained in the subscription that were published before this /// time are marked as acknowledged, and messages retained in the /// subscription that were published after this time are marked as @@ -1873,9 +2298,9 @@ pub mod seek_request { /// and already-expunged messages will not be restored. #[prost(message, tag = "2")] Time(::prost_types::Timestamp), - /// The snapshot to seek to. The snapshot's topic must be the same as that of - /// the provided subscription. - /// Format is `projects/{project}/snapshots/{snap}`. + /// Optional. The snapshot to seek to. The snapshot's topic must be the same + /// as that of the provided subscription. Format is + /// `projects/{project}/snapshots/{snap}`. #[prost(string, tag = "3")] Snapshot(::prost::alloc::string::String), } @@ -1965,7 +2390,7 @@ pub mod publisher_client { self } /// Creates the given topic with the given name. See the [resource name rules] - /// (https://cloud.google.com/pubsub/docs/admin#resource_names). + /// (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). pub async fn create_topic( &mut self, request: impl tonic::IntoRequest, @@ -1980,8 +2405,8 @@ pub mod publisher_client { .insert(GrpcMethod::new("google.pubsub.v1.Publisher", "CreateTopic")); self.inner.unary(req, path, codec).await } - /// Updates an existing topic. Note that certain properties of a - /// topic are not modifiable. + /// Updates an existing topic by updating the fields specified in the update + /// mask. Note that certain properties of a topic are not modifiable. pub async fn update_topic( &mut self, request: impl tonic::IntoRequest, @@ -2197,16 +2622,16 @@ pub mod subscriber_client { self } /// Creates a subscription to a given topic. See the [resource name rules] - /// (https://cloud.google.com/pubsub/docs/admin#resource_names). + /// (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). /// If the subscription already exists, returns `ALREADY_EXISTS`. 
/// If the corresponding topic doesn't exist, returns `NOT_FOUND`. /// /// If the name is not provided in the request, the server will assign a random /// name for this subscription on the same project as the topic, conforming /// to the [resource name format] - /// (https://cloud.google.com/pubsub/docs/admin#resource_names). The generated - /// name is populated in the returned Subscription object. Note that for REST - /// API requests, you must specify a name in the request. + /// (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). The + /// generated name is populated in the returned Subscription object. Note that + /// for REST API requests, you must specify a name in the request. pub async fn create_subscription( &mut self, request: impl tonic::IntoRequest, @@ -2236,8 +2661,9 @@ pub mod subscriber_client { .insert(GrpcMethod::new("google.pubsub.v1.Subscriber", "GetSubscription")); self.inner.unary(req, path, codec).await } - /// Updates an existing subscription. Note that certain properties of a - /// subscription, such as its topic, are not modifiable. + /// Updates an existing subscription by updating the fields specified in the + /// update mask. Note that certain properties of a subscription, such as its + /// topic, are not modifiable. pub async fn update_subscription( &mut self, request: impl tonic::IntoRequest, @@ -2434,7 +2860,7 @@ pub mod subscriber_client { /// the request, the server will assign a random /// name for this snapshot on the same project as the subscription, conforming /// to the [resource name format] - /// (https://cloud.google.com/pubsub/docs/admin#resource_names). The + /// (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). The /// generated name is populated in the returned Snapshot object. Note that for /// REST API requests, you must specify a name in the request. 
pub async fn create_snapshot( @@ -2451,7 +2877,8 @@ pub mod subscriber_client { .insert(GrpcMethod::new("google.pubsub.v1.Subscriber", "CreateSnapshot")); self.inner.unary(req, path, codec).await } - /// Updates an existing snapshot. Snapshots are used in + /// Updates an existing snapshot by updating the fields specified in the update + /// mask. Snapshots are used in /// [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, /// which allow you to manage message acknowledgments in bulk. That is, you can /// set the acknowledgment state of messages in an existing subscription to the diff --git a/googleapis/src/google.r#type.rs b/googleapis/src/google.r#type.rs index 57d13ed7..9eb064a3 100644 --- a/googleapis/src/google.r#type.rs +++ b/googleapis/src/google.r#type.rs @@ -60,7 +60,7 @@ pub struct Expr { /// * A month and day value, with a zero year, such as an anniversary /// * A year on its own, with zero month and day values /// * A year and month value, with a zero day, such as a credit card expiration -/// date +/// date /// /// Related types are [google.type.TimeOfDay][google.type.TimeOfDay] and /// `google.protobuf.Timestamp`. diff --git a/googleapis/src/google.spanner.admin.database.v1.rs b/googleapis/src/google.spanner.admin.database.v1.rs index a68fe2cc..6d20627f 100644 --- a/googleapis/src/google.spanner.admin.database.v1.rs +++ b/googleapis/src/google.spanner.admin.database.v1.rs @@ -25,6 +25,23 @@ pub struct EncryptionConfig { /// `projects//locations//keyRings//cryptoKeys/`. #[prost(string, tag = "2")] pub kms_key_name: ::prost::alloc::string::String, + /// Specifies the KMS configuration for the one or more keys used to encrypt + /// the database. Values are of the form + /// `projects//locations//keyRings//cryptoKeys/`. + /// + /// The keys referenced by kms_key_names must fully cover all + /// regions of the database instance configuration. 
Some examples: + /// * For single region database instance configs, specify a single regional + /// location KMS key. + /// * For multi-regional database instance configs of type GOOGLE_MANAGED, + /// either specify a multi-regional location KMS key or multiple regional + /// location KMS keys that cover all regions in the instance config. + /// * For a database instance config of type USER_MANAGED, please specify only + /// regional location KMS keys to cover each region in the instance config. + /// Multi-regional location KMS keys are not supported for USER_MANAGED + /// instance configs. + #[prost(string, repeated, tag = "3")] + pub kms_key_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// Encryption information for a Cloud Spanner database or backup. #[allow(clippy::derive_partial_eq_without_eq)] @@ -33,13 +50,13 @@ pub struct EncryptionInfo { /// Output only. The type of encryption. #[prost(enumeration = "encryption_info::Type", tag = "3")] pub encryption_type: i32, - /// Output only. If present, the status of a recent encrypt/decrypt call on underlying data - /// for this database or backup. Regardless of status, data is always encrypted - /// at rest. + /// Output only. If present, the status of a recent encrypt/decrypt call on + /// underlying data for this database or backup. Regardless of status, data is + /// always encrypted at rest. #[prost(message, optional, tag = "4")] pub encryption_status: ::core::option::Option, - /// Output only. A Cloud KMS key version that is being used to protect the database or - /// backup. + /// Output only. A Cloud KMS key version that is being used to protect the + /// database or backup. #[prost(string, tag = "2")] pub kms_key_version: ::prost::alloc::string::String, } @@ -90,7 +107,7 @@ pub enum DatabaseDialect { /// Default value. This value will create a database with the /// GOOGLE_STANDARD_SQL dialect. Unspecified = 0, - /// Google standard SQL. + /// GoogleSQL supported SQL. 
GoogleStandardSql = 1, /// PostgreSQL supported SQL. Postgresql = 2, @@ -121,10 +138,10 @@ impl DatabaseDialect { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Backup { - /// Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation. - /// Name of the database from which this backup was - /// created. This needs to be in the same instance as the backup. - /// Values are of the form + /// Required for the + /// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + /// operation. Name of the database from which this backup was created. This + /// needs to be in the same instance as the backup. Values are of the form /// `projects//instances//databases/`. #[prost(string, tag = "2")] pub database: ::prost::alloc::string::String, @@ -134,7 +151,8 @@ pub struct Backup { /// backup. #[prost(message, optional, tag = "9")] pub version_time: ::core::option::Option<::prost_types::Timestamp>, - /// Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + /// Required for the + /// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] /// operation. The expiration time of the backup, with microseconds /// granularity that must be at least 6 hours and at most 366 days /// from the time the CreateBackup request is processed. Once the `expire_time` @@ -142,8 +160,11 @@ pub struct Backup { /// Spanner to free the resources used by the backup. #[prost(message, optional, tag = "3")] pub expire_time: ::core::option::Option<::prost_types::Timestamp>, - /// Output only for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation. - /// Required for the [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup] operation. + /// Output only for the + /// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + /// operation. 
Required for the + /// [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup] + /// operation. /// /// A globally unique identifier for the backup which cannot be /// changed. Values are of the form @@ -157,7 +178,8 @@ pub struct Backup { /// `projects//instances/`. #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, - /// Output only. The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + /// Output only. The time the + /// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] /// request is received. If the request does not specify `version_time`, the /// `version_time` of the backup will be equivalent to the `create_time`. #[prost(message, optional, tag = "4")] @@ -165,6 +187,24 @@ pub struct Backup { /// Output only. Size of the backup in bytes. #[prost(int64, tag = "5")] pub size_bytes: i64, + /// Output only. The number of bytes that will be freed by deleting this + /// backup. This value will be zero if, for example, this backup is part of an + /// incremental backup chain and younger backups in the chain require that we + /// keep its data. For backups not in an incremental backup chain, this is + /// always the size of the backup. This value may change if backups on the same + /// chain get created, deleted or expired. + #[prost(int64, tag = "15")] + pub freeable_size_bytes: i64, + /// Output only. For a backup in an incremental backup chain, this is the + /// storage space needed to keep the data that has changed since the previous + /// backup. For all other backups, this is always the size of the backup. This + /// value may change if backups on the same chain get deleted or expired. + /// + /// This field can be used to calculate the total storage space used by a set + /// of backups. For example, the total space used by all backups of a database + /// can be computed by summing up this field. 
+ #[prost(int64, tag = "16")] + pub exclusive_size_bytes: i64, /// Output only. The current state of the backup. #[prost(enumeration = "backup::State", tag = "6")] pub state: i32, @@ -180,6 +220,14 @@ pub struct Backup { /// Output only. The encryption information for the backup. #[prost(message, optional, tag = "8")] pub encryption_info: ::core::option::Option, + /// Output only. The encryption information for the backup, whether it is + /// protected by one or more KMS keys. The information includes all Cloud + /// KMS key versions used to encrypt the backup. The `encryption_status' field + /// inside of each `EncryptionInfo` is not populated. At least one of the key + /// versions must be available for the backup to be restored. If a key version + /// is revoked in the middle of a restore, the restore behavior is undefined. + #[prost(message, repeated, tag = "13")] + pub encryption_information: ::prost::alloc::vec::Vec, /// Output only. The database dialect information for the backup. #[prost(enumeration = "DatabaseDialect", tag = "10")] pub database_dialect: i32, @@ -199,6 +247,32 @@ pub struct Backup { /// less than `Backup.max_expire_time`. #[prost(message, optional, tag = "12")] pub max_expire_time: ::core::option::Option<::prost_types::Timestamp>, + /// Output only. List of backup schedule URIs that are associated with + /// creating this backup. This is only applicable for scheduled backups, and + /// is empty for on-demand backups. + /// + /// To optimize for storage, whenever possible, multiple schedules are + /// collapsed together to create one backup. In such cases, this field captures + /// the list of all backup schedule URIs that are associated with creating + /// this backup. If collapsing is not done, then this field captures the + /// single backup schedule URI associated with creating this backup. + #[prost(string, repeated, tag = "14")] + pub backup_schedules: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Output only. 
Populated only for backups in an incremental backup chain. + /// Backups share the same chain id if and only if they belong to the same + /// incremental backup chain. Use this field to determine which backups are + /// part of the same incremental backup chain. The ordering of backups in the + /// chain can be determined by ordering the backup `version_time`. + #[prost(string, tag = "17")] + pub incremental_backup_chain_id: ::prost::alloc::string::String, + /// Output only. Data deleted at a time older than this is guaranteed not to be + /// retained in order to support this backup. For a backup in an incremental + /// backup chain, this is the version time of the oldest backup that exists or + /// ever existed in the chain. For all other backups, this is the version time + /// of the backup. This field can be used to understand what data is being + /// retained by the backup system. + #[prost(message, optional, tag = "18")] + pub oldest_version_time: ::core::option::Option<::prost_types::Timestamp>, } /// Nested message and enum types in `Backup`. pub mod backup { @@ -237,7 +311,8 @@ pub mod backup { } } } -/// The request for [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]. +/// The request for +/// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CreateBackupRequest { @@ -257,11 +332,11 @@ pub struct CreateBackupRequest { /// Required. The backup to create. #[prost(message, optional, tag = "3")] pub backup: ::core::option::Option, - /// Optional. The encryption configuration used to encrypt the backup. If this field is - /// not specified, the backup will use the same - /// encryption configuration as the database by default, namely - /// [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] = - /// `USE_DATABASE_ENCRYPTION`. + /// Optional. 
The encryption configuration used to encrypt the backup. If this + /// field is not specified, the backup will use the same encryption + /// configuration as the database by default, namely + /// [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] + /// = `USE_DATABASE_ENCRYPTION`. #[prost(message, optional, tag = "4")] pub encryption_config: ::core::option::Option, } @@ -277,7 +352,8 @@ pub struct CreateBackupMetadata { #[prost(string, tag = "2")] pub database: ::prost::alloc::string::String, /// The progress of the - /// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation. + /// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + /// operation. #[prost(message, optional, tag = "3")] pub progress: ::core::option::Option, /// The time at which cancellation of this operation was received. @@ -295,12 +371,13 @@ pub struct CreateBackupMetadata { #[prost(message, optional, tag = "4")] pub cancel_time: ::core::option::Option<::prost_types::Timestamp>, } -/// The request for [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]. +/// The request for +/// [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CopyBackupRequest { - /// Required. The name of the destination instance that will contain the backup copy. - /// Values are of the form: `projects//instances/`. + /// Required. The name of the destination instance that will contain the backup + /// copy. Values are of the form: `projects//instances/`. #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, /// Required. The id of the backup copy. @@ -323,15 +400,15 @@ pub struct CopyBackupRequest { /// to free the resources used by the backup. 
#[prost(message, optional, tag = "4")] pub expire_time: ::core::option::Option<::prost_types::Timestamp>, - /// Optional. The encryption configuration used to encrypt the backup. If this field is - /// not specified, the backup will use the same - /// encryption configuration as the source backup by default, namely - /// [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] = - /// `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`. + /// Optional. The encryption configuration used to encrypt the backup. If this + /// field is not specified, the backup will use the same encryption + /// configuration as the source backup by default, namely + /// [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] + /// = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`. #[prost(message, optional, tag = "5")] pub encryption_config: ::core::option::Option, } -/// Metadata type for the google.longrunning.Operation returned by +/// Metadata type for the operation returned by /// [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -347,7 +424,8 @@ pub struct CopyBackupMetadata { #[prost(string, tag = "2")] pub source_backup: ::prost::alloc::string::String, /// The progress of the - /// [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] operation. + /// [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] + /// operation. #[prost(message, optional, tag = "3")] pub progress: ::core::option::Option, /// The time at which cancellation of CopyBackup operation was received. @@ -365,7 +443,8 @@ pub struct CopyBackupMetadata { #[prost(message, optional, tag = "4")] pub cancel_time: ::core::option::Option<::prost_types::Timestamp>, } -/// The request for [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]. 
+/// The request for +/// [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct UpdateBackupRequest { @@ -383,7 +462,8 @@ pub struct UpdateBackupRequest { #[prost(message, optional, tag = "2")] pub update_mask: ::core::option::Option<::prost_types::FieldMask>, } -/// The request for [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup]. +/// The request for +/// [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetBackupRequest { @@ -393,7 +473,8 @@ pub struct GetBackupRequest { #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, } -/// The request for [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup]. +/// The request for +/// [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeleteBackupRequest { @@ -403,7 +484,8 @@ pub struct DeleteBackupRequest { #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, } -/// The request for [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]. +/// The request for +/// [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListBackupsRequest { @@ -419,7 +501,9 @@ pub struct ListBackupsRequest { /// must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. /// Colon `:` is the contains operator. Filter rules are not case sensitive. 
/// - /// The following fields in the [Backup][google.spanner.admin.database.v1.Backup] are eligible for filtering: + /// The following fields in the + /// [Backup][google.spanner.admin.database.v1.Backup] are eligible for + /// filtering: /// /// * `name` /// * `database` @@ -428,6 +512,7 @@ pub struct ListBackupsRequest { /// * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) /// * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) /// * `size_bytes` + /// * `backup_schedules` /// /// You can combine multiple expressions by enclosing each expression in /// parentheses. By default, expressions are combined with AND logic, but @@ -446,6 +531,8 @@ pub struct ListBackupsRequest { /// * `expire_time < \"2018-03-28T14:50:00Z\"` /// - The backup `expire_time` is before 2018-03-28T14:50:00Z. /// * `size_bytes > 10000000000` - The backup's size is greater than 10GB + /// * `backup_schedules:daily` + /// - The backup is created from a schedule with "daily" in its name. #[prost(string, tag = "2")] pub filter: ::prost::alloc::string::String, /// Number of backups to be returned in the response. If 0 or @@ -453,13 +540,15 @@ pub struct ListBackupsRequest { #[prost(int32, tag = "3")] pub page_size: i32, /// If non-empty, `page_token` should contain a - /// [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token] from a - /// previous [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse] to the same `parent` and with the same - /// `filter`. + /// [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token] + /// from a previous + /// [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse] + /// to the same `parent` and with the same `filter`. #[prost(string, tag = "4")] pub page_token: ::prost::alloc::string::String, } -/// The response for [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]. 
+/// The response for +/// [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListBackupsResponse { @@ -468,8 +557,8 @@ pub struct ListBackupsResponse { #[prost(message, repeated, tag = "1")] pub backups: ::prost::alloc::vec::Vec, /// `next_page_token` can be sent in a subsequent - /// [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups] call to fetch more - /// of the matching backups. + /// [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups] + /// call to fetch more of the matching backups. #[prost(string, tag = "2")] pub next_page_token: ::prost::alloc::string::String, } @@ -496,7 +585,9 @@ pub struct ListBackupOperationsRequest { /// * `name` - The name of the long-running operation /// * `done` - False if the operation is in progress, else true. /// * `metadata.@type` - the type of metadata. For example, the type string - /// for [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] is + /// for + /// [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + /// is /// `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`. /// * `metadata.` - any field in metadata.value. /// `metadata.@type` must be specified first if filtering on metadata @@ -514,14 +605,15 @@ pub struct ListBackupOperationsRequest { /// * `done:true` - The operation is complete. /// * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ /// `metadata.database:prod` - Returns operations where: - /// * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. - /// * The database the backup was taken from has a name containing the - /// string "prod". 
+ /// * The operation's metadata type is + /// [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + /// * The source database name of backup contains the string "prod". /// * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ /// `(metadata.name:howl) AND` \ /// `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ /// `(error:*)` - Returns operations where: - /// * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + /// * The operation's metadata type is + /// [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. /// * The backup name contains the string "howl". /// * The operation started before 2018-03-28T14:50:00Z. /// * The operation resulted in an error. @@ -529,9 +621,9 @@ pub struct ListBackupOperationsRequest { /// `(metadata.source_backup:test) AND` \ /// `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \ /// `(error:*)` - Returns operations where: - /// * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. - /// * The source backup of the copied backup name contains the string - /// "test". + /// * The operation's metadata type is + /// [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. + /// * The source backup name contains the string "test". /// * The operation started before 2022-01-18T14:50:00Z. /// * The operation resulted in an error. 
/// * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ @@ -541,12 +633,13 @@ pub struct ListBackupOperationsRequest { /// `(metadata.source_backup:test_bkp)) AND` \ /// `(error:*)` - Returns operations where: /// * The operation's metadata matches either of criteria: - /// * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] AND the - /// database the backup was taken from has name containing string - /// "test_db" - /// * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] AND the - /// backup the backup was copied from has name containing string - /// "test_bkp" + /// * The operation's metadata type is + /// [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + /// AND the source database name of the backup contains the string + /// "test_db" + /// * The operation's metadata type is + /// [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] + /// AND the source backup name contains the string "test_bkp" /// * The operation resulted in an error. #[prost(string, tag = "2")] pub filter: ::prost::alloc::string::String, @@ -556,8 +649,9 @@ pub struct ListBackupOperationsRequest { pub page_size: i32, /// If non-empty, `page_token` should contain a /// [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token] - /// from a previous [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse] to the - /// same `parent` and with the same `filter`. + /// from a previous + /// [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse] + /// to the same `parent` and with the same `filter`. 
#[prost(string, tag = "4")] pub page_token: ::prost::alloc::string::String, } @@ -592,13 +686,14 @@ pub struct BackupInfo { pub backup: ::prost::alloc::string::String, /// The backup contains an externally consistent copy of `source_database` at /// the timestamp specified by `version_time`. If the - /// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request did not specify - /// `version_time`, the `version_time` of the backup is equivalent to the - /// `create_time`. + /// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + /// request did not specify `version_time`, the `version_time` of the backup is + /// equivalent to the `create_time`. #[prost(message, optional, tag = "4")] pub version_time: ::core::option::Option<::prost_types::Timestamp>, - /// The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request was - /// received. + /// The time the + /// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + /// request was received. #[prost(message, optional, tag = "2")] pub create_time: ::core::option::Option<::prost_types::Timestamp>, /// Name of the database the backup was created from. @@ -614,11 +709,28 @@ pub struct CreateBackupEncryptionConfig { pub encryption_type: i32, /// Optional. The Cloud KMS key that will be used to protect the backup. /// This field should be set only when - /// [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] is - /// `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form + /// [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] + /// is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form /// `projects//locations//keyRings//cryptoKeys/`. #[prost(string, tag = "2")] pub kms_key_name: ::prost::alloc::string::String, + /// Optional. Specifies the KMS configuration for the one or more keys used to + /// protect the backup. 
Values are of the form + /// `projects//locations//keyRings//cryptoKeys/`. + /// + /// The keys referenced by kms_key_names must fully cover all + /// regions of the backup's instance configuration. Some examples: + /// * For single region instance configs, specify a single regional + /// location KMS key. + /// * For multi-regional instance configs of type GOOGLE_MANAGED, + /// either specify a multi-regional location KMS key or multiple regional + /// location KMS keys that cover all regions in the instance config. + /// * For an instance config of type USER_MANAGED, please specify only + /// regional location KMS keys to cover each region in the instance config. + /// Multi-regional location KMS keys are not supported for USER_MANAGED + /// instance configs. + #[prost(string, repeated, tag = "3")] + pub kms_key_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// Nested message and enum types in `CreateBackupEncryptionConfig`. pub mod create_backup_encryption_config { @@ -630,9 +742,10 @@ pub mod create_backup_encryption_config { Unspecified = 0, /// Use the same encryption configuration as the database. This is the /// default option when - /// [encryption_config][google.spanner.admin.database.v1.CreateBackupEncryptionConfig] is empty. - /// For example, if the database is using `Customer_Managed_Encryption`, the - /// backup will be using the same Cloud KMS key as the database. + /// [encryption_config][google.spanner.admin.database.v1.CreateBackupEncryptionConfig] + /// is empty. For example, if the database is using + /// `Customer_Managed_Encryption`, the backup will be using the same Cloud + /// KMS key as the database. UseDatabaseEncryption = 1, /// Use Google default encryption. GoogleDefaultEncryption = 2, @@ -674,11 +787,29 @@ pub struct CopyBackupEncryptionConfig { pub encryption_type: i32, /// Optional. The Cloud KMS key that will be used to protect the backup. 
/// This field should be set only when - /// [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] is - /// `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form + /// [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] + /// is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form /// `projects//locations//keyRings//cryptoKeys/`. #[prost(string, tag = "2")] pub kms_key_name: ::prost::alloc::string::String, + /// Optional. Specifies the KMS configuration for the one or more keys used to + /// protect the backup. Values are of the form + /// `projects//locations//keyRings//cryptoKeys/`. + /// Kms keys specified can be in any order. + /// + /// The keys referenced by kms_key_names must fully cover all + /// regions of the backup's instance configuration. Some examples: + /// * For single region instance configs, specify a single regional + /// location KMS key. + /// * For multi-regional instance configs of type GOOGLE_MANAGED, + /// either specify a multi-regional location KMS key or multiple regional + /// location KMS keys that cover all regions in the instance config. + /// * For an instance config of type USER_MANAGED, please specify only + /// regional location KMS keys to cover each region in the instance config. + /// Multi-regional location KMS keys are not supported for USER_MANAGED + /// instance configs. + #[prost(string, repeated, tag = "3")] + pub kms_key_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// Nested message and enum types in `CopyBackupEncryptionConfig`. pub mod copy_backup_encryption_config { @@ -688,15 +819,18 @@ pub mod copy_backup_encryption_config { pub enum EncryptionType { /// Unspecified. Do not use. 
Unspecified = 0, - /// This is the default option for [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] - /// when [encryption_config][google.spanner.admin.database.v1.CopyBackupEncryptionConfig] is not specified. - /// For example, if the source backup is using `Customer_Managed_Encryption`, - /// the backup will be using the same Cloud KMS key as the source backup. + /// This is the default option for + /// [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] + /// when + /// [encryption_config][google.spanner.admin.database.v1.CopyBackupEncryptionConfig] + /// is not specified. For example, if the source backup is using + /// `Customer_Managed_Encryption`, the backup will be using the same Cloud + /// KMS key as the source backup. UseConfigDefaultOrBackupEncryption = 1, /// Use Google default encryption. GoogleDefaultEncryption = 2, - /// Use customer managed encryption. If specified, `kms_key_name` - /// must contain a valid Cloud KMS key. + /// Use customer managed encryption. If specified, either `kms_key_name` or + /// `kms_key_names` must contain valid Cloud KMS key(s). CustomerManagedEncryption = 3, } impl EncryptionType { @@ -724,6 +858,222 @@ pub mod copy_backup_encryption_config { } } } +/// The specification for full backups. +/// A full backup stores the entire contents of the database at a given +/// version time. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct FullBackupSpec {} +/// The specification for incremental backup chains. +/// An incremental backup stores the delta of changes between a previous +/// backup and the database contents at a given version time. An +/// incremental backup chain consists of a full backup and zero or more +/// successive incremental backups. The first backup created for an +/// incremental backup chain is always a full backup. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct IncrementalBackupSpec {} +/// Defines specifications of the backup schedule. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BackupScheduleSpec { + /// Required. + #[prost(oneof = "backup_schedule_spec::ScheduleSpec", tags = "1")] + pub schedule_spec: ::core::option::Option, +} +/// Nested message and enum types in `BackupScheduleSpec`. +pub mod backup_schedule_spec { + /// Required. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum ScheduleSpec { + /// Cron style schedule specification. + #[prost(message, tag = "1")] + CronSpec(super::CrontabSpec), + } +} +/// BackupSchedule expresses the automated backup creation specification for a +/// Spanner database. +/// Next ID: 10 +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BackupSchedule { + /// Identifier. Output only for the + /// [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchededule] operation. + /// Required for the + /// [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule] + /// operation. A globally unique identifier for the backup schedule which + /// cannot be changed. Values are of the form + /// `projects//instances//databases//backupSchedules/[a-z][a-z0-9_\-]*\[a-z0-9\]` + /// The final segment of the name must be between 2 and 60 characters in + /// length. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// Optional. The schedule specification based on which the backup creations + /// are triggered. + #[prost(message, optional, tag = "6")] + pub spec: ::core::option::Option, + /// Optional. The retention duration of a backup that must be at least 6 hours + /// and at most 366 days. 
The backup is eligible to be automatically deleted + /// once the retention period has elapsed. + #[prost(message, optional, tag = "3")] + pub retention_duration: ::core::option::Option<::prost_types::Duration>, + /// Optional. The encryption configuration that will be used to encrypt the + /// backup. If this field is not specified, the backup will use the same + /// encryption configuration as the database. + #[prost(message, optional, tag = "4")] + pub encryption_config: ::core::option::Option, + /// Output only. The timestamp at which the schedule was last updated. + /// If the schedule has never been updated, this field contains the timestamp + /// when the schedule was first created. + #[prost(message, optional, tag = "9")] + pub update_time: ::core::option::Option<::prost_types::Timestamp>, + /// Required. Backup type spec determines the type of backup that is created by + /// the backup schedule. Currently, only full backups are supported. + #[prost(oneof = "backup_schedule::BackupTypeSpec", tags = "7, 8")] + pub backup_type_spec: ::core::option::Option, +} +/// Nested message and enum types in `BackupSchedule`. +pub mod backup_schedule { + /// Required. Backup type spec determines the type of backup that is created by + /// the backup schedule. Currently, only full backups are supported. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] + pub enum BackupTypeSpec { + /// The schedule creates only full backups. + #[prost(message, tag = "7")] + FullBackupSpec(super::FullBackupSpec), + /// The schedule creates incremental backup chains. + #[prost(message, tag = "8")] + IncrementalBackupSpec(super::IncrementalBackupSpec), + } +} +/// CrontabSpec can be used to specify the version time and frequency at +/// which the backup should be created. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CrontabSpec { + /// Required. 
Textual representation of the crontab. User can customize the + /// backup frequency and the backup version time using the cron + /// expression. The version time must be in UTC timzeone. + /// + /// The backup will contain an externally consistent copy of the + /// database at the version time. Allowed frequencies are 12 hour, 1 day, + /// 1 week and 1 month. Examples of valid cron specifications: + /// * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC. + /// * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC. + /// * `0 2 * * * ` : once a day at 2 past midnight in UTC. + /// * `0 2 * * 0 ` : once a week every Sunday at 2 past midnight in UTC. + /// * `0 2 8 * * ` : once a month on 8th day at 2 past midnight in UTC. + #[prost(string, tag = "1")] + pub text: ::prost::alloc::string::String, + /// Output only. The time zone of the times in `CrontabSpec.text`. Currently + /// only UTC is supported. + #[prost(string, tag = "2")] + pub time_zone: ::prost::alloc::string::String, + /// Output only. Schedule backups will contain an externally consistent copy + /// of the database at the version time specified in + /// `schedule_spec.cron_spec`. However, Spanner may not initiate the creation + /// of the scheduled backups at that version time. Spanner will initiate + /// the creation of scheduled backups within the time window bounded by the + /// version_time specified in `schedule_spec.cron_spec` and version_time + + /// `creation_window`. + #[prost(message, optional, tag = "3")] + pub creation_window: ::core::option::Option<::prost_types::Duration>, +} +/// The request for +/// [CreateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateBackupScheduleRequest { + /// Required. The name of the database that this backup schedule applies to. 
+ #[prost(string, tag = "1")] + pub parent: ::prost::alloc::string::String, + /// Required. The Id to use for the backup schedule. The `backup_schedule_id` + /// appended to `parent` forms the full backup schedule name of the form + /// `projects//instances//databases//backupSchedules/`. + #[prost(string, tag = "2")] + pub backup_schedule_id: ::prost::alloc::string::String, + /// Required. The backup schedule to create. + #[prost(message, optional, tag = "3")] + pub backup_schedule: ::core::option::Option, +} +/// The request for +/// [GetBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetBackupScheduleRequest { + /// Required. The name of the schedule to retrieve. + /// Values are of the form + /// `projects//instances//databases//backupSchedules/`. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, +} +/// The request for +/// [DeleteBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteBackupScheduleRequest { + /// Required. The name of the schedule to delete. + /// Values are of the form + /// `projects//instances//databases//backupSchedules/`. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, +} +/// The request for +/// [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListBackupSchedulesRequest { + /// Required. Database is the parent resource whose backup schedules should be + /// listed. Values are of the form + /// projects//instances//databases/ + #[prost(string, tag = "1")] + pub parent: ::prost::alloc::string::String, + /// Optional. 
Number of backup schedules to be returned in the response. If 0 + /// or less, defaults to the server's maximum allowed page size. + #[prost(int32, tag = "2")] + pub page_size: i32, + /// Optional. If non-empty, `page_token` should contain a + /// [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token] + /// from a previous + /// [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse] + /// to the same `parent`. + #[prost(string, tag = "4")] + pub page_token: ::prost::alloc::string::String, +} +/// The response for +/// [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListBackupSchedulesResponse { + /// The list of backup schedules for a database. + #[prost(message, repeated, tag = "1")] + pub backup_schedules: ::prost::alloc::vec::Vec, + /// `next_page_token` can be sent in a subsequent + /// [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules] + /// call to fetch more of the schedules. + #[prost(string, tag = "2")] + pub next_page_token: ::prost::alloc::string::String, +} +/// The request for +/// [UpdateBackupScheduleRequest][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UpdateBackupScheduleRequest { + /// Required. The backup schedule to update. `backup_schedule.name`, and the + /// fields to be updated as specified by `update_mask` are required. Other + /// fields are ignored. + #[prost(message, optional, tag = "1")] + pub backup_schedule: ::core::option::Option, + /// Required. A mask specifying which fields in the BackupSchedule resource + /// should be updated. This mask is relative to the BackupSchedule resource, + /// not to the request message. 
The field mask must always be + /// specified; this prevents any future fields from being erased + /// accidentally. + #[prost(message, optional, tag = "2")] + pub update_mask: ::core::option::Option<::prost_types::FieldMask>, +} /// Information about the database restore. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -776,7 +1126,8 @@ pub struct Database { pub encryption_config: ::core::option::Option, /// Output only. For databases that are using customer managed encryption, this /// field contains the encryption information for the database, such as - /// encryption state and the Cloud KMS key versions that are in use. + /// all Cloud KMS key versions that are in use. The `encryption_status' field + /// inside of each `EncryptionInfo` is not populated. /// /// For databases that are using Google default or other types of encryption, /// this field is empty. @@ -788,8 +1139,8 @@ pub struct Database { /// Output only. The period in which Cloud Spanner retains all versions of data /// for the database. This is the same as the value of version_retention_period /// database option set using - /// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. Defaults to 1 hour, - /// if not set. + /// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. + /// Defaults to 1 hour, if not set. #[prost(string, tag = "6")] pub version_retention_period: ::prost::alloc::string::String, /// Output only. Earliest timestamp at which older versions of the data can be @@ -811,7 +1162,9 @@ pub struct Database { #[prost(enumeration = "DatabaseDialect", tag = "10")] pub database_dialect: i32, /// Whether drop protection is enabled for this database. Defaults to false, - /// if not set. + /// if not set. For more details, please see how to [prevent accidental + /// database + /// deletion](). #[prost(bool, tag = "11")] pub enable_drop_protection: bool, /// Output only. 
If true, the database is being updated. If false, there are no @@ -867,7 +1220,8 @@ pub mod database { } } } -/// The request for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. +/// The request for +/// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListDatabasesRequest { @@ -880,12 +1234,14 @@ pub struct ListDatabasesRequest { #[prost(int32, tag = "3")] pub page_size: i32, /// If non-empty, `page_token` should contain a - /// [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] from a - /// previous [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse]. + /// [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] + /// from a previous + /// [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse]. #[prost(string, tag = "4")] pub page_token: ::prost::alloc::string::String, } -/// The response for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. +/// The response for +/// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListDatabasesResponse { @@ -893,12 +1249,13 @@ pub struct ListDatabasesResponse { #[prost(message, repeated, tag = "1")] pub databases: ::prost::alloc::vec::Vec, /// `next_page_token` can be sent in a subsequent - /// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] call to fetch more - /// of the matching databases. + /// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] + /// call to fetch more of the matching databases. 
#[prost(string, tag = "2")] pub next_page_token: ::prost::alloc::string::String, } -/// The request for [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. +/// The request for +/// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CreateDatabaseRequest { @@ -919,14 +1276,31 @@ pub struct CreateDatabaseRequest { /// if there is an error in any statement, the database is not created. #[prost(string, repeated, tag = "3")] pub extra_statements: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// Optional. The encryption configuration for the database. If this field is not - /// specified, Cloud Spanner will encrypt/decrypt all data at rest using + /// Optional. The encryption configuration for the database. If this field is + /// not specified, Cloud Spanner will encrypt/decrypt all data at rest using /// Google default encryption. #[prost(message, optional, tag = "4")] pub encryption_config: ::core::option::Option, /// Optional. The dialect of the Cloud Spanner Database. #[prost(enumeration = "DatabaseDialect", tag = "5")] pub database_dialect: i32, + /// Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements in + /// 'extra_statements' above. + /// Contains a protobuf-serialized + /// [google.protobuf.FileDescriptorSet](). + /// To generate it, [install]() and + /// run `protoc` with --include_imports and --descriptor_set_out. For example, + /// to generate for moon/shot/app.proto, run + /// ``` + /// $protoc --proto_path=/app_path --proto_path=/lib_path \ + /// --include_imports \ + /// --descriptor_set_out=descriptors.data \ + /// moon/shot/app.proto + /// ``` + /// For more details, see protobuffer [self + /// description](). 
+ #[prost(bytes = "vec", tag = "6")] + pub proto_descriptors: ::prost::alloc::vec::Vec, } /// Metadata type for the operation returned by /// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. @@ -937,7 +1311,8 @@ pub struct CreateDatabaseMetadata { #[prost(string, tag = "1")] pub database: ::prost::alloc::string::String, } -/// The request for [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase]. +/// The request for +/// [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetDatabaseRequest { @@ -994,8 +1369,8 @@ pub struct UpdateDatabaseMetadata { /// Each batch of statements is assigned a name which can be used with /// the [Operations][google.longrunning.Operations] API to monitor /// progress. See the -/// [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id] field for more -/// details. +/// [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id] +/// field for more details. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct UpdateDatabaseDdlRequest { @@ -1012,20 +1387,38 @@ pub struct UpdateDatabaseDdlRequest { /// /// Specifying an explicit operation ID simplifies determining /// whether the statements were executed in the event that the - /// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] call is replayed, - /// or the return value is otherwise lost: the [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] and - /// `operation_id` fields can be combined to form the + /// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + /// call is replayed, or the return value is otherwise lost: the + /// [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] + /// and `operation_id` fields can be combined to form the /// [name][google.longrunning.Operation.name] of the resulting - /// [longrunning.Operation][google.longrunning.Operation]: `/operations/`. + /// [longrunning.Operation][google.longrunning.Operation]: + /// `/operations/`. /// /// `operation_id` should be unique within the database, and must be /// a valid identifier: `[a-z][a-z0-9_]*`. Note that /// automatically-generated operation IDs always begin with an /// underscore. If the named operation already exists, - /// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] returns - /// `ALREADY_EXISTS`. + /// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + /// returns `ALREADY_EXISTS`. #[prost(string, tag = "3")] pub operation_id: ::prost::alloc::string::String, + /// Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements. + /// Contains a protobuf-serialized + /// [google.protobuf.FileDescriptorSet](). + /// To generate it, [install]() and + /// run `protoc` with --include_imports and --descriptor_set_out. 
For example, + /// to generate for moon/shot/app.proto, run + /// ``` + /// $protoc --proto_path=/app_path --proto_path=/lib_path \ + /// --include_imports \ + /// --descriptor_set_out=descriptors.data \ + /// moon/shot/app.proto + /// ``` + /// For more details, see protobuffer [self + /// description](). + #[prost(bytes = "vec", tag = "4")] + pub proto_descriptors: ::prost::alloc::vec::Vec, } /// Action information extracted from a DDL statement. This proto is used to /// display the brief info of the DDL statement for the operation @@ -1086,7 +1479,8 @@ pub struct UpdateDatabaseDdlMetadata { #[prost(message, repeated, tag = "6")] pub actions: ::prost::alloc::vec::Vec, } -/// The request for [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase]. +/// The request for +/// [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DropDatabaseRequest { @@ -1094,7 +1488,8 @@ pub struct DropDatabaseRequest { #[prost(string, tag = "1")] pub database: ::prost::alloc::string::String, } -/// The request for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. +/// The request for +/// [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetDatabaseDdlRequest { @@ -1104,7 +1499,8 @@ pub struct GetDatabaseDdlRequest { #[prost(string, tag = "1")] pub database: ::prost::alloc::string::String, } -/// The response for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. +/// The response for +/// [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetDatabaseDdlResponse { @@ -1112,6 +1508,13 @@ pub struct GetDatabaseDdlResponse { /// specified in the request. #[prost(string, repeated, tag = "1")] pub statements: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Proto descriptors stored in the database. + /// Contains a protobuf-serialized + /// [google.protobuf.FileDescriptorSet](). + /// For more details, see protobuffer [self + /// description](). + #[prost(bytes = "vec", tag = "2")] + pub proto_descriptors: ::prost::alloc::vec::Vec, } /// The request for /// [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]. @@ -1136,7 +1539,9 @@ pub struct ListDatabaseOperationsRequest { /// * `name` - The name of the long-running operation /// * `done` - False if the operation is in progress, else true. /// * `metadata.@type` - the type of metadata. For example, the type string - /// for [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] is + /// for + /// [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] + /// is /// `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`. /// * `metadata.` - any field in metadata.value. /// `metadata.@type` must be specified first, if filtering on metadata @@ -1158,7 +1563,8 @@ pub struct ListDatabaseOperationsRequest { /// `(metadata.name:restored_howl) AND` \ /// `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ /// `(error:*)` - Return operations where: - /// * The operation's metadata type is [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. + /// * The operation's metadata type is + /// [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. /// * The database is restored from a backup. /// * The backup name contains "backup_howl". 
/// * The restored database's name contains "restored_howl". @@ -1172,8 +1578,9 @@ pub struct ListDatabaseOperationsRequest { pub page_size: i32, /// If non-empty, `page_token` should contain a /// [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token] - /// from a previous [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse] to the - /// same `parent` and with the same `filter`. + /// from a previous + /// [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse] + /// to the same `parent` and with the same `filter`. #[prost(string, tag = "4")] pub page_token: ::prost::alloc::string::String, } @@ -1213,12 +1620,12 @@ pub struct RestoreDatabaseRequest { /// `projects//instances//databases/`. #[prost(string, tag = "2")] pub database_id: ::prost::alloc::string::String, - /// Optional. An encryption configuration describing the encryption type and key - /// resources in Cloud KMS used to encrypt/decrypt the database to restore to. - /// If this field is not specified, the restored database will use - /// the same encryption configuration as the backup by default, namely - /// [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] = - /// `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`. + /// Optional. An encryption configuration describing the encryption type and + /// key resources in Cloud KMS used to encrypt/decrypt the database to restore + /// to. If this field is not specified, the restored database will use the same + /// encryption configuration as the backup by default, namely + /// [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] + /// = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`. #[prost(message, optional, tag = "4")] pub encryption_config: ::core::option::Option, /// Required. The source from which to restore. 
@@ -1244,13 +1651,30 @@ pub struct RestoreDatabaseEncryptionConfig { /// Required. The encryption type of the restored database. #[prost(enumeration = "restore_database_encryption_config::EncryptionType", tag = "1")] pub encryption_type: i32, - /// Optional. The Cloud KMS key that will be used to encrypt/decrypt the restored - /// database. This field should be set only when - /// [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] is - /// `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form + /// Optional. The Cloud KMS key that will be used to encrypt/decrypt the + /// restored database. This field should be set only when + /// [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] + /// is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form /// `projects//locations//keyRings//cryptoKeys/`. #[prost(string, tag = "2")] pub kms_key_name: ::prost::alloc::string::String, + /// Optional. Specifies the KMS configuration for the one or more keys used to + /// encrypt the database. Values are of the form + /// `projects//locations//keyRings//cryptoKeys/`. + /// + /// The keys referenced by kms_key_names must fully cover all + /// regions of the database instance configuration. Some examples: + /// * For single region database instance configs, specify a single regional + /// location KMS key. + /// * For multi-regional database instance configs of type GOOGLE_MANAGED, + /// either specify a multi-regional location KMS key or multiple regional + /// location KMS keys that cover all regions in the instance config. + /// * For a database instance config of type USER_MANAGED, please specify only + /// regional location KMS keys to cover each region in the instance config. + /// Multi-regional location KMS keys are not supported for USER_MANAGED + /// instance configs. 
+ #[prost(string, repeated, tag = "3")] + pub kms_key_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// Nested message and enum types in `RestoreDatabaseEncryptionConfig`. pub mod restore_database_encryption_config { @@ -1261,7 +1685,8 @@ pub mod restore_database_encryption_config { /// Unspecified. Do not use. Unspecified = 0, /// This is the default option when - /// [encryption_config][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig] is not specified. + /// [encryption_config][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig] + /// is not specified. UseConfigDefaultOrBackupEncryption = 1, /// Use Google default encryption. GoogleDefaultEncryption = 2, @@ -1320,7 +1745,8 @@ pub struct RestoreDatabaseMetadata { /// operation completed despite cancellation. On successful cancellation, /// the operation is not deleted; instead, it becomes an operation with /// an [Operation.error][google.longrunning.Operation.error] value with a - /// [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to `Code.CANCELLED`. + /// [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to + /// `Code.CANCELLED`. #[prost(message, optional, tag = "5")] pub cancel_time: ::core::option::Option<::prost_types::Timestamp>, /// If exists, the name of the long-running operation that will be used to @@ -1330,21 +1756,23 @@ pub struct RestoreDatabaseMetadata { /// `projects//instances//databases//operations/` /// where the is the name of database being created and restored to. /// The metadata type of the long-running operation is - /// [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. This long-running operation will be - /// automatically created by the system after the RestoreDatabase long-running - /// operation completes successfully. This operation will not be created if the - /// restore was not successful. 
+ /// [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. + /// This long-running operation will be automatically created by the system + /// after the RestoreDatabase long-running operation completes successfully. + /// This operation will not be created if the restore was not successful. #[prost(string, tag = "6")] pub optimize_database_operation_name: ::prost::alloc::string::String, /// Information about the source used to restore the database, as specified by - /// `source` in [RestoreDatabaseRequest][google.spanner.admin.database.v1.RestoreDatabaseRequest]. + /// `source` in + /// [RestoreDatabaseRequest][google.spanner.admin.database.v1.RestoreDatabaseRequest]. #[prost(oneof = "restore_database_metadata::SourceInfo", tags = "3")] pub source_info: ::core::option::Option, } /// Nested message and enum types in `RestoreDatabaseMetadata`. pub mod restore_database_metadata { /// Information about the source used to restore the database, as specified by - /// `source` in [RestoreDatabaseRequest][google.spanner.admin.database.v1.RestoreDatabaseRequest]. + /// `source` in + /// [RestoreDatabaseRequest][google.spanner.admin.database.v1.RestoreDatabaseRequest]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum SourceInfo { @@ -1372,20 +1800,19 @@ pub struct OptimizeRestoredDatabaseMetadata { #[derive(Clone, PartialEq, ::prost::Message)] pub struct DatabaseRole { /// Required. The name of the database role. Values are of the form - /// `projects//instances//databases//databaseRoles/ - /// {role}`, where `` is as specified in the `CREATE ROLE` - /// DDL statement. This name can be passed to Get/Set IAMPolicy methods to - /// identify the database role. + /// `projects//instances//databases//databaseRoles/` + /// where `` is as specified in the `CREATE ROLE` DDL statement. 
#[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, } -/// The request for [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]. +/// The request for +/// [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListDatabaseRolesRequest { /// Required. The database whose roles should be listed. /// Values are of the form - /// `projects//instances//databases//databaseRoles`. + /// `projects//instances//databases/`. #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, /// Number of database roles to be returned in the response. If 0 or less, @@ -1393,12 +1820,14 @@ pub struct ListDatabaseRolesRequest { #[prost(int32, tag = "2")] pub page_size: i32, /// If non-empty, `page_token` should contain a - /// [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token] from a - /// previous [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse]. + /// [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token] + /// from a previous + /// [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse]. #[prost(string, tag = "3")] pub page_token: ::prost::alloc::string::String, } -/// The response for [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]. +/// The response for +/// [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListDatabaseRolesResponse { @@ -1450,7 +1879,7 @@ pub mod database_admin_client { /// The Cloud Spanner Database Admin API can be used to: /// * create, drop, and list databases /// * update the schema of pre-existing databases - /// * create, delete and list backups for a database + /// * create, delete, copy and list backups for a database /// * restore a database from an existing backup #[derive(Debug, Clone)] pub struct DatabaseAdminClient { @@ -1548,8 +1977,8 @@ pub mod database_admin_client { /// have a name of the format `/operations/` and /// can be used to track preparation of the database. The /// [metadata][google.longrunning.Operation.metadata] field type is - /// [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The - /// [response][google.longrunning.Operation.response] field type is + /// [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. + /// The [response][google.longrunning.Operation.response] field type is /// [Database][google.spanner.admin.database.v1.Database], if successful. pub async fn create_database( &mut self, @@ -1649,7 +2078,8 @@ pub mod database_admin_client { /// the format `/operations/` and can be used to /// track execution of the schema change(s). The /// [metadata][google.longrunning.Operation.metadata] field type is - /// [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response. + /// [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. + /// The operation has no response. pub async fn update_database_ddl( &mut self, request: impl tonic::IntoRequest, @@ -1801,12 +2231,12 @@ pub mod database_admin_client { /// `projects//instances//backups//operations/` /// and can be used to track creation of the backup. 
The /// [metadata][google.longrunning.Operation.metadata] field type is - /// [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. The - /// [response][google.longrunning.Operation.response] field type is - /// [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned operation will stop the - /// creation and delete the backup. - /// There can be only one pending backup creation per database. Backup creation - /// of different databases can run concurrently. + /// [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + /// The [response][google.longrunning.Operation.response] field type is + /// [Backup][google.spanner.admin.database.v1.Backup], if successful. + /// Cancelling the returned operation will stop the creation and delete the + /// backup. There can be only one pending backup creation per database. Backup + /// creation of different databases can run concurrently. pub async fn create_backup( &mut self, request: impl tonic::IntoRequest, @@ -1836,9 +2266,10 @@ pub mod database_admin_client { /// The [metadata][google.longrunning.Operation.metadata] field type is /// [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. /// The [response][google.longrunning.Operation.response] field type is - /// [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned operation will stop the - /// copying and delete the backup. - /// Concurrent CopyBackup requests can run on the same source backup. + /// [Backup][google.spanner.admin.database.v1.Backup], if successful. + /// Cancelling the returned operation will stop the copying and delete the + /// destination backup. Concurrent CopyBackup requests can run on the same + /// source backup. 
pub async fn copy_backup( &mut self, request: impl tonic::IntoRequest, @@ -1857,7 +2288,8 @@ pub mod database_admin_client { .insert(GrpcMethod::new("google.spanner.admin.database.v1.DatabaseAdmin", "CopyBackup")); self.inner.unary(req, path, codec).await } - /// Gets metadata on a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + /// Gets metadata on a pending or completed + /// [Backup][google.spanner.admin.database.v1.Backup]. pub async fn get_backup( &mut self, request: impl tonic::IntoRequest, @@ -1873,7 +2305,8 @@ pub mod database_admin_client { .insert(GrpcMethod::new("google.spanner.admin.database.v1.DatabaseAdmin", "GetBackup")); self.inner.unary(req, path, codec).await } - /// Updates a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + /// Updates a pending or completed + /// [Backup][google.spanner.admin.database.v1.Backup]. pub async fn update_backup( &mut self, request: impl tonic::IntoRequest, @@ -1891,7 +2324,8 @@ pub mod database_admin_client { )); self.inner.unary(req, path, codec).await } - /// Deletes a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + /// Deletes a pending or completed + /// [Backup][google.spanner.admin.database.v1.Backup]. pub async fn delete_backup( &mut self, request: impl tonic::IntoRequest, @@ -2037,5 +2471,100 @@ pub mod database_admin_client { )); self.inner.unary(req, path, codec).await } + /// Creates a new backup schedule. 
+ pub async fn create_backup_schedule( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackupSchedule", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.spanner.admin.database.v1.DatabaseAdmin", + "CreateBackupSchedule", + )); + self.inner.unary(req, path, codec).await + } + /// Gets backup schedule for the input schedule name. + pub async fn get_backup_schedule( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackupSchedule", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.spanner.admin.database.v1.DatabaseAdmin", + "GetBackupSchedule", + )); + self.inner.unary(req, path, codec).await + } + /// Updates a backup schedule. 
+ pub async fn update_backup_schedule( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackupSchedule", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.spanner.admin.database.v1.DatabaseAdmin", + "UpdateBackupSchedule", + )); + self.inner.unary(req, path, codec).await + } + /// Deletes a backup schedule. + pub async fn delete_backup_schedule( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackupSchedule", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.spanner.admin.database.v1.DatabaseAdmin", + "DeleteBackupSchedule", + )); + self.inner.unary(req, path, codec).await + } + /// Lists all the backup schedules for the database. 
+ pub async fn list_backup_schedules( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupSchedules", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.spanner.admin.database.v1.DatabaseAdmin", + "ListBackupSchedules", + )); + self.inner.unary(req, path, codec).await + } } } diff --git a/googleapis/src/google.spanner.admin.instance.v1.rs b/googleapis/src/google.spanner.admin.instance.v1.rs index 49b8a6d3..b6d52431 100644 --- a/googleapis/src/google.spanner.admin.instance.v1.rs +++ b/googleapis/src/google.spanner.admin.instance.v1.rs @@ -16,6 +16,41 @@ pub struct OperationProgress { #[prost(message, optional, tag = "3")] pub end_time: ::core::option::Option<::prost_types::Timestamp>, } +/// Indicates the expected fulfillment period of an operation. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum FulfillmentPeriod { + /// Not specified. + Unspecified = 0, + /// Normal fulfillment period. The operation is expected to complete within + /// minutes. + Normal = 1, + /// Extended fulfillment period. It can take up to an hour for the operation + /// to complete. + Extended = 2, +} +impl FulfillmentPeriod { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + FulfillmentPeriod::Unspecified => "FULFILLMENT_PERIOD_UNSPECIFIED", + FulfillmentPeriod::Normal => "FULFILLMENT_PERIOD_NORMAL", + FulfillmentPeriod::Extended => "FULFILLMENT_PERIOD_EXTENDED", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "FULFILLMENT_PERIOD_UNSPECIFIED" => Some(Self::Unspecified), + "FULFILLMENT_PERIOD_NORMAL" => Some(Self::Normal), + "FULFILLMENT_PERIOD_EXTENDED" => Some(Self::Extended), + _ => None, + } + } +} #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReplicaInfo { @@ -99,13 +134,15 @@ pub struct InstanceConfig { /// A unique identifier for the instance configuration. Values /// are of the form /// `projects//instanceConfigs/[a-z][-a-z0-9]*`. + /// + /// User instance configuration must start with `custom-`. #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, /// The name of this instance configuration as it appears in UIs. #[prost(string, tag = "2")] pub display_name: ::prost::alloc::string::String, - /// Output only. Whether this instance config is a Google or User Managed - /// Configuration. + /// Output only. Whether this instance configuration is a Google-managed or + /// user-managed configuration. #[prost(enumeration = "instance_config::Type", tag = "5")] pub config_type: i32, /// The geographic placement of nodes in this instance configuration and their @@ -146,26 +183,29 @@ pub struct InstanceConfig { #[prost(map = "string, string", tag = "8")] pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, /// etag is used for optimistic concurrency control as a way - /// to help prevent simultaneous updates of a instance config from overwriting - /// each other. 
It is strongly suggested that systems make use of the etag in - /// the read-modify-write cycle to perform instance config updates in order to - /// avoid race conditions: An etag is returned in the response which contains - /// instance configs, and systems are expected to put that etag in the request - /// to update instance config to ensure that their change will be applied to - /// the same version of the instance config. - /// If no etag is provided in the call to update instance config, then the - /// existing instance config is overwritten blindly. + /// to help prevent simultaneous updates of a instance configuration from + /// overwriting each other. It is strongly suggested that systems make use of + /// the etag in the read-modify-write cycle to perform instance configuration + /// updates in order to avoid race conditions: An etag is returned in the + /// response which contains instance configurations, and systems are expected + /// to put that etag in the request to update instance configuration to ensure + /// that their change is applied to the same version of the instance + /// configuration. If no etag is provided in the call to update the instance + /// configuration, then the existing instance configuration is overwritten + /// blindly. #[prost(string, tag = "9")] pub etag: ::prost::alloc::string::String, /// Allowed values of the "default_leader" schema option for databases in /// instances that use this instance configuration. #[prost(string, repeated, tag = "4")] pub leader_options: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// Output only. If true, the instance config is being created or updated. If - /// false, there are no ongoing operations for the instance config. + /// Output only. If true, the instance configuration is being created or + /// updated. If false, there are no ongoing operations for the instance + /// configuration. #[prost(bool, tag = "10")] pub reconciling: bool, - /// Output only. 
The current instance config state. + /// Output only. The current instance configuration state. Applicable only for + /// `USER_MANAGED` configurations. #[prost(enumeration = "instance_config::State", tag = "11")] pub state: i32, } @@ -204,16 +244,16 @@ pub mod instance_config { } } } - /// Indicates the current state of the instance config. + /// Indicates the current state of the instance configuration. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum State { /// Not specified. Unspecified = 0, - /// The instance config is still being created. + /// The instance configuration is still being created. Creating = 1, - /// The instance config is fully created and ready to be used to create - /// instances. + /// The instance configuration is fully created and ready to be used to + /// create instances. Ready = 2, } impl State { @@ -239,6 +279,86 @@ pub mod instance_config { } } } +/// Autoscaling configuration for an instance. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct AutoscalingConfig { + /// Required. Autoscaling limits for an instance. + #[prost(message, optional, tag = "1")] + pub autoscaling_limits: ::core::option::Option, + /// Required. The autoscaling targets for an instance. + #[prost(message, optional, tag = "2")] + pub autoscaling_targets: ::core::option::Option, +} +/// Nested message and enum types in `AutoscalingConfig`. +pub mod autoscaling_config { + /// The autoscaling limits for the instance. Users can define the minimum and + /// maximum compute capacity allocated to the instance, and the autoscaler will + /// only scale within that range. Users can either use nodes or processing + /// units to specify the limits, but should use the same unit to set both the + /// min_limit and max_limit. 
+ #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct AutoscalingLimits { + /// The minimum compute capacity for the instance. + #[prost(oneof = "autoscaling_limits::MinLimit", tags = "1, 2")] + pub min_limit: ::core::option::Option, + /// The maximum compute capacity for the instance. The maximum compute + /// capacity should be less than or equal to 10X the minimum compute + /// capacity. + #[prost(oneof = "autoscaling_limits::MaxLimit", tags = "3, 4")] + pub max_limit: ::core::option::Option, + } + /// Nested message and enum types in `AutoscalingLimits`. + pub mod autoscaling_limits { + /// The minimum compute capacity for the instance. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] + pub enum MinLimit { + /// Minimum number of nodes allocated to the instance. If set, this number + /// should be greater than or equal to 1. + #[prost(int32, tag = "1")] + MinNodes(i32), + /// Minimum number of processing units allocated to the instance. If set, + /// this number should be multiples of 1000. + #[prost(int32, tag = "2")] + MinProcessingUnits(i32), + } + /// The maximum compute capacity for the instance. The maximum compute + /// capacity should be less than or equal to 10X the minimum compute + /// capacity. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] + pub enum MaxLimit { + /// Maximum number of nodes allocated to the instance. If set, this number + /// should be greater than or equal to min_nodes. + #[prost(int32, tag = "3")] + MaxNodes(i32), + /// Maximum number of processing units allocated to the instance. If set, + /// this number should be multiples of 1000 and be greater than or equal to + /// min_processing_units. + #[prost(int32, tag = "4")] + MaxProcessingUnits(i32), + } + } + /// The autoscaling targets for an instance. 
+ #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct AutoscalingTargets { + /// Required. The target high priority cpu utilization percentage that the + /// autoscaler should be trying to achieve for the instance. This number is + /// on a scale from 0 (no utilization) to 100 (full utilization). The valid + /// range is \[10, 90\] inclusive. + #[prost(int32, tag = "1")] + pub high_priority_cpu_utilization_percent: i32, + /// Required. The target storage utilization percentage that the autoscaler + /// should be trying to achieve for the instance. This number is on a scale + /// from 0 (no utilization) to 100 (full utilization). The valid range is + /// \[10, 100\] inclusive. + #[prost(int32, tag = "2")] + pub storage_utilization_percent: i32, + } +} /// An isolated set of Cloud Spanner resources on which databases can be hosted. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -260,8 +380,12 @@ pub struct Instance { #[prost(string, tag = "3")] pub display_name: ::prost::alloc::string::String, /// The number of nodes allocated to this instance. At most one of either - /// node_count or processing_units should be present in the message. This - /// may be zero in API responses for instances that are not yet in state + /// node_count or processing_units should be present in the message. + /// + /// Users can set the node_count field to specify the target number of nodes + /// allocated to the instance. + /// + /// This may be zero in API responses for instances that are not yet in state /// `READY`. /// /// See [the @@ -270,14 +394,25 @@ pub struct Instance { #[prost(int32, tag = "5")] pub node_count: i32, /// The number of processing units allocated to this instance. At most one of - /// processing_units or node_count should be present in the message. This may - /// be zero in API responses for instances that are not yet in state `READY`. 
+ /// processing_units or node_count should be present in the message. + /// + /// Users can set the processing_units field to specify the target number of + /// processing units allocated to the instance. + /// + /// This may be zero in API responses for instances that are not yet in state + /// `READY`. /// /// See [the /// documentation]() /// for more information about nodes and processing units. #[prost(int32, tag = "9")] pub processing_units: i32, + /// Optional. The autoscaling configuration. Autoscaling is enabled if this + /// field is set. When autoscaling is enabled, node_count and processing_units + /// are treated as OUTPUT_ONLY fields and reflect the current compute capacity + /// allocated to the instance. + #[prost(message, optional, tag = "17")] + pub autoscaling_config: ::core::option::Option, /// Output only. The current instance state. For /// [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance], /// the state must be either omitted or set to `CREATING`. For @@ -317,6 +452,9 @@ pub struct Instance { /// Output only. The time at which the instance was most recently updated. #[prost(message, optional, tag = "12")] pub update_time: ::core::option::Option<::prost_types::Timestamp>, + /// Optional. The `Edition` of the current instance. + #[prost(enumeration = "instance::Edition", tag = "20")] + pub edition: i32, } /// Nested message and enum types in `Instance`. pub mod instance { @@ -356,6 +494,44 @@ pub mod instance { } } } + /// The edition selected for this instance. Different editions provide + /// different capabilities at different price points. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum Edition { + /// Edition not specified. + Unspecified = 0, + /// Standard edition. + Standard = 1, + /// Enterprise edition. + Enterprise = 2, + /// Enterprise Plus edition. 
+ EnterprisePlus = 3, + } + impl Edition { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Edition::Unspecified => "EDITION_UNSPECIFIED", + Edition::Standard => "STANDARD", + Edition::Enterprise => "ENTERPRISE", + Edition::EnterprisePlus => "ENTERPRISE_PLUS", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "EDITION_UNSPECIFIED" => Some(Self::Unspecified), + "STANDARD" => Some(Self::Standard), + "ENTERPRISE" => Some(Self::Enterprise), + "ENTERPRISE_PLUS" => Some(Self::EnterprisePlus), + _ => None, + } + } + } } /// The request for /// [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. @@ -407,14 +583,14 @@ pub struct GetInstanceConfigRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CreateInstanceConfigRequest { - /// Required. The name of the project in which to create the instance config. - /// Values are of the form `projects/`. + /// Required. The name of the project in which to create the instance + /// configuration. Values are of the form `projects/`. #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, - /// Required. The ID of the instance config to create. Valid identifiers are - /// of the form `custom-\[-a-z0-9\]*[a-z0-9]` and must be between 2 and 64 + /// Required. The ID of the instance configuration to create. Valid identifiers + /// are of the form `custom-\[-a-z0-9\]*[a-z0-9]` and must be between 2 and 64 /// characters in length. The `custom-` prefix is required to avoid name - /// conflicts with Google managed configurations. 
+ /// conflicts with Google-managed configurations. #[prost(string, tag = "2")] pub instance_config_id: ::prost::alloc::string::String, /// Required. The InstanceConfig proto of the configuration to create. @@ -434,8 +610,9 @@ pub struct CreateInstanceConfigRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct UpdateInstanceConfigRequest { - /// Required. The user instance config to update, which must always include the - /// instance config name. Otherwise, only fields mentioned in + /// Required. The user instance configuration to update, which must always + /// include the instance configuration name. Otherwise, only fields mentioned + /// in /// [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask] /// need be included. To prevent conflicts of concurrent updates, /// [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can @@ -466,12 +643,12 @@ pub struct DeleteInstanceConfigRequest { #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, /// Used for optimistic concurrency control as a way to help prevent - /// simultaneous deletes of an instance config from overwriting each + /// simultaneous deletes of an instance configuration from overwriting each /// other. If not empty, the API - /// only deletes the instance config when the etag provided matches the current - /// status of the requested instance config. Otherwise, deletes the instance - /// config without checking the current status of the requested instance - /// config. + /// only deletes the instance configuration when the etag provided matches the + /// current status of the requested instance configuration. Otherwise, deletes + /// the instance configuration without checking the current status of the + /// requested instance configuration. 
#[prost(string, tag = "2")] pub etag: ::prost::alloc::string::String, /// An option to validate, but not actually execute, a request, @@ -484,7 +661,7 @@ pub struct DeleteInstanceConfigRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListInstanceConfigOperationsRequest { - /// Required. The project of the instance config operations. + /// Required. The project of the instance configuration operations. /// Values are of the form `projects/`. #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, @@ -527,8 +704,8 @@ pub struct ListInstanceConfigOperationsRequest { /// `(metadata.progress.start_time < \"2021-03-28T14:50:00Z\") AND` \ /// `(error:*)` - Return operations where: /// * The operation's metadata type is - /// [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. - /// * The instance config name contains "custom-config". + /// [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. + /// * The instance configuration name contains "custom-config". /// * The operation started before 2021-03-28T14:50:00Z. /// * The operation resulted in an error. #[prost(string, tag = "2")] @@ -550,9 +727,9 @@ pub struct ListInstanceConfigOperationsRequest { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListInstanceConfigOperationsResponse { - /// The list of matching instance config [long-running + /// The list of matching instance configuration [long-running /// operations][google.longrunning.Operation]. Each operation's name will be - /// prefixed by the instance config's name. The operation's + /// prefixed by the name of the instance configuration. The operation's /// [metadata][google.longrunning.Operation.metadata] field type /// `metadata.type_url` describes the type of the metadata. 
#[prost(message, repeated, tag = "1")] @@ -638,6 +815,14 @@ pub struct ListInstancesRequest { /// containing "dev". #[prost(string, tag = "4")] pub filter: ::prost::alloc::string::String, + /// Deadline used while retrieving metadata for instances. + /// Instances whose metadata cannot be retrieved within this deadline will be + /// added to + /// [unreachable][google.spanner.admin.instance.v1.ListInstancesResponse.unreachable] + /// in + /// [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse]. + #[prost(message, optional, tag = "5")] + pub instance_deadline: ::core::option::Option<::prost_types::Timestamp>, } /// The response for /// [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]. @@ -652,6 +837,12 @@ pub struct ListInstancesResponse { /// call to fetch more of the matching instances. #[prost(string, tag = "2")] pub next_page_token: ::prost::alloc::string::String, + /// The list of unreachable instances. + /// It includes the names of instances whose metadata could not be retrieved + /// within + /// [instance_deadline][google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline]. + #[prost(string, repeated, tag = "3")] + pub unreachable: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// The request for /// [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]. @@ -703,6 +894,9 @@ pub struct CreateInstanceMetadata { /// The time at which this operation failed or was completed successfully. #[prost(message, optional, tag = "4")] pub end_time: ::core::option::Option<::prost_types::Timestamp>, + /// The expected fulfillment period of this create operation. + #[prost(enumeration = "FulfillmentPeriod", tag = "5")] + pub expected_fulfillment_period: i32, } /// Metadata type for the operation returned by /// [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]. 
@@ -725,13 +919,16 @@ pub struct UpdateInstanceMetadata { /// The time at which this operation failed or was completed successfully. #[prost(message, optional, tag = "4")] pub end_time: ::core::option::Option<::prost_types::Timestamp>, + /// The expected fulfillment period of this update operation. + #[prost(enumeration = "FulfillmentPeriod", tag = "5")] + pub expected_fulfillment_period: i32, } /// Metadata type for the operation returned by /// [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CreateInstanceConfigMetadata { - /// The target instance config end state. + /// The target instance configuration end state. #[prost(message, optional, tag = "1")] pub instance_config: ::core::option::Option, /// The progress of the @@ -748,7 +945,7 @@ pub struct CreateInstanceConfigMetadata { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct UpdateInstanceConfigMetadata { - /// The desired instance config after updating. + /// The desired instance configuration after updating. #[prost(message, optional, tag = "1")] pub instance_config: ::core::option::Option, /// The progress of the @@ -760,6 +957,434 @@ pub struct UpdateInstanceConfigMetadata { #[prost(message, optional, tag = "3")] pub cancel_time: ::core::option::Option<::prost_types::Timestamp>, } +/// An isolated set of Cloud Spanner resources that databases can define +/// placements on. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct InstancePartition { + /// Required. A unique identifier for the instance partition. Values are of the + /// form + /// `projects//instances//instancePartitions/[a-z][-a-z0-9]*\[a-z0-9\]`. + /// The final segment of the name must be between 2 and 64 characters in + /// length. 
An instance partition's name cannot be changed after the instance + /// partition is created. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// Required. The name of the instance partition's configuration. Values are of + /// the form `projects//instanceConfigs/`. See also + /// [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and + /// [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. + #[prost(string, tag = "2")] + pub config: ::prost::alloc::string::String, + /// Required. The descriptive name for this instance partition as it appears in + /// UIs. Must be unique per project and between 4 and 30 characters in length. + #[prost(string, tag = "3")] + pub display_name: ::prost::alloc::string::String, + /// Output only. The current instance partition state. + #[prost(enumeration = "instance_partition::State", tag = "7")] + pub state: i32, + /// Output only. The time at which the instance partition was created. + #[prost(message, optional, tag = "8")] + pub create_time: ::core::option::Option<::prost_types::Timestamp>, + /// Output only. The time at which the instance partition was most recently + /// updated. + #[prost(message, optional, tag = "9")] + pub update_time: ::core::option::Option<::prost_types::Timestamp>, + /// Output only. The names of the databases that reference this + /// instance partition. Referencing databases should share the parent instance. + /// The existence of any referencing database prevents the instance partition + /// from being deleted. + #[prost(string, repeated, tag = "10")] + pub referencing_databases: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Output only. The names of the backups that reference this instance + /// partition. Referencing backups should share the parent instance. The + /// existence of any referencing backup prevents the instance partition from + /// being deleted. 
+ #[prost(string, repeated, tag = "11")] + pub referencing_backups: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Used for optimistic concurrency control as a way + /// to help prevent simultaneous updates of a instance partition from + /// overwriting each other. It is strongly suggested that systems make use of + /// the etag in the read-modify-write cycle to perform instance partition + /// updates in order to avoid race conditions: An etag is returned in the + /// response which contains instance partitions, and systems are expected to + /// put that etag in the request to update instance partitions to ensure that + /// their change will be applied to the same version of the instance partition. + /// If no etag is provided in the call to update instance partition, then the + /// existing instance partition is overwritten blindly. + #[prost(string, tag = "12")] + pub etag: ::prost::alloc::string::String, + /// Compute capacity defines amount of server and storage resources that are + /// available to the databases in an instance partition. At most one of either + /// node_count or processing_units should be present in the message. See [the + /// documentation]() + /// for more information about nodes and processing units. + #[prost(oneof = "instance_partition::ComputeCapacity", tags = "5, 6")] + pub compute_capacity: ::core::option::Option, +} +/// Nested message and enum types in `InstancePartition`. +pub mod instance_partition { + /// Indicates the current state of the instance partition. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum State { + /// Not specified. + Unspecified = 0, + /// The instance partition is still being created. Resources may not be + /// available yet, and operations such as creating placements using this + /// instance partition may not work. 
+ Creating = 1, + /// The instance partition is fully created and ready to do work such as + /// creating placements and using in databases. + Ready = 2, + } + impl State { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + State::Unspecified => "STATE_UNSPECIFIED", + State::Creating => "CREATING", + State::Ready => "READY", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "STATE_UNSPECIFIED" => Some(Self::Unspecified), + "CREATING" => Some(Self::Creating), + "READY" => Some(Self::Ready), + _ => None, + } + } + } + /// Compute capacity defines amount of server and storage resources that are + /// available to the databases in an instance partition. At most one of either + /// node_count or processing_units should be present in the message. See [the + /// documentation]() + /// for more information about nodes and processing units. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] + pub enum ComputeCapacity { + /// The number of nodes allocated to this instance partition. + /// + /// Users can set the node_count field to specify the target number of nodes + /// allocated to the instance partition. + /// + /// This may be zero in API responses for instance partitions that are not + /// yet in state `READY`. + #[prost(int32, tag = "5")] + NodeCount(i32), + /// The number of processing units allocated to this instance partition. + /// + /// Users can set the processing_units field to specify the target number of + /// processing units allocated to the instance partition. 
+ /// + /// This may be zero in API responses for instance partitions that are not + /// yet in state `READY`. + #[prost(int32, tag = "6")] + ProcessingUnits(i32), + } +} +/// Metadata type for the operation returned by +/// [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateInstancePartitionMetadata { + /// The instance partition being created. + #[prost(message, optional, tag = "1")] + pub instance_partition: ::core::option::Option, + /// The time at which the + /// [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition] + /// request was received. + #[prost(message, optional, tag = "2")] + pub start_time: ::core::option::Option<::prost_types::Timestamp>, + /// The time at which this operation was cancelled. If set, this operation is + /// in the process of undoing itself (which is guaranteed to succeed) and + /// cannot be cancelled again. + #[prost(message, optional, tag = "3")] + pub cancel_time: ::core::option::Option<::prost_types::Timestamp>, + /// The time at which this operation failed or was completed successfully. + #[prost(message, optional, tag = "4")] + pub end_time: ::core::option::Option<::prost_types::Timestamp>, +} +/// The request for +/// [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateInstancePartitionRequest { + /// Required. The name of the instance in which to create the instance + /// partition. Values are of the form + /// `projects//instances/`. + #[prost(string, tag = "1")] + pub parent: ::prost::alloc::string::String, + /// Required. The ID of the instance partition to create. 
Valid identifiers are + /// of the form `[a-z][-a-z0-9]*\[a-z0-9\]` and must be between 2 and 64 + /// characters in length. + #[prost(string, tag = "2")] + pub instance_partition_id: ::prost::alloc::string::String, + /// Required. The instance partition to create. The instance_partition.name may + /// be omitted, but if specified must be + /// `/instancePartitions/`. + #[prost(message, optional, tag = "3")] + pub instance_partition: ::core::option::Option, +} +/// The request for +/// [DeleteInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstancePartition]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteInstancePartitionRequest { + /// Required. The name of the instance partition to be deleted. + /// Values are of the form + /// `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}` + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// Optional. If not empty, the API only deletes the instance partition when + /// the etag provided matches the current status of the requested instance + /// partition. Otherwise, deletes the instance partition without checking the + /// current status of the requested instance partition. + #[prost(string, tag = "2")] + pub etag: ::prost::alloc::string::String, +} +/// The request for +/// [GetInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.GetInstancePartition]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetInstancePartitionRequest { + /// Required. The name of the requested instance partition. Values are of + /// the form + /// `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, +} +/// The request for +/// [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UpdateInstancePartitionRequest { + /// Required. The instance partition to update, which must always include the + /// instance partition name. Otherwise, only fields mentioned in + /// [field_mask][google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask] + /// need be included. + #[prost(message, optional, tag = "1")] + pub instance_partition: ::core::option::Option, + /// Required. A mask specifying which fields in + /// [InstancePartition][google.spanner.admin.instance.v1.InstancePartition] + /// should be updated. The field mask must always be specified; this prevents + /// any future fields in + /// [InstancePartition][google.spanner.admin.instance.v1.InstancePartition] + /// from being erased accidentally by clients that do not know about them. + #[prost(message, optional, tag = "2")] + pub field_mask: ::core::option::Option<::prost_types::FieldMask>, +} +/// Metadata type for the operation returned by +/// [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UpdateInstancePartitionMetadata { + /// The desired end state of the update. + #[prost(message, optional, tag = "1")] + pub instance_partition: ::core::option::Option, + /// The time at which + /// [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition] + /// request was received. + #[prost(message, optional, tag = "2")] + pub start_time: ::core::option::Option<::prost_types::Timestamp>, + /// The time at which this operation was cancelled. If set, this operation is + /// in the process of undoing itself (which is guaranteed to succeed) and + /// cannot be cancelled again. 
+ #[prost(message, optional, tag = "3")] + pub cancel_time: ::core::option::Option<::prost_types::Timestamp>, + /// The time at which this operation failed or was completed successfully. + #[prost(message, optional, tag = "4")] + pub end_time: ::core::option::Option<::prost_types::Timestamp>, +} +/// The request for +/// [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListInstancePartitionsRequest { + /// Required. The instance whose instance partitions should be listed. Values + /// are of the form `projects//instances/`. + #[prost(string, tag = "1")] + pub parent: ::prost::alloc::string::String, + /// Number of instance partitions to be returned in the response. If 0 or less, + /// defaults to the server's maximum allowed page size. + #[prost(int32, tag = "2")] + pub page_size: i32, + /// If non-empty, `page_token` should contain a + /// [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.next_page_token] + /// from a previous + /// [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse]. + #[prost(string, tag = "3")] + pub page_token: ::prost::alloc::string::String, + /// Optional. Deadline used while retrieving metadata for instance partitions. + /// Instance partitions whose metadata cannot be retrieved within this deadline + /// will be added to + /// [unreachable][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.unreachable] + /// in + /// [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse]. + #[prost(message, optional, tag = "4")] + pub instance_partition_deadline: ::core::option::Option<::prost_types::Timestamp>, +} +/// The response for +/// [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions]. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListInstancePartitionsResponse { + /// The list of requested instancePartitions. + #[prost(message, repeated, tag = "1")] + pub instance_partitions: ::prost::alloc::vec::Vec, + /// `next_page_token` can be sent in a subsequent + /// [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions] + /// call to fetch more of the matching instance partitions. + #[prost(string, tag = "2")] + pub next_page_token: ::prost::alloc::string::String, + /// The list of unreachable instance partitions. + /// It includes the names of instance partitions whose metadata could + /// not be retrieved within + /// [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline]. + #[prost(string, repeated, tag = "3")] + pub unreachable: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// The request for +/// [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListInstancePartitionOperationsRequest { + /// Required. The parent instance of the instance partition operations. + /// Values are of the form `projects//instances/`. + #[prost(string, tag = "1")] + pub parent: ::prost::alloc::string::String, + /// Optional. An expression that filters the list of returned operations. + /// + /// A filter expression consists of a field name, a + /// comparison operator, and a value for filtering. + /// The value must be a string, a number, or a boolean. The comparison operator + /// must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. + /// Colon `:` is the contains operator. Filter rules are not case sensitive. 
+ /// + /// The following fields in the [Operation][google.longrunning.Operation] + /// are eligible for filtering: + /// + /// * `name` - The name of the long-running operation + /// * `done` - False if the operation is in progress, else true. + /// * `metadata.@type` - the type of metadata. For example, the type string + /// for + /// [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata] + /// is + /// `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata`. + /// * `metadata.` - any field in metadata.value. + /// `metadata.@type` must be specified first, if filtering on metadata + /// fields. + /// * `error` - Error associated with the long-running operation. + /// * `response.@type` - the type of response. + /// * `response.` - any field in response.value. + /// + /// You can combine multiple expressions by enclosing each expression in + /// parentheses. By default, expressions are combined with AND logic. However, + /// you can specify AND, OR, and NOT logic explicitly. + /// + /// Here are a few examples: + /// + /// * `done:true` - The operation is complete. + /// * `(metadata.@type=` \ + /// `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata) + /// AND` \ + /// `(metadata.instance_partition.name:custom-instance-partition) AND` \ + /// `(metadata.start_time < \"2021-03-28T14:50:00Z\") AND` \ + /// `(error:*)` - Return operations where: + /// * The operation's metadata type is + /// [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]. + /// * The instance partition name contains "custom-instance-partition". + /// * The operation started before 2021-03-28T14:50:00Z. + /// * The operation resulted in an error. + #[prost(string, tag = "2")] + pub filter: ::prost::alloc::string::String, + /// Optional. Number of operations to be returned in the response. 
If 0 or + /// less, defaults to the server's maximum allowed page size. + #[prost(int32, tag = "3")] + pub page_size: i32, + /// Optional. If non-empty, `page_token` should contain a + /// [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.next_page_token] + /// from a previous + /// [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse] + /// to the same `parent` and with the same `filter`. + #[prost(string, tag = "4")] + pub page_token: ::prost::alloc::string::String, + /// Optional. Deadline used while retrieving metadata for instance partition + /// operations. Instance partitions whose operation metadata cannot be + /// retrieved within this deadline will be added to + /// [unreachable][ListInstancePartitionOperationsResponse.unreachable] in + /// [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse]. + #[prost(message, optional, tag = "5")] + pub instance_partition_deadline: ::core::option::Option<::prost_types::Timestamp>, +} +/// The response for +/// [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListInstancePartitionOperationsResponse { + /// The list of matching instance partition [long-running + /// operations][google.longrunning.Operation]. Each operation's name will be + /// prefixed by the instance partition's name. The operation's + /// [metadata][google.longrunning.Operation.metadata] field type + /// `metadata.type_url` describes the type of the metadata. 
+ #[prost(message, repeated, tag = "1")] + pub operations: ::prost::alloc::vec::Vec, + /// `next_page_token` can be sent in a subsequent + /// [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations] + /// call to fetch more of the matching metadata. + #[prost(string, tag = "2")] + pub next_page_token: ::prost::alloc::string::String, + /// The list of unreachable instance partitions. + /// It includes the names of instance partitions whose operation metadata could + /// not be retrieved within + /// [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline]. + #[prost(string, repeated, tag = "3")] + pub unreachable_instance_partitions: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// The request for +/// [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MoveInstanceRequest { + /// Required. The instance to move. + /// Values are of the form `projects//instances/`. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// Required. The target instance configuration where to move the instance. + /// Values are of the form `projects//instanceConfigs/`. + #[prost(string, tag = "2")] + pub target_config: ::prost::alloc::string::String, +} +/// The response for +/// [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct MoveInstanceResponse {} +/// Metadata type for the operation returned by +/// [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MoveInstanceMetadata { + /// The target instance configuration where to move the instance. + /// Values are of the form `projects//instanceConfigs/`. + #[prost(string, tag = "1")] + pub target_config: ::prost::alloc::string::String, + /// The progress of the + /// [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance] + /// operation. + /// [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent] + /// is reset when cancellation is requested. + #[prost(message, optional, tag = "2")] + pub progress: ::core::option::Option, + /// The time at which this operation was cancelled. + #[prost(message, optional, tag = "3")] + pub cancel_time: ::core::option::Option<::prost_types::Timestamp>, +} /// Generated client implementations. pub mod instance_admin_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] @@ -897,38 +1522,38 @@ pub mod instance_admin_client { )); self.inner.unary(req, path, codec).await } - /// Creates an instance config and begins preparing it to be used. The + /// Creates an instance configuration and begins preparing it to be used. The /// returned [long-running operation][google.longrunning.Operation] /// can be used to track the progress of preparing the new - /// instance config. The instance config name is assigned by the caller. If the - /// named instance config already exists, `CreateInstanceConfig` returns - /// `ALREADY_EXISTS`. + /// instance configuration. The instance configuration name is assigned by the + /// caller. If the named instance configuration already exists, + /// `CreateInstanceConfig` returns `ALREADY_EXISTS`. /// /// Immediately after the request returns: /// - /// * The instance config is readable via the API, with all requested - /// attributes. 
The instance config's + /// * The instance configuration is readable via the API, with all requested + /// attributes. The instance configuration's /// [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] /// field is set to true. Its state is `CREATING`. /// /// While the operation is pending: /// - /// * Cancelling the operation renders the instance config immediately + /// * Cancelling the operation renders the instance configuration immediately /// unreadable via the API. /// * Except for deleting the creating resource, all other attempts to modify - /// the instance config are rejected. + /// the instance configuration are rejected. /// /// Upon completion of the returned operation: /// /// * Instances can be created using the instance configuration. - /// * The instance config's - /// [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] - /// field becomes false. Its state becomes `READY`. + /// * The instance configuration's + /// [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + /// field becomes false. Its state becomes `READY`. /// /// The returned [long-running operation][google.longrunning.Operation] will /// have a name of the format /// `/operations/` and can be used to track - /// creation of the instance config. The + /// creation of the instance configuration. The /// [metadata][google.longrunning.Operation.metadata] field type is /// [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. /// The [response][google.longrunning.Operation.response] field type is @@ -959,16 +1584,16 @@ pub mod instance_admin_client { )); self.inner.unary(req, path, codec).await } - /// Updates an instance config. The returned + /// Updates an instance configuration. The returned /// [long-running operation][google.longrunning.Operation] can be used to track - /// the progress of updating the instance. 
If the named instance config does - /// not exist, returns `NOT_FOUND`. + /// the progress of updating the instance. If the named instance configuration + /// does not exist, returns `NOT_FOUND`. /// - /// Only user managed configurations can be updated. + /// Only user-managed configurations can be updated. /// /// Immediately after the request returns: /// - /// * The instance config's + /// * The instance configuration's /// [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] /// field is set to true. /// @@ -978,23 +1603,23 @@ pub mod instance_admin_client { /// [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time]. /// The operation is guaranteed to succeed at undoing all changes, after /// which point it terminates with a `CANCELLED` status. - /// * All other attempts to modify the instance config are rejected. - /// * Reading the instance config via the API continues to give the + /// * All other attempts to modify the instance configuration are rejected. + /// * Reading the instance configuration via the API continues to give the /// pre-request values. /// /// Upon completion of the returned operation: /// /// * Creating instances using the instance configuration uses the new /// values. - /// * The instance config's new values are readable via the API. - /// * The instance config's - /// [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] - /// field becomes false. + /// * The new values of the instance configuration are readable via the API. + /// * The instance configuration's + /// [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + /// field becomes false. /// /// The returned [long-running operation][google.longrunning.Operation] will /// have a name of the format /// `/operations/` and can be used to track - /// the instance config modification. The + /// the instance configuration modification. 
The /// [metadata][google.longrunning.Operation.metadata] field type is /// [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata]. /// The [response][google.longrunning.Operation.response] field type is @@ -1024,11 +1649,11 @@ pub mod instance_admin_client { )); self.inner.unary(req, path, codec).await } - /// Deletes the instance config. Deletion is only allowed when no + /// Deletes the instance configuration. Deletion is only allowed when no /// instances are using the configuration. If any instances are using - /// the config, returns `FAILED_PRECONDITION`. + /// the configuration, returns `FAILED_PRECONDITION`. /// - /// Only user managed configurations can be deleted. + /// Only user-managed configurations can be deleted. /// /// Authorization requires `spanner.instanceConfigs.delete` permission on /// the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name]. @@ -1050,9 +1675,9 @@ pub mod instance_admin_client { )); self.inner.unary(req, path, codec).await } - /// Lists the user-managed instance config [long-running + /// Lists the user-managed instance configuration [long-running /// operations][google.longrunning.Operation] in the given project. An instance - /// config operation has a name of the form + /// configuration operation has a name of the form /// `projects//instanceConfigs//operations/`. /// The long-running operation /// [metadata][google.longrunning.Operation.metadata] field type @@ -1097,6 +1722,25 @@ pub mod instance_admin_client { )); self.inner.unary(req, path, codec).await } + /// Lists all instance partitions for the given instance. 
+ pub async fn list_instance_partitions( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstancePartitions", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.spanner.admin.instance.v1.InstanceAdmin", + "ListInstancePartitions", + )); + self.inner.unary(req, path, codec).await + } /// Gets information about a particular instance. pub async fn get_instance( &mut self, @@ -1328,5 +1972,287 @@ pub mod instance_admin_client { )); self.inner.unary(req, path, codec).await } + /// Gets information about a particular instance partition. + pub async fn get_instance_partition( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstancePartition", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.spanner.admin.instance.v1.InstanceAdmin", + "GetInstancePartition", + )); + self.inner.unary(req, path, codec).await + } + /// Creates an instance partition and begins preparing it to be used. The + /// returned [long-running operation][google.longrunning.Operation] + /// can be used to track the progress of preparing the new instance partition. + /// The instance partition name is assigned by the caller. 
If the named + /// instance partition already exists, `CreateInstancePartition` returns + /// `ALREADY_EXISTS`. + /// + /// Immediately upon completion of this request: + /// + /// * The instance partition is readable via the API, with all requested + /// attributes but no allocated resources. Its state is `CREATING`. + /// + /// Until completion of the returned operation: + /// + /// * Cancelling the operation renders the instance partition immediately + /// unreadable via the API. + /// * The instance partition can be deleted. + /// * All other attempts to modify the instance partition are rejected. + /// + /// Upon completion of the returned operation: + /// + /// * Billing for all successfully-allocated resources begins (some types + /// may have lower than the requested levels). + /// * Databases can start using this instance partition. + /// * The instance partition's allocated resource levels are readable via the + /// API. + /// * The instance partition's state becomes `READY`. + /// + /// The returned [long-running operation][google.longrunning.Operation] will + /// have a name of the format + /// `/operations/` and can be used to + /// track creation of the instance partition. The + /// [metadata][google.longrunning.Operation.metadata] field type is + /// [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]. + /// The [response][google.longrunning.Operation.response] field type is + /// [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if + /// successful. 
+ pub async fn create_instance_partition( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner.ready().await.map_err(|e| { + tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstancePartition", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.spanner.admin.instance.v1.InstanceAdmin", + "CreateInstancePartition", + )); + self.inner.unary(req, path, codec).await + } + /// Deletes an existing instance partition. Requires that the + /// instance partition is not used by any database or backup and is not the + /// default instance partition of an instance. + /// + /// Authorization requires `spanner.instancePartitions.delete` permission on + /// the resource + /// [name][google.spanner.admin.instance.v1.InstancePartition.name]. + pub async fn delete_instance_partition( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstancePartition", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.spanner.admin.instance.v1.InstanceAdmin", + "DeleteInstancePartition", + )); + self.inner.unary(req, path, codec).await + } + /// Updates an instance partition, and begins allocating or releasing resources + /// as requested. 
The returned [long-running + /// operation][google.longrunning.Operation] can be used to track the + /// progress of updating the instance partition. If the named instance + /// partition does not exist, returns `NOT_FOUND`. + /// + /// Immediately upon completion of this request: + /// + /// * For resource types for which a decrease in the instance partition's + /// allocation has been requested, billing is based on the newly-requested + /// level. + /// + /// Until completion of the returned operation: + /// + /// * Cancelling the operation sets its metadata's + /// [cancel_time][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.cancel_time], + /// and begins restoring resources to their pre-request values. The + /// operation is guaranteed to succeed at undoing all resource changes, + /// after which point it terminates with a `CANCELLED` status. + /// * All other attempts to modify the instance partition are rejected. + /// * Reading the instance partition via the API continues to give the + /// pre-request resource levels. + /// + /// Upon completion of the returned operation: + /// + /// * Billing begins for all successfully-allocated resources (some types + /// may have lower than the requested levels). + /// * All newly-reserved resources are available for serving the instance + /// partition's tables. + /// * The instance partition's new resource levels are readable via the API. + /// + /// The returned [long-running operation][google.longrunning.Operation] will + /// have a name of the format + /// `/operations/` and can be used to + /// track the instance partition modification. The + /// [metadata][google.longrunning.Operation.metadata] field type is + /// [UpdateInstancePartitionMetadata][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata]. + /// The [response][google.longrunning.Operation.response] field type is + /// [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if + /// successful. 
+ /// + /// Authorization requires `spanner.instancePartitions.update` permission on + /// the resource + /// [name][google.spanner.admin.instance.v1.InstancePartition.name]. + pub async fn update_instance_partition( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner.ready().await.map_err(|e| { + tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstancePartition", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.spanner.admin.instance.v1.InstanceAdmin", + "UpdateInstancePartition", + )); + self.inner.unary(req, path, codec).await + } + /// Lists instance partition [long-running + /// operations][google.longrunning.Operation] in the given instance. + /// An instance partition operation has a name of the form + /// `projects//instances//instancePartitions//operations/`. + /// The long-running operation + /// [metadata][google.longrunning.Operation.metadata] field type + /// `metadata.type_url` describes the type of the metadata. Operations returned + /// include those that have completed/failed/canceled within the last 7 days, + /// and pending operations. Operations returned are ordered by + /// `operation.metadata.value.start_time` in descending order starting from the + /// most recently started operation. + /// + /// Authorization requires `spanner.instancePartitionOperations.list` + /// permission on the resource + /// [parent][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.parent]. 
+ pub async fn list_instance_partition_operations( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstancePartitionOperations", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.spanner.admin.instance.v1.InstanceAdmin", + "ListInstancePartitionOperations", + )); + self.inner.unary(req, path, codec).await + } + /// Moves an instance to the target instance configuration. You can use the + /// returned [long-running operation][google.longrunning.Operation] to track + /// the progress of moving the instance. + /// + /// `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of + /// the following criteria: + /// + /// * Is undergoing a move to a different instance configuration + /// * Has backups + /// * Has an ongoing update + /// * Contains any CMEK-enabled databases + /// * Is a free trial instance + /// + /// While the operation is pending: + /// + /// * All other attempts to modify the instance, including changes to its + /// compute capacity, are rejected. + /// * The following database and backup admin operations are rejected: + /// + /// * `DatabaseAdmin.CreateDatabase` + /// * `DatabaseAdmin.UpdateDatabaseDdl` (disabled if default_leader is + /// specified in the request.) + /// * `DatabaseAdmin.RestoreDatabase` + /// * `DatabaseAdmin.CreateBackup` + /// * `DatabaseAdmin.CopyBackup` + /// + /// * Both the source and target instance configurations are subject to + /// hourly compute and storage charges. + /// * The instance might experience higher read-write latencies and a higher + /// transaction abort rate. 
However, moving an instance doesn't cause any + /// downtime. + /// + /// The returned [long-running operation][google.longrunning.Operation] has + /// a name of the format + /// `/operations/` and can be used to track + /// the move instance operation. The + /// [metadata][google.longrunning.Operation.metadata] field type is + /// [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata]. + /// The [response][google.longrunning.Operation.response] field type is + /// [Instance][google.spanner.admin.instance.v1.Instance], + /// if successful. + /// Cancelling the operation sets its metadata's + /// [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time]. + /// Cancellation is not immediate because it involves moving any data + /// previously moved to the target instance configuration back to the original + /// instance configuration. You can use this operation to track the progress of + /// the cancellation. Upon successful completion of the cancellation, the + /// operation terminates with `CANCELLED` status. + /// + /// If not cancelled, upon completion of the returned operation: + /// + /// * The instance successfully moves to the target instance + /// configuration. + /// * You are billed for compute and storage in target instance + /// configuration. + /// + /// Authorization requires the `spanner.instances.update` permission on + /// the resource [instance][google.spanner.admin.instance.v1.Instance]. + /// + /// For more details, see + /// [Move an instance](https://cloud.google.com/spanner/docs/move-instance). 
+ pub async fn move_instance( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner.ready().await.map_err(|e| { + tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = + http::uri::PathAndQuery::from_static("/google.spanner.admin.instance.v1.InstanceAdmin/MoveInstance"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.spanner.admin.instance.v1.InstanceAdmin", + "MoveInstance", + )); + self.inner.unary(req, path, codec).await + } } } diff --git a/googleapis/src/google.spanner.v1.rs b/googleapis/src/google.spanner.v1.rs index dd37ca81..f079b66b 100644 --- a/googleapis/src/google.spanner.v1.rs +++ b/googleapis/src/google.spanner.v1.rs @@ -734,6 +734,22 @@ pub struct QueryPlan { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct TransactionOptions { + /// When `exclude_txn_from_change_streams` is set to `true`: + /// * Mutations from this transaction will not be recorded in change streams + /// with DDL option `allow_txn_exclusion=true` that are tracking columns + /// modified by these transactions. + /// * Mutations from this transaction will be recorded in change streams with + /// DDL option `allow_txn_exclusion=false or not set` that are tracking + /// columns modified by these transactions. + /// + /// When `exclude_txn_from_change_streams` is set to `false` or not set, + /// mutations from this transaction will be recorded in all change streams that + /// are tracking columns modified by these transactions. + /// `exclude_txn_from_change_streams` may only be specified for read-write or + /// partitioned-dml transactions, otherwise the API will return an + /// `INVALID_ARGUMENT` error. + #[prost(bool, tag = "5")] + pub exclude_txn_from_change_streams: bool, /// Required. 
The type of transaction. #[prost(oneof = "transaction_options::Mode", tags = "1, 3, 2")] pub mode: ::core::option::Option, @@ -988,6 +1004,13 @@ pub struct Type { /// affect serialization) and clients can ignore it on the read path. #[prost(enumeration = "TypeAnnotationCode", tag = "4")] pub type_annotation: i32, + /// If [code][google.spanner.v1.Type.code] == + /// [PROTO][google.spanner.v1.TypeCode.PROTO] or + /// [code][google.spanner.v1.Type.code] == + /// [ENUM][google.spanner.v1.TypeCode.ENUM], then `proto_type_fqn` is the fully + /// qualified name of the proto type representing the proto/enum definition. + #[prost(string, tag = "5")] + pub proto_type_fqn: ::prost::alloc::string::String, } /// `StructType` defines the fields of a [STRUCT][google.spanner.v1.TypeCode.STRUCT] type. #[allow(clippy::derive_partial_eq_without_eq)] @@ -1041,6 +1064,9 @@ pub enum TypeCode { /// Encoded as `number`, or the strings `"NaN"`, `"Infinity"`, or /// `"-Infinity"`. Float64 = 3, + /// Encoded as `number`, or the strings `"NaN"`, `"Infinity"`, or + /// `"-Infinity"`. + Float32 = 15, /// Encoded as `string` in RFC 3339 timestamp format. The time zone /// must be present, and must be `"Z"`. /// @@ -1083,6 +1109,11 @@ pub enum TypeCode { /// preserved. /// - JSON array elements will have their order preserved. Json = 11, + /// Encoded as a base64-encoded `string`, as described in RFC 4648, + /// section 4. + Proto = 13, + /// Encoded as `string`, in decimal format. + Enum = 14, } impl TypeCode { /// String value of the enum field names used in the ProtoBuf definition. 
@@ -1095,6 +1126,7 @@ impl TypeCode { TypeCode::Bool => "BOOL", TypeCode::Int64 => "INT64", TypeCode::Float64 => "FLOAT64", + TypeCode::Float32 => "FLOAT32", TypeCode::Timestamp => "TIMESTAMP", TypeCode::Date => "DATE", TypeCode::String => "STRING", @@ -1103,6 +1135,8 @@ impl TypeCode { TypeCode::Struct => "STRUCT", TypeCode::Numeric => "NUMERIC", TypeCode::Json => "JSON", + TypeCode::Proto => "PROTO", + TypeCode::Enum => "ENUM", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -1112,6 +1146,7 @@ impl TypeCode { "BOOL" => Some(Self::Bool), "INT64" => Some(Self::Int64), "FLOAT64" => Some(Self::Float64), + "FLOAT32" => Some(Self::Float32), "TIMESTAMP" => Some(Self::Timestamp), "DATE" => Some(Self::Date), "STRING" => Some(Self::String), @@ -1120,6 +1155,8 @@ impl TypeCode { "STRUCT" => Some(Self::Struct), "NUMERIC" => Some(Self::Numeric), "JSON" => Some(Self::Json), + "PROTO" => Some(Self::Proto), + "ENUM" => Some(Self::Enum), _ => None, } } @@ -1148,6 +1185,10 @@ pub enum TypeAnnotationCode { /// [JSON][google.spanner.v1.TypeCode.JSON] when a client interacts with PostgreSQL-enabled /// Spanner databases. PgJsonb = 3, + /// PostgreSQL compatible OID type. This annotation can be used by a client + /// interacting with PostgreSQL-enabled Spanner database to specify that a + /// value should be treated using the semantics of the OID type. + PgOid = 4, } impl TypeAnnotationCode { /// String value of the enum field names used in the ProtoBuf definition. @@ -1159,6 +1200,7 @@ impl TypeAnnotationCode { TypeAnnotationCode::Unspecified => "TYPE_ANNOTATION_CODE_UNSPECIFIED", TypeAnnotationCode::PgNumeric => "PG_NUMERIC", TypeAnnotationCode::PgJsonb => "PG_JSONB", + TypeAnnotationCode::PgOid => "PG_OID", } } /// Creates an enum from field names used in the ProtoBuf definition. 
@@ -1167,6 +1209,7 @@ impl TypeAnnotationCode { "TYPE_ANNOTATION_CODE_UNSPECIFIED" => Some(Self::Unspecified), "PG_NUMERIC" => Some(Self::PgNumeric), "PG_JSONB" => Some(Self::PgJsonb), + "PG_OID" => Some(Self::PgOid), _ => None, } } @@ -1383,7 +1426,8 @@ pub struct CreateSessionRequest { #[prost(message, optional, tag = "2")] pub session: ::core::option::Option, } -/// The request for [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. +/// The request for +/// [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BatchCreateSessionsRequest { @@ -1397,11 +1441,13 @@ pub struct BatchCreateSessionsRequest { /// The API may return fewer than the requested number of sessions. If a /// specific number of sessions are desired, the client can make additional /// calls to BatchCreateSessions (adjusting - /// [session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count] as necessary). + /// [session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count] + /// as necessary). #[prost(int32, tag = "3")] pub session_count: i32, } -/// The response for [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. +/// The response for +/// [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BatchCreateSessionsResponse { @@ -1437,6 +1483,15 @@ pub struct Session { /// The database role which created this session. #[prost(string, tag = "5")] pub creator_role: ::prost::alloc::string::String, + /// Optional. If true, specifies a multiplexed session. A multiplexed session + /// may be used for multiple, concurrent read-only operations but can not be + /// used for read-write transactions, partitioned reads, or partitioned + /// queries. 
Multiplexed sessions can be created via + /// [CreateSession][google.spanner.v1.Spanner.CreateSession] but not via + /// [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. + /// Multiplexed sessions may not be deleted nor listed. + #[prost(bool, tag = "6")] + pub multiplexed: bool, } /// The request for [GetSession][google.spanner.v1.Spanner.GetSession]. #[allow(clippy::derive_partial_eq_without_eq)] @@ -1458,7 +1513,8 @@ pub struct ListSessionsRequest { #[prost(int32, tag = "2")] pub page_size: i32, /// If non-empty, `page_token` should contain a - /// [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token] from a previous + /// [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token] + /// from a previous /// [ListSessionsResponse][google.spanner.v1.ListSessionsResponse]. #[prost(string, tag = "3")] pub page_token: ::prost::alloc::string::String, @@ -1483,8 +1539,8 @@ pub struct ListSessionsResponse { #[prost(message, repeated, tag = "1")] pub sessions: ::prost::alloc::vec::Vec, /// `next_page_token` can be sent in a subsequent - /// [ListSessions][google.spanner.v1.Spanner.ListSessions] call to fetch more of the matching - /// sessions. + /// [ListSessions][google.spanner.v1.Spanner.ListSessions] call to fetch more + /// of the matching sessions. #[prost(string, tag = "2")] pub next_page_token: ::prost::alloc::string::String, } @@ -1584,6 +1640,127 @@ pub mod request_options { } } } +/// The DirectedReadOptions can be used to indicate which replicas or regions +/// should be used for non-transactional reads or queries. +/// +/// DirectedReadOptions may only be specified for a read-only transaction, +/// otherwise the API will return an `INVALID_ARGUMENT` error. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DirectedReadOptions { + /// Required. At most one of either include_replicas or exclude_replicas + /// should be present in the message. 
+ #[prost(oneof = "directed_read_options::Replicas", tags = "1, 2")] + pub replicas: ::core::option::Option, +} +/// Nested message and enum types in `DirectedReadOptions`. +pub mod directed_read_options { + /// The directed read replica selector. + /// Callers must provide one or more of the following fields for replica + /// selection: + /// + /// * `location` - The location must be one of the regions within the + /// multi-region configuration of your database. + /// * `type` - The type of the replica. + /// + /// Some examples of using replica_selectors are: + /// + /// * `location:us-east1` --> The "us-east1" replica(s) of any available type + /// will be used to process the request. + /// * `type:READ_ONLY` --> The "READ_ONLY" type replica(s) in nearest + /// available location will be used to process the + /// request. + /// * `location:us-east1 type:READ_ONLY` --> The "READ_ONLY" type replica(s) + /// in location "us-east1" will be used to process + /// the request. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct ReplicaSelection { + /// The location or region of the serving requests, e.g. "us-east1". + #[prost(string, tag = "1")] + pub location: ::prost::alloc::string::String, + /// The type of replica. + #[prost(enumeration = "replica_selection::Type", tag = "2")] + pub r#type: i32, + } + /// Nested message and enum types in `ReplicaSelection`. + pub mod replica_selection { + /// Indicates the type of replica. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum Type { + /// Not specified. + Unspecified = 0, + /// Read-write replicas support both reads and writes. + ReadWrite = 1, + /// Read-only replicas only support reads (not writes). + ReadOnly = 2, + } + impl Type { + /// String value of the enum field names used in the ProtoBuf definition. 
+ /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Type::Unspecified => "TYPE_UNSPECIFIED", + Type::ReadWrite => "READ_WRITE", + Type::ReadOnly => "READ_ONLY", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "TYPE_UNSPECIFIED" => Some(Self::Unspecified), + "READ_WRITE" => Some(Self::ReadWrite), + "READ_ONLY" => Some(Self::ReadOnly), + _ => None, + } + } + } + } + /// An IncludeReplicas contains a repeated set of ReplicaSelection which + /// indicates the order in which replicas should be considered. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct IncludeReplicas { + /// The directed read replica selector. + #[prost(message, repeated, tag = "1")] + pub replica_selections: ::prost::alloc::vec::Vec, + /// If true, Spanner will not route requests to a replica outside the + /// include_replicas list when all of the specified replicas are unavailable + /// or unhealthy. Default value is `false`. + #[prost(bool, tag = "2")] + pub auto_failover_disabled: bool, + } + /// An ExcludeReplicas contains a repeated set of ReplicaSelection that should + /// be excluded from serving requests. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct ExcludeReplicas { + /// The directed read replica selector. + #[prost(message, repeated, tag = "1")] + pub replica_selections: ::prost::alloc::vec::Vec, + } + /// Required. At most one of either include_replicas or exclude_replicas + /// should be present in the message. 
+ #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Replicas { + /// Include_replicas indicates the order of replicas (as they appear in + /// this list) to process the request. If auto_failover_disabled is set to + /// true and all replicas are exhausted without finding a healthy replica, + /// Spanner will wait for a replica in the list to become available, requests + /// may fail due to `DEADLINE_EXCEEDED` errors. + #[prost(message, tag = "1")] + IncludeReplicas(IncludeReplicas), + /// Exclude_replicas indicates that specified replicas should be excluded + /// from serving requests. Spanner will not route requests to the replicas + /// in this list. + #[prost(message, tag = "2")] + ExcludeReplicas(ExcludeReplicas), + } +} /// The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and /// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]. #[allow(clippy::derive_partial_eq_without_eq)] @@ -1624,7 +1801,8 @@ pub struct ExecuteSqlRequest { pub params: ::core::option::Option<::prost_types::Struct>, /// It is not always possible for Cloud Spanner to infer the right SQL type /// from a JSON value. For example, values of type `BYTES` and values - /// of type `STRING` both appear in [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings. + /// of type `STRING` both appear in + /// [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings. /// /// In these cases, `param_types` can be used to specify the exact /// SQL type for some or all of the SQL statement parameters. See the @@ -1634,15 +1812,18 @@ pub struct ExecuteSqlRequest { pub param_types: ::std::collections::HashMap<::prost::alloc::string::String, Type>, /// If this request is resuming a previously interrupted SQL statement /// execution, `resume_token` should be copied from the last - /// [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the interruption. 
Doing this - /// enables the new SQL statement execution to resume where the last one left - /// off. The rest of the request parameters must exactly match the - /// request that yielded this token. + /// [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the + /// interruption. Doing this enables the new SQL statement execution to resume + /// where the last one left off. The rest of the request parameters must + /// exactly match the request that yielded this token. #[prost(bytes = "vec", tag = "6")] pub resume_token: ::prost::alloc::vec::Vec, /// Used to control the amount of debugging information returned in - /// [ResultSetStats][google.spanner.v1.ResultSetStats]. If [partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token] is set, [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] can only - /// be set to [QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL]. + /// [ResultSetStats][google.spanner.v1.ResultSetStats]. If + /// [partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token] is + /// set, [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] can only + /// be set to + /// [QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL]. #[prost(enumeration = "execute_sql_request::QueryMode", tag = "7")] pub query_mode: i32, /// If present, results will be restricted to the specified partition @@ -1669,11 +1850,14 @@ pub struct ExecuteSqlRequest { /// Common options for this request. #[prost(message, optional, tag = "11")] pub request_options: ::core::option::Option, + /// Directed read options for this request. + #[prost(message, optional, tag = "15")] + pub directed_read_options: ::core::option::Option, /// If this is for a partitioned query and this field is set to `true`, the - /// request will be executed via Spanner independent compute resources. + /// request is executed with Spanner Data Boost independent compute resources. 
/// /// If the field is set to `true` but the request does not set - /// `partition_token`, the API will return an `INVALID_ARGUMENT` error. + /// `partition_token`, the API returns an `INVALID_ARGUMENT` error. #[prost(bool, tag = "16")] pub data_boost_enabled: bool, } @@ -1784,17 +1968,17 @@ pub struct ExecuteBatchDmlRequest { /// transaction. #[prost(message, optional, tag = "2")] pub transaction: ::core::option::Option, - /// Required. The list of statements to execute in this batch. Statements are executed - /// serially, such that the effects of statement `i` are visible to statement - /// `i+1`. Each statement must be a DML statement. Execution stops at the - /// first failed statement; the remaining statements are not executed. + /// Required. The list of statements to execute in this batch. Statements are + /// executed serially, such that the effects of statement `i` are visible to + /// statement `i+1`. Each statement must be a DML statement. Execution stops at + /// the first failed statement; the remaining statements are not executed. /// /// Callers must provide at least one statement. #[prost(message, repeated, tag = "3")] pub statements: ::prost::alloc::vec::Vec, - /// Required. A per-transaction sequence number used to identify this request. This field - /// makes each request idempotent such that if the request is received multiple - /// times, at most one will succeed. + /// Required. A per-transaction sequence number used to identify this request. + /// This field makes each request idempotent such that if the request is + /// received multiple times, at most one will succeed. /// /// The sequence number must be monotonically increasing within the /// transaction. If a request arrives for the first time with an out-of-order @@ -1831,7 +2015,9 @@ pub mod execute_batch_dml_request { pub params: ::core::option::Option<::prost_types::Struct>, /// It is not always possible for Cloud Spanner to infer the right SQL type /// from a JSON value. 
For example, values of type `BYTES` and values - /// of type `STRING` both appear in [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as JSON strings. + /// of type `STRING` both appear in + /// [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as + /// JSON strings. /// /// In these cases, `param_types` can be used to specify the exact /// SQL type for some or all of the SQL statement parameters. See the @@ -1841,40 +2027,49 @@ pub mod execute_batch_dml_request { pub param_types: ::std::collections::HashMap<::prost::alloc::string::String, super::Type>, } } -/// The response for [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. Contains a list -/// of [ResultSet][google.spanner.v1.ResultSet] messages, one for each DML statement that has successfully -/// executed, in the same order as the statements in the request. If a statement -/// fails, the status in the response body identifies the cause of the failure. +/// The response for +/// [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. Contains a list +/// of [ResultSet][google.spanner.v1.ResultSet] messages, one for each DML +/// statement that has successfully executed, in the same order as the statements +/// in the request. If a statement fails, the status in the response body +/// identifies the cause of the failure. /// /// To check for DML statements that failed, use the following approach: /// -/// 1. Check the status in the response message. The [google.rpc.Code][google.rpc.Code] enum +/// 1. Check the status in the response message. The +/// [google.rpc.Code][google.rpc.Code] enum /// value `OK` indicates that all statements were executed successfully. /// 2. If the status was not `OK`, check the number of result sets in the -/// response. If the response contains `N` [ResultSet][google.spanner.v1.ResultSet] messages, then -/// statement `N+1` in the request failed. +/// response. 
If the response contains `N` +/// [ResultSet][google.spanner.v1.ResultSet] messages, then statement `N+1` in +/// the request failed. /// /// Example 1: /// /// * Request: 5 DML statements, all executed successfully. -/// * Response: 5 [ResultSet][google.spanner.v1.ResultSet] messages, with the status `OK`. +/// * Response: 5 [ResultSet][google.spanner.v1.ResultSet] messages, with the +/// status `OK`. /// /// Example 2: /// /// * Request: 5 DML statements. The third statement has a syntax error. -/// * Response: 2 [ResultSet][google.spanner.v1.ResultSet] messages, and a syntax error (`INVALID_ARGUMENT`) -/// status. The number of [ResultSet][google.spanner.v1.ResultSet] messages indicates that the third -/// statement failed, and the fourth and fifth statements were not executed. +/// * Response: 2 [ResultSet][google.spanner.v1.ResultSet] messages, and a syntax +/// error (`INVALID_ARGUMENT`) +/// status. The number of [ResultSet][google.spanner.v1.ResultSet] messages +/// indicates that the third statement failed, and the fourth and fifth +/// statements were not executed. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExecuteBatchDmlResponse { - /// One [ResultSet][google.spanner.v1.ResultSet] for each statement in the request that ran successfully, - /// in the same order as the statements in the request. Each [ResultSet][google.spanner.v1.ResultSet] does - /// not contain any rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each [ResultSet][google.spanner.v1.ResultSet] contain - /// the number of rows modified by the statement. + /// One [ResultSet][google.spanner.v1.ResultSet] for each statement in the + /// request that ran successfully, in the same order as the statements in the + /// request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any + /// rows. 
The [ResultSetStats][google.spanner.v1.ResultSetStats] in each + /// [ResultSet][google.spanner.v1.ResultSet] contain the number of rows + /// modified by the statement. /// - /// Only the first [ResultSet][google.spanner.v1.ResultSet] in the response contains valid - /// [ResultSetMetadata][google.spanner.v1.ResultSetMetadata]. + /// Only the first [ResultSet][google.spanner.v1.ResultSet] in the response + /// contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata]. #[prost(message, repeated, tag = "1")] pub result_sets: ::prost::alloc::vec::Vec, /// If all DML statements are executed successfully, the status is `OK`. @@ -1917,15 +2112,17 @@ pub struct PartitionQueryRequest { /// transactions are not. #[prost(message, optional, tag = "2")] pub transaction: ::core::option::Option, - /// Required. The query request to generate partitions for. The request will fail if - /// the query is not root partitionable. The query plan of a root - /// partitionable query has a single distributed union operator. A distributed - /// union operator conceptually divides one or more tables into multiple - /// splits, remotely evaluates a subquery independently on each split, and - /// then unions all results. - /// - /// This must not contain DML commands, such as INSERT, UPDATE, or - /// DELETE. Use [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] with a + /// Required. The query request to generate partitions for. The request will + /// fail if the query is not root partitionable. For a query to be root + /// partitionable, it needs to satisfy a few conditions. For example, if the + /// query execution plan contains a distributed union operator, then it must be + /// the first operator in the plan. For more information about other + /// conditions, see [Read data in + /// parallel](). + /// + /// The query request must not contain DML commands, such as INSERT, UPDATE, or + /// DELETE. 
Use + /// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] with a /// PartitionedDml transaction for large, partition-friendly DML operations. #[prost(string, tag = "3")] pub sql: ::prost::alloc::string::String, @@ -1945,7 +2142,8 @@ pub struct PartitionQueryRequest { pub params: ::core::option::Option<::prost_types::Struct>, /// It is not always possible for Cloud Spanner to infer the right SQL type /// from a JSON value. For example, values of type `BYTES` and values - /// of type `STRING` both appear in [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings. + /// of type `STRING` both appear in + /// [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings. /// /// In these cases, `param_types` can be used to specify the exact /// SQL type for some or all of the SQL query parameters. See the @@ -1971,18 +2169,24 @@ pub struct PartitionReadRequest { /// Required. The name of the table in the database to be read. #[prost(string, tag = "3")] pub table: ::prost::alloc::string::String, - /// If non-empty, the name of an index on [table][google.spanner.v1.PartitionReadRequest.table]. This index is - /// used instead of the table primary key when interpreting [key_set][google.spanner.v1.PartitionReadRequest.key_set] - /// and sorting result rows. See [key_set][google.spanner.v1.PartitionReadRequest.key_set] for further information. + /// If non-empty, the name of an index on + /// [table][google.spanner.v1.PartitionReadRequest.table]. This index is used + /// instead of the table primary key when interpreting + /// [key_set][google.spanner.v1.PartitionReadRequest.key_set] and sorting + /// result rows. See [key_set][google.spanner.v1.PartitionReadRequest.key_set] + /// for further information. #[prost(string, tag = "4")] pub index: ::prost::alloc::string::String, - /// The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be returned for each row matching - /// this request. 
+ /// The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be + /// returned for each row matching this request. #[prost(string, repeated, tag = "5")] pub columns: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, /// Required. `key_set` identifies the rows to be yielded. `key_set` names the - /// primary keys of the rows in [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless [index][google.spanner.v1.PartitionReadRequest.index] - /// is present. If [index][google.spanner.v1.PartitionReadRequest.index] is present, then [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names + /// primary keys of the rows in + /// [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless + /// [index][google.spanner.v1.PartitionReadRequest.index] is present. If + /// [index][google.spanner.v1.PartitionReadRequest.index] is present, then + /// [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names /// index keys in [index][google.spanner.v1.PartitionReadRequest.index]. /// /// It is not an error for the `key_set` to name rows that do not @@ -2031,24 +2235,31 @@ pub struct ReadRequest { /// Required. The name of the table in the database to be read. #[prost(string, tag = "3")] pub table: ::prost::alloc::string::String, - /// If non-empty, the name of an index on [table][google.spanner.v1.ReadRequest.table]. This index is - /// used instead of the table primary key when interpreting [key_set][google.spanner.v1.ReadRequest.key_set] - /// and sorting result rows. See [key_set][google.spanner.v1.ReadRequest.key_set] for further information. + /// If non-empty, the name of an index on + /// [table][google.spanner.v1.ReadRequest.table]. This index is used instead of + /// the table primary key when interpreting + /// [key_set][google.spanner.v1.ReadRequest.key_set] and sorting result rows. + /// See [key_set][google.spanner.v1.ReadRequest.key_set] for further + /// information. 
#[prost(string, tag = "4")] pub index: ::prost::alloc::string::String, - /// Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be returned for each row matching - /// this request. + /// Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be + /// returned for each row matching this request. #[prost(string, repeated, tag = "5")] pub columns: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, /// Required. `key_set` identifies the rows to be yielded. `key_set` names the - /// primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to be yielded, unless [index][google.spanner.v1.ReadRequest.index] - /// is present. If [index][google.spanner.v1.ReadRequest.index] is present, then [key_set][google.spanner.v1.ReadRequest.key_set] instead names - /// index keys in [index][google.spanner.v1.ReadRequest.index]. + /// primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to + /// be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present. + /// If [index][google.spanner.v1.ReadRequest.index] is present, then + /// [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys + /// in [index][google.spanner.v1.ReadRequest.index]. /// - /// If the [partition_token][google.spanner.v1.ReadRequest.partition_token] field is empty, rows are yielded - /// in table primary key order (if [index][google.spanner.v1.ReadRequest.index] is empty) or index key order - /// (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the [partition_token][google.spanner.v1.ReadRequest.partition_token] field is not - /// empty, rows will be yielded in an unspecified order. + /// If the [partition_token][google.spanner.v1.ReadRequest.partition_token] + /// field is empty, rows are yielded in table primary key order (if + /// [index][google.spanner.v1.ReadRequest.index] is empty) or index key order + /// (if [index][google.spanner.v1.ReadRequest.index] is non-empty). 
If the + /// [partition_token][google.spanner.v1.ReadRequest.partition_token] field is + /// not empty, rows will be yielded in an unspecified order. /// /// It is not an error for the `key_set` to name rows that do not /// exist in the database. Read yields nothing for nonexistent rows. @@ -2061,9 +2272,9 @@ pub struct ReadRequest { pub limit: i64, /// If this request is resuming a previously interrupted read, /// `resume_token` should be copied from the last - /// [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the interruption. Doing this - /// enables the new read to resume where the last read left off. The - /// rest of the request parameters must exactly match the request + /// [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the + /// interruption. Doing this enables the new read to resume where the last read + /// left off. The rest of the request parameters must exactly match the request /// that yielded this token. #[prost(bytes = "vec", tag = "9")] pub resume_token: ::prost::alloc::vec::Vec, @@ -2076,15 +2287,142 @@ pub struct ReadRequest { /// Common options for this request. #[prost(message, optional, tag = "11")] pub request_options: ::core::option::Option, + /// Directed read options for this request. + #[prost(message, optional, tag = "14")] + pub directed_read_options: ::core::option::Option, /// If this is for a partitioned read and this field is set to `true`, the - /// request will be executed via Spanner independent compute resources. + /// request is executed with Spanner Data Boost independent compute resources. /// /// If the field is set to `true` but the request does not set - /// `partition_token`, the API will return an `INVALID_ARGUMENT` error. + /// `partition_token`, the API returns an `INVALID_ARGUMENT` error. #[prost(bool, tag = "15")] pub data_boost_enabled: bool, + /// Optional. Order for the returned rows. 
+ /// + /// By default, Spanner will return result rows in primary key order except for + /// PartitionRead requests. For applications that do not require rows to be + /// returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting + /// `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval, + /// resulting in lower latencies in certain cases (e.g. bulk point lookups). + #[prost(enumeration = "read_request::OrderBy", tag = "16")] + pub order_by: i32, + /// Optional. Lock Hint for the request, it can only be used with read-write + /// transactions. + #[prost(enumeration = "read_request::LockHint", tag = "17")] + pub lock_hint: i32, +} +/// Nested message and enum types in `ReadRequest`. +pub mod read_request { + /// An option to control the order in which rows are returned from a read. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum OrderBy { + /// Default value. + /// + /// ORDER_BY_UNSPECIFIED is equivalent to ORDER_BY_PRIMARY_KEY. + Unspecified = 0, + /// Read rows are returned in primary key order. + /// + /// In the event that this option is used in conjunction with the + /// `partition_token` field, the API will return an `INVALID_ARGUMENT` error. + PrimaryKey = 1, + /// Read rows are returned in any order. + NoOrder = 2, + } + impl OrderBy { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + OrderBy::Unspecified => "ORDER_BY_UNSPECIFIED", + OrderBy::PrimaryKey => "ORDER_BY_PRIMARY_KEY", + OrderBy::NoOrder => "ORDER_BY_NO_ORDER", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "ORDER_BY_UNSPECIFIED" => Some(Self::Unspecified), + "ORDER_BY_PRIMARY_KEY" => Some(Self::PrimaryKey), + "ORDER_BY_NO_ORDER" => Some(Self::NoOrder), + _ => None, + } + } + } + /// A lock hint mechanism for reads done within a transaction. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum LockHint { + /// Default value. + /// + /// LOCK_HINT_UNSPECIFIED is equivalent to LOCK_HINT_SHARED. + Unspecified = 0, + /// Acquire shared locks. + /// + /// By default when you perform a read as part of a read-write transaction, + /// Spanner acquires shared read locks, which allows other reads to still + /// access the data until your transaction is ready to commit. When your + /// transaction is committing and writes are being applied, the transaction + /// attempts to upgrade to an exclusive lock for any data you are writing. + /// For more information about locks, see [Lock + /// modes](). + Shared = 1, + /// Acquire exclusive locks. + /// + /// Requesting exclusive locks is beneficial if you observe high write + /// contention, which means you notice that multiple transactions are + /// concurrently trying to read and write to the same data, resulting in a + /// large number of aborts. This problem occurs when two transactions + /// initially acquire shared locks and then both try to upgrade to exclusive + /// locks at the same time. In this situation both transactions are waiting + /// for the other to give up their lock, resulting in a deadlocked situation. + /// Spanner is able to detect this occurring and force one of the + /// transactions to abort. However, this is a slow and expensive operation + /// and results in lower performance. 
In this case it makes sense to acquire + /// exclusive locks at the start of the transaction because then when + /// multiple transactions try to act on the same data, they automatically get + /// serialized. Each transaction waits its turn to acquire the lock and + /// avoids getting into deadlock situations. + /// + /// Because the exclusive lock hint is just a hint, it should not be + /// considered equivalent to a mutex. In other words, you should not use + /// Spanner exclusive locks as a mutual exclusion mechanism for the execution + /// of code outside of Spanner. + /// + /// **Note:** Request exclusive locks judiciously because they block others + /// from reading that data for the entire transaction, rather than just when + /// the writes are being performed. Unless you observe high write contention, + /// you should use the default of shared read locks so you don't prematurely + /// block other clients from reading the data that you're writing to. + Exclusive = 2, + } + impl LockHint { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + LockHint::Unspecified => "LOCK_HINT_UNSPECIFIED", + LockHint::Shared => "LOCK_HINT_SHARED", + LockHint::Exclusive => "LOCK_HINT_EXCLUSIVE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "LOCK_HINT_UNSPECIFIED" => Some(Self::Unspecified), + "LOCK_HINT_SHARED" => Some(Self::Shared), + "LOCK_HINT_EXCLUSIVE" => Some(Self::Exclusive), + _ => None, + } + } + } } -/// The request for [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. +/// The request for +/// [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BeginTransactionRequest { @@ -2115,10 +2453,17 @@ pub struct CommitRequest { #[prost(message, repeated, tag = "4")] pub mutations: ::prost::alloc::vec::Vec, /// If `true`, then statistics related to the transaction will be included in - /// the [CommitResponse][google.spanner.v1.CommitResponse.commit_stats]. Default value is - /// `false`. + /// the [CommitResponse][google.spanner.v1.CommitResponse.commit_stats]. + /// Default value is `false`. #[prost(bool, tag = "5")] pub return_commit_stats: bool, + /// Optional. The amount of latency this request is willing to incur in order + /// to improve throughput. If this field is not set, Spanner assumes requests + /// are relatively latency sensitive and automatically determines an + /// appropriate delay time. You can specify a batching delay value between 0 + /// and 500 ms. + #[prost(message, optional, tag = "8")] + pub max_commit_delay: ::core::option::Option<::prost_types::Duration>, /// Common options for this request. #[prost(message, optional, tag = "6")] pub request_options: ::core::option::Option, @@ -2159,6 +2504,63 @@ pub struct RollbackRequest { #[prost(bytes = "vec", tag = "2")] pub transaction_id: ::prost::alloc::vec::Vec, } +/// The request for [BatchWrite][google.spanner.v1.Spanner.BatchWrite]. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BatchWriteRequest { + /// Required. The session in which the batch request is to be run. + #[prost(string, tag = "1")] + pub session: ::prost::alloc::string::String, + /// Common options for this request. + #[prost(message, optional, tag = "3")] + pub request_options: ::core::option::Option, + /// Required. The groups of mutations to be applied. + #[prost(message, repeated, tag = "4")] + pub mutation_groups: ::prost::alloc::vec::Vec, + /// Optional. 
When `exclude_txn_from_change_streams` is set to `true`: + /// * Mutations from all transactions in this batch write operation will not + /// be recorded in change streams with DDL option `allow_txn_exclusion=true` + /// that are tracking columns modified by these transactions. + /// * Mutations from all transactions in this batch write operation will be + /// recorded in change streams with DDL option `allow_txn_exclusion=false or + /// not set` that are tracking columns modified by these transactions. + /// + /// When `exclude_txn_from_change_streams` is set to `false` or not set, + /// mutations from all transactions in this batch write operation will be + /// recorded in all change streams that are tracking columns modified by these + /// transactions. + #[prost(bool, tag = "5")] + pub exclude_txn_from_change_streams: bool, +} +/// Nested message and enum types in `BatchWriteRequest`. +pub mod batch_write_request { + /// A group of mutations to be committed together. Related mutations should be + /// placed in a group. For example, two mutations inserting rows with the same + /// primary key prefix in both parent and child tables are related. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct MutationGroup { + /// Required. The mutations in this group. + #[prost(message, repeated, tag = "1")] + pub mutations: ::prost::alloc::vec::Vec, + } +} +/// The result of applying a batch of mutations. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BatchWriteResponse { + /// The mutation groups applied in this batch. The values index into the + /// `mutation_groups` field in the corresponding `BatchWriteRequest`. + #[prost(int32, repeated, tag = "1")] + pub indexes: ::prost::alloc::vec::Vec, + /// An `OK` status indicates success. Any other status indicates a failure. 
+ #[prost(message, optional, tag = "2")] + pub status: ::core::option::Option, + /// The commit timestamp of the transaction that applied this batch. + /// Present if `status` is `OK`, absent otherwise. + #[prost(message, optional, tag = "3")] + pub commit_timestamp: ::core::option::Option<::prost_types::Timestamp>, +} /// Generated client implementations. pub mod spanner_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] @@ -2348,10 +2750,12 @@ pub mod spanner_client { /// /// Operations inside read-write transactions might return `ABORTED`. If /// this occurs, the application should restart the transaction from - /// the beginning. See [Transaction][google.spanner.v1.Transaction] for more details. + /// the beginning. See [Transaction][google.spanner.v1.Transaction] for more + /// details. /// /// Larger result sets can be fetched in streaming fashion by calling - /// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] instead. + /// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] + /// instead. pub async fn execute_sql( &mut self, request: impl tonic::IntoRequest, @@ -2366,11 +2770,11 @@ pub mod spanner_client { .insert(GrpcMethod::new("google.spanner.v1.Spanner", "ExecuteSql")); self.inner.unary(req, path, codec).await } - /// Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the result - /// set as a stream. Unlike [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there - /// is no limit on the size of the returned result set. However, no - /// individual row in the result set can exceed 100 MiB, and no - /// column value can exceed 10 MiB. + /// Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the + /// result set as a stream. Unlike + /// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on + /// the size of the returned result set. 
However, no individual row in the + /// result set can exceed 100 MiB, and no column value can exceed 10 MiB. pub async fn execute_streaming_sql( &mut self, request: impl tonic::IntoRequest, @@ -2391,9 +2795,10 @@ pub mod spanner_client { /// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. /// /// Statements are executed in sequential order. A request can succeed even if - /// a statement fails. The [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] field in the - /// response provides information about the statement that failed. Clients must - /// inspect this field to determine whether an error occurred. + /// a statement fails. The + /// [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] + /// field in the response provides information about the statement that failed. + /// Clients must inspect this field to determine whether an error occurred. /// /// Execution stops after the first failed statement; the remaining statements /// are not executed. @@ -2413,14 +2818,15 @@ pub mod spanner_client { } /// Reads rows from the database using key lookups and scans, as a /// simple key/value style alternative to - /// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be used to - /// return a result set larger than 10 MiB; if the read matches more + /// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be + /// used to return a result set larger than 10 MiB; if the read matches more /// data than that, the read fails with a `FAILED_PRECONDITION` /// error. /// /// Reads inside read-write transactions might return `ABORTED`. If /// this occurs, the application should restart the transaction from - /// the beginning. See [Transaction][google.spanner.v1.Transaction] for more details. + /// the beginning. See [Transaction][google.spanner.v1.Transaction] for more + /// details. 
/// /// Larger result sets can be yielded in streaming fashion by calling /// [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead. @@ -2438,9 +2844,9 @@ pub mod spanner_client { .insert(GrpcMethod::new("google.spanner.v1.Spanner", "Read")); self.inner.unary(req, path, codec).await } - /// Like [Read][google.spanner.v1.Spanner.Read], except returns the result set as a - /// stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no limit on the - /// size of the returned result set. However, no individual row in + /// Like [Read][google.spanner.v1.Spanner.Read], except returns the result set + /// as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no + /// limit on the size of the returned result set. However, no individual row in /// the result set can exceed 100 MiB, and no column value can exceed /// 10 MiB. pub async fn streaming_read( @@ -2459,7 +2865,8 @@ pub mod spanner_client { self.inner.server_streaming(req, path, codec).await } /// Begins a new transaction. This step can often be skipped: - /// [Read][google.spanner.v1.Spanner.Read], [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and + /// [Read][google.spanner.v1.Spanner.Read], + /// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and /// [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a /// side-effect. pub async fn begin_transaction( @@ -2506,8 +2913,9 @@ pub mod spanner_client { } /// Rolls back a transaction, releasing any locks it holds. It is a good /// idea to call this for any transaction that includes one or more - /// [Read][google.spanner.v1.Spanner.Read] or [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and - /// ultimately decides not to commit. + /// [Read][google.spanner.v1.Spanner.Read] or + /// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately + /// decides not to commit. 
/// /// `Rollback` returns `OK` if it successfully aborts the transaction, the /// transaction was already aborted, or the transaction is not @@ -2528,10 +2936,11 @@ pub mod spanner_client { } /// Creates a set of partition tokens that can be used to execute a query /// operation in parallel. Each of the returned partition tokens can be used - /// by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to specify a subset - /// of the query result to read. The same session and read-only transaction - /// must be used by the PartitionQueryRequest used to create the - /// partition tokens and the ExecuteSqlRequests that use the partition tokens. + /// by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to + /// specify a subset of the query result to read. The same session and + /// read-only transaction must be used by the PartitionQueryRequest used to + /// create the partition tokens and the ExecuteSqlRequests that use the + /// partition tokens. /// /// Partition tokens become invalid when the session used to create them /// is deleted, is idle for too long, begins a new transaction, or becomes too @@ -2553,12 +2962,13 @@ pub mod spanner_client { } /// Creates a set of partition tokens that can be used to execute a read /// operation in parallel. Each of the returned partition tokens can be used - /// by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a subset of the read - /// result to read. The same session and read-only transaction must be used by - /// the PartitionReadRequest used to create the partition tokens and the - /// ReadRequests that use the partition tokens. There are no ordering - /// guarantees on rows returned among the returned partition tokens, or even - /// within each individual StreamingRead call issued with a partition_token. + /// by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a + /// subset of the read result to read. 
The same session and read-only + /// transaction must be used by the PartitionReadRequest used to create the + /// partition tokens and the ReadRequests that use the partition tokens. There + /// are no ordering guarantees on rows returned among the returned partition + /// tokens, or even within each individual StreamingRead call issued with a + /// partition_token. /// /// Partition tokens become invalid when the session used to create them /// is deleted, is idle for too long, begins a new transaction, or becomes too @@ -2578,5 +2988,35 @@ pub mod spanner_client { .insert(GrpcMethod::new("google.spanner.v1.Spanner", "PartitionRead")); self.inner.unary(req, path, codec).await } + /// Batches the supplied mutation groups in a collection of efficient + /// transactions. All mutations in a group are committed atomically. However, + /// mutations across groups can be committed non-atomically in an unspecified + /// order and thus, they must be independent of each other. Partial failure is + /// possible, i.e., some groups may have been committed successfully, while + /// some may have failed. The results of individual batches are streamed into + /// the response as the batches are applied. + /// + /// BatchWrite requests are not replay protected, meaning that each mutation + /// group may be applied more than once. Replays of non-idempotent mutations + /// may have undesirable effects. For example, replays of an insert mutation + /// may produce an already exists error or if you use generated or commit + /// timestamp-based keys, it may result in additional rows being added to the + /// mutation's table. We recommend structuring your mutation groups to be + /// idempotent to avoid this issue. 
+ pub async fn batch_write( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result>, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/google.spanner.v1.Spanner/BatchWrite"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("google.spanner.v1.Spanner", "BatchWrite")); + self.inner.server_streaming(req, path, codec).await + } } } diff --git a/googleapis/src/google.storage.v2.rs b/googleapis/src/google.storage.v2.rs index eb12482e..02365287 100644 --- a/googleapis/src/google.storage.v2.rs +++ b/googleapis/src/google.storage.v2.rs @@ -44,7 +44,7 @@ pub struct CreateBucketRequest { pub parent: ::prost::alloc::string::String, /// Properties of the new bucket being inserted. /// The name of the bucket is specified in the `bucket_id` field. Populating - /// `bucket.name` field will be ignored. + /// `bucket.name` field will result in an error. /// The project of the bucket must be specified in the `bucket.project` field. /// This field must be in `projects/{projectIdentifier}` format, /// {projectIdentifier} can be the project ID or project number. The `parent` @@ -155,67 +155,6 @@ pub struct UpdateBucketRequest { #[prost(message, optional, tag = "6")] pub update_mask: ::core::option::Option<::prost_types::FieldMask>, } -/// Request message for DeleteNotificationConfig. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DeleteNotificationConfigRequest { - /// Required. The parent bucket of the NotificationConfig. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, -} -/// Request message for GetNotificationConfig. 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetNotificationConfigRequest { - /// Required. The parent bucket of the NotificationConfig. - /// Format: - /// `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}` - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, -} -/// Request message for CreateNotificationConfig. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateNotificationConfigRequest { - /// Required. The bucket to which this NotificationConfig belongs. - #[prost(string, tag = "1")] - pub parent: ::prost::alloc::string::String, - /// Required. Properties of the NotificationConfig to be inserted. - #[prost(message, optional, tag = "2")] - pub notification_config: ::core::option::Option, -} -/// Request message for ListNotifications. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListNotificationConfigsRequest { - /// Required. Name of a Google Cloud Storage bucket. - #[prost(string, tag = "1")] - pub parent: ::prost::alloc::string::String, - /// The maximum number of NotificationConfigs to return. The service may - /// return fewer than this value. The default value is 100. Specifying a value - /// above 100 will result in a page_size of 100. - #[prost(int32, tag = "2")] - pub page_size: i32, - /// A page token, received from a previous `ListNotificationConfigs` call. - /// Provide this to retrieve the subsequent page. - /// - /// When paginating, all other parameters provided to `ListNotificationConfigs` - /// must match the call that provided the page token. 
- #[prost(string, tag = "3")] - pub page_token: ::prost::alloc::string::String, -} -/// The result of a call to ListNotificationConfigs -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListNotificationConfigsResponse { - /// The list of items. - #[prost(message, repeated, tag = "1")] - pub notification_configs: ::prost::alloc::vec::Vec, - /// A token, which can be sent as `page_token` to retrieve the next page. - /// If this field is omitted, there are no subsequent pages. - #[prost(string, tag = "2")] - pub next_page_token: ::prost::alloc::string::String, -} /// Request message for ComposeObject. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -325,6 +264,48 @@ pub struct DeleteObjectRequest { #[prost(message, optional, tag = "10")] pub common_object_request_params: ::core::option::Option, } +/// Message for restoring an object. +/// `bucket`, `object`, and `generation` **must** be set. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RestoreObjectRequest { + /// Required. Name of the bucket in which the object resides. + #[prost(string, tag = "1")] + pub bucket: ::prost::alloc::string::String, + /// Required. The name of the object to restore. + #[prost(string, tag = "2")] + pub object: ::prost::alloc::string::String, + /// Required. The specific revision of the object to restore. + #[prost(int64, tag = "3")] + pub generation: i64, + /// Makes the operation conditional on whether the object's current generation + /// matches the given value. Setting to 0 makes the operation succeed only if + /// there are no live versions of the object. + #[prost(int64, optional, tag = "4")] + pub if_generation_match: ::core::option::Option, + /// Makes the operation conditional on whether the object's live generation + /// does not match the given value. If no live object exists, the precondition + /// fails. 
Setting to 0 makes the operation succeed only if there is a live + /// version of the object. + #[prost(int64, optional, tag = "5")] + pub if_generation_not_match: ::core::option::Option, + /// Makes the operation conditional on whether the object's current + /// metageneration matches the given value. + #[prost(int64, optional, tag = "6")] + pub if_metageneration_match: ::core::option::Option, + /// Makes the operation conditional on whether the object's current + /// metageneration does not match the given value. + #[prost(int64, optional, tag = "7")] + pub if_metageneration_not_match: ::core::option::Option, + /// If false or unset, the bucket's default object ACL will be used. + /// If true, copy the source object's access controls. + /// Return an error if bucket has UBLA enabled. + #[prost(bool, optional, tag = "9")] + pub copy_source_acl: ::core::option::Option, + /// A set of parameters common to Storage API requests concerning an object. + #[prost(message, optional, tag = "8")] + pub common_object_request_params: ::core::option::Option, +} /// Message for canceling an in-progress resumable upload. /// `upload_id` **must** be set. #[allow(clippy::derive_partial_eq_without_eq)] @@ -418,6 +399,9 @@ pub struct GetObjectRequest { /// latest version, the default). #[prost(int64, tag = "3")] pub generation: i64, + /// If true, return the soft-deleted version of this object. + #[prost(bool, optional, tag = "11")] + pub soft_deleted: ::core::option::Option, /// Makes the operation conditional on whether the object's current generation /// matches the given value. Setting to 0 makes the operation succeed only if /// there are no live versions of the object. @@ -605,6 +589,115 @@ pub mod write_object_response { Resource(super::Object), } } +/// Request message for BidiWriteObject. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BidiWriteObjectRequest { + /// Required. 
The offset from the beginning of the object at which the data + /// should be written. + /// + /// In the first `WriteObjectRequest` of a `WriteObject()` action, it + /// indicates the initial offset for the `Write()` call. The value **must** be + /// equal to the `persisted_size` that a call to `QueryWriteStatus()` would + /// return (0 if this is the first write to the object). + /// + /// On subsequent calls, this value **must** be no larger than the sum of the + /// first `write_offset` and the sizes of all `data` chunks sent previously on + /// this stream. + /// + /// An invalid value will cause an error. + #[prost(int64, tag = "3")] + pub write_offset: i64, + /// Checksums for the complete object. If the checksums computed by the service + /// don't match the specified checksums the call will fail. May only be + /// provided in last request (with finish_write set). + #[prost(message, optional, tag = "6")] + pub object_checksums: ::core::option::Option, + /// For each BidiWriteObjectRequest where state_lookup is `true` or the client + /// closes the stream, the service will send a BidiWriteObjectResponse + /// containing the current persisted size. The persisted size sent in responses + /// covers all the bytes the server has persisted thus far and can be used to + /// decide what data is safe for the client to drop. Note that the object's + /// current size reported by the BidiWriteObjectResponse may lag behind the + /// number of bytes written by the client. This field is ignored if + /// `finish_write` is set to true. + #[prost(bool, tag = "7")] + pub state_lookup: bool, + /// Persists data written on the stream, up to and including the current + /// message, to permanent storage. This option should be used sparingly as it + /// may reduce performance. Ongoing writes will periodically be persisted on + /// the server even when `flush` is not set. 
This field is ignored if + /// `finish_write` is set to true since there's no need to checkpoint or flush + /// if this message completes the write. + #[prost(bool, tag = "8")] + pub flush: bool, + /// If `true`, this indicates that the write is complete. Sending any + /// `WriteObjectRequest`s subsequent to one in which `finish_write` is `true` + /// will cause an error. + /// For a non-resumable write (where the upload_id was not set in the first + /// message), it is an error not to set this field in the final message of the + /// stream. + #[prost(bool, tag = "9")] + pub finish_write: bool, + /// A set of parameters common to Storage API requests concerning an object. + #[prost(message, optional, tag = "10")] + pub common_object_request_params: ::core::option::Option, + /// The first message of each stream should set one of the following. + #[prost(oneof = "bidi_write_object_request::FirstMessage", tags = "1, 2")] + pub first_message: ::core::option::Option, + /// A portion of the data for the object. + #[prost(oneof = "bidi_write_object_request::Data", tags = "4")] + pub data: ::core::option::Option, +} +/// Nested message and enum types in `BidiWriteObjectRequest`. +pub mod bidi_write_object_request { + /// The first message of each stream should set one of the following. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum FirstMessage { + /// For resumable uploads. This should be the `upload_id` returned from a + /// call to `StartResumableWriteResponse`. + #[prost(string, tag = "1")] + UploadId(::prost::alloc::string::String), + /// For non-resumable uploads. Describes the overall upload, including the + /// destination bucket and object name, preconditions, etc. + #[prost(message, tag = "2")] + WriteObjectSpec(super::WriteObjectSpec), + } + /// A portion of the data for the object. 
+ #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Data { + /// The data to insert. If a crc32c checksum is provided that doesn't match + /// the checksum computed by the service, the request will fail. + #[prost(message, tag = "4")] + ChecksummedData(super::ChecksummedData), + } +} +/// Response message for BidiWriteObject. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BidiWriteObjectResponse { + /// The response will set one of the following. + #[prost(oneof = "bidi_write_object_response::WriteStatus", tags = "1, 2")] + pub write_status: ::core::option::Option, +} +/// Nested message and enum types in `BidiWriteObjectResponse`. +pub mod bidi_write_object_response { + /// The response will set one of the following. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum WriteStatus { + /// The total number of bytes that have been processed for the given object + /// from all `WriteObject` calls. Only set if the upload has not finalized. + #[prost(int64, tag = "1")] + PersistedSize(i64), + /// A resource containing the metadata for the uploaded object. Only set if + /// the upload has finalized. + #[prost(message, tag = "2")] + Resource(super::Object), + } +} /// Request message for ListObjects. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -662,6 +755,20 @@ pub struct ListObjectsRequest { /// lexicographic_end (exclusive). #[prost(string, tag = "11")] pub lexicographic_end: ::prost::alloc::string::String, + /// Optional. If true, only list all soft-deleted versions of the object. + /// Soft delete policy is required to set this option. + #[prost(bool, tag = "12")] + pub soft_deleted: bool, + /// Optional. If true, will also include folders and managed folders (besides + /// objects) in the returned `prefixes`. Requires `delimiter` to be set to '/'. 
+ #[prost(bool, tag = "13")] + pub include_folders_as_prefixes: bool, + /// Optional. Filter results to objects and prefixes that match this glob + /// pattern. See [List Objects Using + /// Glob]() + /// for the full syntax. + #[prost(string, tag = "14")] + pub match_glob: ::prost::alloc::string::String, } /// Request object for `QueryWriteStatus`. #[allow(clippy::derive_partial_eq_without_eq)] @@ -940,6 +1047,16 @@ pub struct GetServiceAccountRequest { #[prost(string, tag = "1")] pub project: ::prost::alloc::string::String, } +/// A service account, owned by Cloud Storage, which may be used when taking +/// action on behalf of a given project, for example to publish Pub/Sub +/// notifications or to retrieve security keys. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ServiceAccount { + /// The ID of the notification. + #[prost(string, tag = "1")] + pub email_address: ::prost::alloc::string::String, +} /// Request message for CreateHmacKey. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -1044,6 +1161,40 @@ pub struct UpdateHmacKeyRequest { #[prost(message, optional, tag = "3")] pub update_mask: ::core::option::Option<::prost_types::FieldMask>, } +/// Hmac Key Metadata, which includes all information other than the secret. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct HmacKeyMetadata { + /// Immutable. Resource name ID of the key in the format + /// {projectIdentifier}/{accessId}. + /// {projectIdentifier} can be the project ID or project number. + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Immutable. Globally unique id for keys. + #[prost(string, tag = "2")] + pub access_id: ::prost::alloc::string::String, + /// Immutable. Identifies the project that owns the service account of the + /// specified HMAC key, in the format "projects/{projectIdentifier}". 
+ /// {projectIdentifier} can be the project ID or project number. + #[prost(string, tag = "3")] + pub project: ::prost::alloc::string::String, + /// Output only. Email of the service account the key authenticates as. + #[prost(string, tag = "4")] + pub service_account_email: ::prost::alloc::string::String, + /// Optional. State of the key. One of ACTIVE, INACTIVE, or DELETED. + /// Writable, can be updated by UpdateHmacKey operation. + #[prost(string, tag = "5")] + pub state: ::prost::alloc::string::String, + /// Output only. The creation time of the HMAC key. + #[prost(message, optional, tag = "6")] + pub create_time: ::core::option::Option<::prost_types::Timestamp>, + /// Output only. The last modification time of the HMAC key metadata. + #[prost(message, optional, tag = "7")] + pub update_time: ::core::option::Option<::prost_types::Timestamp>, + /// Optional. The etag of the HMAC key. + #[prost(string, tag = "8")] + pub etag: ::prost::alloc::string::String, +} /// Parameters that can be passed to any object request. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -1191,8 +1342,6 @@ pub struct Bucket { #[prost(string, tag = "3")] pub project: ::prost::alloc::string::String, /// Output only. The metadata generation of this bucket. - /// Attempting to set or update this field will result in a - /// [FieldViolation][google.rpc.BadRequest.FieldViolation]. #[prost(int64, tag = "4")] pub metageneration: i64, /// Immutable. The location of the bucket. Object data for objects in the @@ -1220,7 +1369,7 @@ pub struct Bucket { /// replication. "ASYNC_TURBO" enables turbo replication, valid for dual-region /// buckets only. If rpo is not specified when the bucket is created, it /// defaults to "DEFAULT". For more information, see - /// + /// #[prost(string, tag = "27")] pub rpo: ::prost::alloc::string::String, /// Access controls on the bucket. 
@@ -1239,8 +1388,6 @@ pub struct Bucket { #[prost(message, optional, tag = "10")] pub lifecycle: ::core::option::Option, /// Output only. The creation time of the bucket. - /// Attempting to set or update this field will result in a - /// [FieldViolation][google.rpc.BadRequest.FieldViolation]. #[prost(message, optional, tag = "11")] pub create_time: ::core::option::Option<::prost_types::Timestamp>, /// The bucket's [ Resource Sharing] @@ -1248,8 +1395,6 @@ pub struct Bucket { #[prost(message, repeated, tag = "12")] pub cors: ::prost::alloc::vec::Vec, /// Output only. The modification time of the bucket. - /// Attempting to set or update this field will result in a - /// [FieldViolation][google.rpc.BadRequest.FieldViolation]. #[prost(message, optional, tag = "13")] pub update_time: ::core::option::Option<::prost_types::Timestamp>, /// The default value for event-based hold on newly created objects in this @@ -1311,13 +1456,23 @@ pub struct Bucket { #[prost(bool, tag = "25")] pub satisfies_pzs: bool, /// Configuration that, if present, specifies the data placement for a - /// [ Region]. + /// [ + /// dual-region]. #[prost(message, optional, tag = "26")] pub custom_placement_config: ::core::option::Option, /// The bucket's Autoclass configuration. If there is no configuration, the /// Autoclass feature will be disabled and have no effect on the bucket. #[prost(message, optional, tag = "28")] pub autoclass: ::core::option::Option, + /// Optional. The bucket's hierarchical namespace configuration. If there is no + /// configuration, the hierarchical namespace feature will be disabled and have + /// no effect on the bucket. + #[prost(message, optional, tag = "32")] + pub hierarchical_namespace: ::core::option::Option, + /// Optional. The bucket's soft delete policy. The soft delete policy prevents + /// soft-deleted objects from being permanently deleted. 
+ #[prost(message, optional, tag = "31")] + pub soft_delete_policy: ::core::option::Option, } /// Nested message and enum types in `Bucket`. pub mod bucket { @@ -1528,6 +1683,19 @@ pub mod bucket { #[prost(message, optional, tag = "4")] pub retention_duration: ::core::option::Option<::prost_types::Duration>, } + /// Soft delete policy properties of a bucket. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct SoftDeletePolicy { + /// The period of time that soft-deleted objects in the bucket must be + /// retained and cannot be permanently deleted. The duration must be greater + /// than or equal to 7 days and less than 1 year. + #[prost(message, optional, tag = "1")] + pub retention_duration: ::core::option::Option<::prost_types::Duration>, + /// Time from which the policy was effective. This is service-provided. + #[prost(message, optional, tag = "2")] + pub effective_time: ::core::option::Option<::prost_types::Timestamp>, + } /// Properties of a bucket related to versioning. /// For more on Cloud Storage versioning, see /// @@ -1570,7 +1738,7 @@ pub mod bucket { } /// Configuration for a bucket's Autoclass feature. #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, Copy, PartialEq, ::prost::Message)] + #[derive(Clone, PartialEq, ::prost::Message)] pub struct Autoclass { /// Enables Autoclass. #[prost(bool, tag = "1")] @@ -1581,6 +1749,23 @@ pub mod bucket { /// to the bucket creation time. #[prost(message, optional, tag = "2")] pub toggle_time: ::core::option::Option<::prost_types::Timestamp>, + /// An object in an Autoclass bucket will eventually cool down to the + /// terminal storage class if there is no access to the object. + /// The only valid values are NEARLINE and ARCHIVE. + #[prost(string, optional, tag = "3")] + pub terminal_storage_class: ::core::option::Option<::prost::alloc::string::String>, + /// Output only. 
Latest instant at which the autoclass terminal storage class + /// was updated. + #[prost(message, optional, tag = "4")] + pub terminal_storage_class_update_time: ::core::option::Option<::prost_types::Timestamp>, + } + /// Configuration for a bucket's hierarchical namespace feature. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] + pub struct HierarchicalNamespace { + /// Optional. Enables the hierarchical namespace feature. + #[prost(bool, tag = "1")] + pub enabled: bool, } } /// An access-control entry. @@ -1641,7 +1826,7 @@ pub struct BucketAccessControl { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ChecksummedData { - /// The data. + /// Optional. The data. #[prost(bytes = "vec", tag = "1")] pub content: ::prost::alloc::vec::Vec, /// If set, the CRC32C digest of the content field. @@ -1663,108 +1848,38 @@ pub struct ObjectChecksums { /// [ and /// ETags: Best Practices]. /// Not all objects will provide an MD5 hash. For example, composite objects - /// provide only crc32c hashes. - /// This value is equivalent to running `cat object.txt | openssl md5 -binary` + /// provide only crc32c hashes. This value is equivalent to running `cat + /// object.txt | openssl md5 -binary` #[prost(bytes = "vec", tag = "2")] pub md5_hash: ::prost::alloc::vec::Vec, } -/// Hmac Key Metadata, which includes all information other than the secret. +/// Describes the Customer-Supplied Encryption Key mechanism used to store an +/// Object's data at rest. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] -pub struct HmacKeyMetadata { - /// Immutable. Resource name ID of the key in the format - /// {projectIdentifier}/{accessId}. - /// {projectIdentifier} can be the project ID or project number. +pub struct CustomerEncryption { + /// The encryption algorithm. 
#[prost(string, tag = "1")] - pub id: ::prost::alloc::string::String, - /// Immutable. Globally unique id for keys. - #[prost(string, tag = "2")] - pub access_id: ::prost::alloc::string::String, - /// Immutable. Identifies the project that owns the service account of the - /// specified HMAC key, in the format "projects/{projectIdentifier}". - /// {projectIdentifier} can be the project ID or project number. - #[prost(string, tag = "3")] - pub project: ::prost::alloc::string::String, - /// Output only. Email of the service account the key authenticates as. - #[prost(string, tag = "4")] - pub service_account_email: ::prost::alloc::string::String, - /// State of the key. One of ACTIVE, INACTIVE, or DELETED. - /// Writable, can be updated by UpdateHmacKey operation. - #[prost(string, tag = "5")] - pub state: ::prost::alloc::string::String, - /// Output only. The creation time of the HMAC key. - #[prost(message, optional, tag = "6")] - pub create_time: ::core::option::Option<::prost_types::Timestamp>, - /// Output only. The last modification time of the HMAC key metadata. - #[prost(message, optional, tag = "7")] - pub update_time: ::core::option::Option<::prost_types::Timestamp>, - /// The etag of the HMAC key. - #[prost(string, tag = "8")] - pub etag: ::prost::alloc::string::String, -} -/// A directive to publish Pub/Sub notifications upon changes to a bucket. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NotificationConfig { - /// Required. The resource name of this NotificationConfig. - /// Format: - /// `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}` - /// The `{project}` portion may be `_` for globally unique buckets. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// Required. The Pub/Sub topic to which this subscription publishes. 
Formatted - /// as: - /// '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}' - #[prost(string, tag = "2")] - pub topic: ::prost::alloc::string::String, - /// The etag of the NotificationConfig. - /// If included in the metadata of GetNotificationConfigRequest, the operation - /// will only be performed if the etag matches that of the NotificationConfig. - #[prost(string, tag = "7")] - pub etag: ::prost::alloc::string::String, - /// If present, only send notifications about listed event types. If - /// empty, sent notifications for all event types. - #[prost(string, repeated, tag = "3")] - pub event_types: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// A list of additional attributes to attach to each Pub/Sub - /// message published for this NotificationConfig. - #[prost(map = "string, string", tag = "4")] - pub custom_attributes: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, - /// If present, only apply this NotificationConfig to object names that - /// begin with this prefix. - #[prost(string, tag = "5")] - pub object_name_prefix: ::prost::alloc::string::String, - /// Required. The desired content of the Payload. - #[prost(string, tag = "6")] - pub payload_format: ::prost::alloc::string::String, -} -/// Describes the Customer-Supplied Encryption Key mechanism used to store an -/// Object's data at rest. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CustomerEncryption { - /// The encryption algorithm. - #[prost(string, tag = "1")] - pub encryption_algorithm: ::prost::alloc::string::String, - /// SHA256 hash value of the encryption key. - /// In raw bytes format (not base64-encoded). - #[prost(bytes = "vec", tag = "3")] - pub key_sha256_bytes: ::prost::alloc::vec::Vec, -} -/// An object. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Object { - /// Immutable. 
The name of this object. Nearly any sequence of unicode - /// characters is valid. See - /// [Guidelines](). - /// Example: `test.txt` - /// The `name` field by itself does not uniquely identify a Cloud Storage - /// object. A Cloud Storage object is uniquely identified by the tuple of - /// (bucket, object, generation). - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// Immutable. The name of the bucket containing this object. + pub encryption_algorithm: ::prost::alloc::string::String, + /// SHA256 hash value of the encryption key. + /// In raw bytes format (not base64-encoded). + #[prost(bytes = "vec", tag = "3")] + pub key_sha256_bytes: ::prost::alloc::vec::Vec, +} +/// An object. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Object { + /// Immutable. The name of this object. Nearly any sequence of unicode + /// characters is valid. See + /// [Guidelines](). + /// Example: `test.txt` + /// The `name` field by itself does not uniquely identify a Cloud Storage + /// object. A Cloud Storage object is uniquely identified by the tuple of + /// (bucket, object, generation). + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// Immutable. The name of the bucket containing this object. #[prost(string, tag = "2")] pub bucket: ::prost::alloc::string::String, /// The etag of the object. @@ -1774,15 +1889,13 @@ pub struct Object { #[prost(string, tag = "27")] pub etag: ::prost::alloc::string::String, /// Immutable. The content generation of this object. Used for object - /// versioning. Attempting to set or update this field will result in a - /// [FieldViolation][google.rpc.BadRequest.FieldViolation]. + /// versioning. #[prost(int64, tag = "3")] pub generation: i64, /// Output only. The version of the metadata for this generation of this /// object. Used for preconditions and for detecting changes in metadata. 
A /// metageneration number is only meaningful in the context of a particular - /// generation of a particular object. Attempting to set or update this field - /// will result in a [FieldViolation][google.rpc.BadRequest.FieldViolation]. + /// generation of a particular object. #[prost(int64, tag = "4")] pub metageneration: i64, /// Storage class of the object. @@ -1790,8 +1903,6 @@ pub struct Object { pub storage_class: ::prost::alloc::string::String, /// Output only. Content-Length of the object data in bytes, matching /// [ 7230 ยง3.3.2]. - /// Attempting to set or update this field will result in a - /// [FieldViolation][google.rpc.BadRequest.FieldViolation]. #[prost(int64, tag = "6")] pub size: i64, /// Content-Encoding of the object data, matching @@ -1818,8 +1929,7 @@ pub struct Object { #[prost(string, tag = "11")] pub content_language: ::prost::alloc::string::String, /// Output only. If this object is noncurrent, this is the time when the object - /// became noncurrent. Attempting to set or update this field will result in a - /// [FieldViolation][google.rpc.BadRequest.FieldViolation]. + /// became noncurrent. #[prost(message, optional, tag = "12")] pub delete_time: ::core::option::Option<::prost_types::Timestamp>, /// Content-Type of the object data, matching @@ -1829,18 +1939,17 @@ pub struct Object { #[prost(string, tag = "13")] pub content_type: ::prost::alloc::string::String, /// Output only. The creation time of the object. - /// Attempting to set or update this field will result in a - /// [FieldViolation][google.rpc.BadRequest.FieldViolation]. #[prost(message, optional, tag = "14")] pub create_time: ::core::option::Option<::prost_types::Timestamp>, /// Output only. Number of underlying components that make up this object. - /// Components are accumulated by compose operations. Attempting to set or - /// update this field will result in a - /// [FieldViolation][google.rpc.BadRequest.FieldViolation]. 
+ /// Components are accumulated by compose operations. #[prost(int32, tag = "15")] pub component_count: i32, /// Output only. Hashes for the data part of this object. This field is used - /// for output only and will be silently ignored if provided in requests. + /// for output only and will be silently ignored if provided in requests. The + /// checksums of the complete object regardless of data range. If the object is + /// downloaded in full, the client should compute one of these checksums over + /// the downloaded object and compare it against the value provided here. #[prost(message, optional, tag = "16")] pub checksums: ::core::option::Option, /// Output only. The modification time of the object metadata. @@ -1849,8 +1958,6 @@ pub struct Object { /// such as modifying custom metadata, as well as changes made by Cloud Storage /// on behalf of a requester, such as changing the storage class based on an /// Object Lifecycle Configuration. - /// Attempting to set or update this field will result in a - /// [FieldViolation][google.rpc.BadRequest.FieldViolation]. #[prost(message, optional, tag = "17")] pub update_time: ::core::option::Option<::prost_types::Timestamp>, /// Cloud KMS Key used to encrypt this object, if the object is encrypted by @@ -1859,8 +1966,6 @@ pub struct Object { pub kms_key: ::prost::alloc::string::String, /// Output only. The time at which the object's storage class was last changed. /// When the object is initially created, it will be set to time_created. - /// Attempting to set or update this field will result in a - /// [FieldViolation][google.rpc.BadRequest.FieldViolation]. #[prost(message, optional, tag = "19")] pub update_storage_class_time: ::core::option::Option<::prost_types::Timestamp>, /// Whether an object is under temporary hold. While this flag is set to true, @@ -1896,8 +2001,7 @@ pub struct Object { #[prost(bool, optional, tag = "23")] pub event_based_hold: ::core::option::Option, /// Output only. The owner of the object. 
This will always be the uploader of - /// the object. Attempting to set or update this field will result in a - /// [FieldViolation][google.rpc.BadRequest.FieldViolation]. + /// the object. #[prost(message, optional, tag = "24")] pub owner: ::core::option::Option, /// Metadata of Customer-Supplied Encryption Key, if the object is encrypted by @@ -1907,12 +2011,27 @@ pub struct Object { /// A user-specified timestamp set on an object. #[prost(message, optional, tag = "26")] pub custom_time: ::core::option::Option<::prost_types::Timestamp>, + /// Output only. This is the time when the object became soft-deleted. + /// + /// Soft-deleted objects are only accessible if a soft_delete_policy is + /// enabled. Also see hard_delete_time. + #[prost(message, optional, tag = "28")] + pub soft_delete_time: ::core::option::Option<::prost_types::Timestamp>, + /// Output only. The time when the object will be permanently deleted. + /// + /// Only set when an object becomes soft-deleted with a soft_delete_policy. + /// Otherwise, the object will not be accessible. + #[prost(message, optional, tag = "29")] + pub hard_delete_time: ::core::option::Option<::prost_types::Timestamp>, } /// An access-control entry. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ObjectAccessControl { - /// The access permission for the entity. + /// The access permission for the entity. One of the following values: + /// * `READER` + /// * `WRITER` + /// * `OWNER` #[prost(string, tag = "1")] pub role: ::prost::alloc::string::String, /// The ID of the access-control entry. @@ -1988,16 +2107,6 @@ pub struct ProjectTeam { #[prost(string, tag = "2")] pub team: ::prost::alloc::string::String, } -/// A service account, owned by Cloud Storage, which may be used when taking -/// action on behalf of a given project, for example to publish Pub/Sub -/// notifications or to retrieve security keys. 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ServiceAccount { - /// The ID of the notification. - #[prost(string, tag = "1")] - pub email_address: ::prost::alloc::string::String, -} /// The owner of a specific resource. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2013,16 +2122,113 @@ pub struct Owner { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ContentRange { - /// The starting offset of the object data. + /// The starting offset of the object data. This value is inclusive. #[prost(int64, tag = "1")] pub start: i64, - /// The ending offset of the object data. + /// The ending offset of the object data. This value is exclusive. #[prost(int64, tag = "2")] pub end: i64, /// The complete length of the object data. #[prost(int64, tag = "3")] pub complete_length: i64, } +/// Request message for DeleteNotificationConfig. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteNotificationConfigRequest { + /// Required. The parent bucket of the NotificationConfig. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, +} +/// Request message for GetNotificationConfig. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetNotificationConfigRequest { + /// Required. The parent bucket of the NotificationConfig. + /// Format: + /// `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}` + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, +} +/// Request message for CreateNotificationConfig. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateNotificationConfigRequest { + /// Required. The bucket to which this NotificationConfig belongs. 
+ #[prost(string, tag = "1")] + pub parent: ::prost::alloc::string::String, + /// Required. Properties of the NotificationConfig to be inserted. + #[prost(message, optional, tag = "2")] + pub notification_config: ::core::option::Option, +} +/// Request message for ListNotifications. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListNotificationConfigsRequest { + /// Required. Name of a Google Cloud Storage bucket. + #[prost(string, tag = "1")] + pub parent: ::prost::alloc::string::String, + /// Optional. The maximum number of NotificationConfigs to return. The service + /// may return fewer than this value. The default value is 100. Specifying a + /// value above 100 will result in a page_size of 100. + #[prost(int32, tag = "2")] + pub page_size: i32, + /// Optional. A page token, received from a previous `ListNotificationConfigs` + /// call. Provide this to retrieve the subsequent page. + /// + /// When paginating, all other parameters provided to `ListNotificationConfigs` + /// must match the call that provided the page token. + #[prost(string, tag = "3")] + pub page_token: ::prost::alloc::string::String, +} +/// The result of a call to ListNotificationConfigs +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListNotificationConfigsResponse { + /// The list of items. + #[prost(message, repeated, tag = "1")] + pub notification_configs: ::prost::alloc::vec::Vec, + /// A token, which can be sent as `page_token` to retrieve the next page. + /// If this field is omitted, there are no subsequent pages. + #[prost(string, tag = "2")] + pub next_page_token: ::prost::alloc::string::String, +} +/// A directive to publish Pub/Sub notifications upon changes to a bucket. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NotificationConfig { + /// Required. The resource name of this NotificationConfig. 
+ /// Format: + /// `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}` + /// The `{project}` portion may be `_` for globally unique buckets. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// Required. The Pub/Sub topic to which this subscription publishes. Formatted + /// as: + /// '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}' + #[prost(string, tag = "2")] + pub topic: ::prost::alloc::string::String, + /// Optional. The etag of the NotificationConfig. + /// If included in the metadata of GetNotificationConfigRequest, the operation + /// will only be performed if the etag matches that of the NotificationConfig. + #[prost(string, tag = "7")] + pub etag: ::prost::alloc::string::String, + /// Optional. If present, only send notifications about listed event types. If + /// empty, sent notifications for all event types. + #[prost(string, repeated, tag = "3")] + pub event_types: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Optional. A list of additional attributes to attach to each Pub/Sub + /// message published for this NotificationConfig. + #[prost(map = "string, string", tag = "4")] + pub custom_attributes: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + /// Optional. If present, only apply this NotificationConfig to object names + /// that begin with this prefix. + #[prost(string, tag = "5")] + pub object_name_prefix: ::prost::alloc::string::String, + /// Required. The desired content of the Payload. + #[prost(string, tag = "6")] + pub payload_format: ::prost::alloc::string::String, +} /// Generated client implementations. 
pub mod storage_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] @@ -2197,10 +2403,9 @@ pub mod storage_client { .insert(GrpcMethod::new("google.storage.v2.Storage", "LockBucketRetentionPolicy")); self.inner.unary(req, path, codec).await } - /// Gets the IAM policy for a specified bucket or object. + /// Gets the IAM policy for a specified bucket. /// The `resource` field in the request should be - /// projects/_/buckets/ for a bucket or - /// projects/_/buckets//objects/ for an object. + /// `projects/_/buckets/{bucket}`. pub async fn get_iam_policy( &mut self, request: impl tonic::IntoRequest, @@ -2215,10 +2420,9 @@ pub mod storage_client { .insert(GrpcMethod::new("google.storage.v2.Storage", "GetIamPolicy")); self.inner.unary(req, path, codec).await } - /// Updates an IAM policy for the specified bucket or object. + /// Updates an IAM policy for the specified bucket. /// The `resource` field in the request should be - /// projects/_/buckets/ for a bucket or - /// projects/_/buckets//objects/ for an object. + /// `projects/_/buckets/{bucket}`. pub async fn set_iam_policy( &mut self, request: impl tonic::IntoRequest, @@ -2233,11 +2437,13 @@ pub mod storage_client { .insert(GrpcMethod::new("google.storage.v2.Storage", "SetIamPolicy")); self.inner.unary(req, path, codec).await } - /// Tests a set of permissions on the given bucket or object to see which, if - /// any, are held by the caller. + /// Tests a set of permissions on the given bucket, object, or managed folder + /// to see which, if any, are held by the caller. /// The `resource` field in the request should be - /// projects/_/buckets/ for a bucket or - /// projects/_/buckets//objects/ for an object. + /// `projects/_/buckets/{bucket}` for a bucket, + /// `projects/_/buckets/{bucket}/objects/{object}` for an object, or + /// `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` + /// for a managed folder. 
pub async fn test_iam_permissions( &mut self, request: impl tonic::IntoRequest, @@ -2268,69 +2474,6 @@ pub mod storage_client { .insert(GrpcMethod::new("google.storage.v2.Storage", "UpdateBucket")); self.inner.unary(req, path, codec).await } - /// Permanently deletes a NotificationConfig. - pub async fn delete_notification_config( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/google.storage.v2.Storage/DeleteNotificationConfig"); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("google.storage.v2.Storage", "DeleteNotificationConfig")); - self.inner.unary(req, path, codec).await - } - /// View a NotificationConfig. - pub async fn get_notification_config( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/google.storage.v2.Storage/GetNotificationConfig"); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("google.storage.v2.Storage", "GetNotificationConfig")); - self.inner.unary(req, path, codec).await - } - /// Creates a NotificationConfig for a given bucket. - /// These NotificationConfigs, when triggered, publish messages to the - /// specified Pub/Sub topics. See - /// https://cloud.google.com/storage/docs/pubsub-notifications. 
- pub async fn create_notification_config( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/google.storage.v2.Storage/CreateNotificationConfig"); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("google.storage.v2.Storage", "CreateNotificationConfig")); - self.inner.unary(req, path, codec).await - } - /// Retrieves a list of NotificationConfigs for a given bucket. - pub async fn list_notification_configs( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/google.storage.v2.Storage/ListNotificationConfigs"); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("google.storage.v2.Storage", "ListNotificationConfigs")); - self.inner.unary(req, path, codec).await - } /// Concatenates a list of existing objects into a new object in the same /// bucket. pub async fn compose_object( @@ -2367,6 +2510,21 @@ pub mod storage_client { .insert(GrpcMethod::new("google.storage.v2.Storage", "DeleteObject")); self.inner.unary(req, path, codec).await } + /// Restores a soft-deleted object. 
+ pub async fn restore_object( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/google.storage.v2.Storage/RestoreObject"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("google.storage.v2.Storage", "RestoreObject")); + self.inner.unary(req, path, codec).await + } /// Cancels an in-progress resumable upload. /// /// Any attempts to write to the resumable upload after cancelling the upload @@ -2492,6 +2650,9 @@ pub mod storage_client { /// status, with a WriteObjectResponse containing the finalized object's /// metadata. /// + /// Alternatively, the BidiWriteObject operation may be used to write an + /// object with controls over flushing and the ability to fetch the ability to + /// determine the current persisted size. pub async fn write_object( &mut self, request: impl tonic::IntoStreamingRequest, @@ -2506,6 +2667,36 @@ pub mod storage_client { .insert(GrpcMethod::new("google.storage.v2.Storage", "WriteObject")); self.inner.client_streaming(req, path, codec).await } + /// Stores a new object and metadata. + /// + /// This is similar to the WriteObject call with the added support for + /// manual flushing of persisted state, and the ability to determine current + /// persisted size without closing the stream. + /// + /// The client may specify one or both of the `state_lookup` and `flush` fields + /// in each BidiWriteObjectRequest. If `flush` is specified, the data written + /// so far will be persisted to storage. If `state_lookup` is specified, the + /// service will respond with a BidiWriteObjectResponse that contains the + /// persisted size. 
If both `flush` and `state_lookup` are specified, the flush + /// will always occur before a `state_lookup`, so that both may be set in the + /// same request and the returned state will be the state of the object + /// post-flush. When the stream is closed, a BidiWriteObjectResponse will + /// always be sent to the client, regardless of the value of `state_lookup`. + pub async fn bidi_write_object( + &mut self, + request: impl tonic::IntoStreamingRequest, + ) -> std::result::Result>, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/google.storage.v2.Storage/BidiWriteObject"); + let mut req = request.into_streaming_request(); + req.extensions_mut() + .insert(GrpcMethod::new("google.storage.v2.Storage", "BidiWriteObject")); + self.inner.streaming(req, path, codec).await + } /// Retrieves a list of objects matching the criteria. pub async fn list_objects( &mut self, @@ -2671,5 +2862,68 @@ pub mod storage_client { .insert(GrpcMethod::new("google.storage.v2.Storage", "UpdateHmacKey")); self.inner.unary(req, path, codec).await } + /// Permanently deletes a NotificationConfig. + pub async fn delete_notification_config( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/google.storage.v2.Storage/DeleteNotificationConfig"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("google.storage.v2.Storage", "DeleteNotificationConfig")); + self.inner.unary(req, path, codec).await + } + /// View a NotificationConfig. 
+ pub async fn get_notification_config( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/google.storage.v2.Storage/GetNotificationConfig"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("google.storage.v2.Storage", "GetNotificationConfig")); + self.inner.unary(req, path, codec).await + } + /// Creates a NotificationConfig for a given bucket. + /// These NotificationConfigs, when triggered, publish messages to the + /// specified Pub/Sub topics. See + /// https://cloud.google.com/storage/docs/pubsub-notifications. + pub async fn create_notification_config( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/google.storage.v2.Storage/CreateNotificationConfig"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("google.storage.v2.Storage", "CreateNotificationConfig")); + self.inner.unary(req, path, codec).await + } + /// Retrieves a list of NotificationConfigs for a given bucket. 
+ pub async fn list_notification_configs( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/google.storage.v2.Storage/ListNotificationConfigs"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("google.storage.v2.Storage", "ListNotificationConfigs")); + self.inner.unary(req, path, codec).await + } } } diff --git a/googleapis/src/lib.rs b/googleapis/src/lib.rs index e8fe7c66..e28842b1 100644 --- a/googleapis/src/lib.rs +++ b/googleapis/src/lib.rs @@ -1,3 +1,5 @@ +#![allow(clippy::doc_lazy_continuation)] + #[path = "google.rpc.rs"] pub mod rpc; diff --git a/kms/Cargo.toml b/kms/Cargo.toml index 6f0b3501..9b97c395 100644 --- a/kms/Cargo.toml +++ b/kms/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "google-cloud-kms" -version = "0.5.1" +version = "0.6.0" edition = "2021" authors = ["yoshidan "] repository = "https://github.com/yoshidan/google-cloud-rust/tree/main/kms" @@ -13,8 +13,8 @@ documentation = "https://docs.rs/google-cloud-kms/latest/google_cloud_kms/" [dependencies] google-cloud-token = { version = "0.1.2", path = "../foundation/token" } google-cloud-auth = { optional = true, version = "0.17", path="../foundation/auth", default-features=false } -google-cloud-googleapis = { version="0.15.0", path = "../googleapis", features=["kms"]} -google-cloud-gax = { version = "0.19.1", path = "../foundation/gax"} +google-cloud-googleapis = { version="0.16.0", path = "../googleapis", features=["kms"]} +google-cloud-gax = { version = "0.19.2", path = "../foundation/gax"} tracing = "0.1" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/pubsub/Cargo.toml b/pubsub/Cargo.toml index 6a597cc5..72eabf5e 100644 --- 
a/pubsub/Cargo.toml +++ b/pubsub/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "google-cloud-pubsub" -version = "0.29.1" +version = "0.30.0" authors = ["yoshidan "] edition = "2021" repository = "https://github.com/yoshidan/google-cloud-rust/tree/main/pubsub" @@ -20,8 +20,8 @@ thiserror = "1.0" tokio-util = "0.7" google-cloud-token = { version = "0.1.2", path = "../foundation/token" } -google-cloud-gax = { version = "0.19.1", path = "../foundation/gax" } -google-cloud-googleapis = { version = "0.15.0", path = "../googleapis", features = ["pubsub"]} +google-cloud-gax = { version = "0.19.2", path = "../foundation/gax" } +google-cloud-googleapis = { version = "0.16.0", path = "../googleapis", features = ["pubsub"]} google-cloud-auth = { optional = true, version = "0.17", path="../foundation/auth", default-features=false } diff --git a/pubsub/src/subscription.rs b/pubsub/src/subscription.rs index f0424ab3..16759e71 100644 --- a/pubsub/src/subscription.rs +++ b/pubsub/src/subscription.rs @@ -11,6 +11,7 @@ use google_cloud_gax::grpc::codegen::tokio_stream::Stream; use google_cloud_gax::grpc::{Code, Status}; use google_cloud_gax::retry::RetrySetting; use google_cloud_googleapis::pubsub::v1::seek_request::Target; +use google_cloud_googleapis::pubsub::v1::subscription::AnalyticsHubSubscriptionInfo; use google_cloud_googleapis::pubsub::v1::{ BigQueryConfig, CloudStorageConfig, CreateSnapshotRequest, DeadLetterPolicy, DeleteSnapshotRequest, DeleteSubscriptionRequest, ExpirationPolicy, GetSnapshotRequest, GetSubscriptionRequest, PullRequest, PushConfig, @@ -39,6 +40,7 @@ pub struct SubscriptionConfig { pub bigquery_config: Option, pub state: i32, pub cloud_storage_config: Option, + pub analytics_hub_subscription_info: Option, } impl From for SubscriptionConfig { fn from(f: InternalSubscription) -> Self { @@ -63,6 +65,7 @@ impl From for SubscriptionConfig { enable_exactly_once_delivery: f.enable_exactly_once_delivery, state: f.state, cloud_storage_config: f.cloud_storage_config, 
+ analytics_hub_subscription_info: f.analytics_hub_subscription_info, } } } @@ -282,6 +285,7 @@ impl Subscription { .map_err(|err: DurationError| Status::internal(err.to_string()))?, enable_exactly_once_delivery: cfg.enable_exactly_once_delivery, state: cfg.state, + analytics_hub_subscription_info: cfg.analytics_hub_subscription_info, }, retry, ) diff --git a/pubsub/src/topic.rs b/pubsub/src/topic.rs index 55594103..1d982a96 100644 --- a/pubsub/src/topic.rs +++ b/pubsub/src/topic.rs @@ -6,8 +6,8 @@ use prost_types::DurationError; use google_cloud_gax::grpc::{Code, Status}; use google_cloud_gax::retry::RetrySetting; use google_cloud_googleapis::pubsub::v1::{ - DeleteTopicRequest, GetTopicRequest, ListTopicSubscriptionsRequest, MessageStoragePolicy, SchemaSettings, - Topic as InternalTopic, + DeleteTopicRequest, GetTopicRequest, IngestionDataSourceSettings, ListTopicSubscriptionsRequest, + MessageStoragePolicy, SchemaSettings, Topic as InternalTopic, }; use crate::apiv1::publisher_client::PublisherClient; @@ -23,6 +23,7 @@ pub struct TopicConfig { pub schema_settings: Option, pub satisfies_pzs: bool, pub message_retention_duration: Option, + pub ingestion_data_source_settings: Option, } impl Default for TopicConfig { @@ -34,6 +35,7 @@ impl Default for TopicConfig { schema_settings: None, satisfies_pzs: false, message_retention_duration: None, + ingestion_data_source_settings: None, } } } @@ -84,6 +86,8 @@ impl Topic { .map(Duration::try_into) .transpose() .map_err(|err: DurationError| Status::internal(err.to_string()))?, + state: 0, + ingestion_data_source_settings: topic_config.ingestion_data_source_settings, }; self.pubc.create_topic(req, retry).await.map(|_v| ()) } diff --git a/spanner/Cargo.toml b/spanner/Cargo.toml index 4533a602..8e58ec4d 100644 --- a/spanner/Cargo.toml +++ b/spanner/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "google-cloud-spanner" -version = "0.31.1" +version = "0.32.0" authors = ["yoshidan "] edition = "2021" repository = 
"https://github.com/yoshidan/google-cloud-rust/tree/main/spanner" @@ -23,9 +23,9 @@ tokio-util = "0.7" bigdecimal = { version="0.4", features=["serde"] } google-cloud-token = { version = "0.1.2", path = "../foundation/token" } -google-cloud-longrunning = { version = "0.20.1", path = "../foundation/longrunning" } -google-cloud-gax = { version = "0.19.1", path = "../foundation/gax" } -google-cloud-googleapis = { version = "0.15.0", path = "../googleapis", features = ["spanner"]} +google-cloud-longrunning = { version = "0.21.0", path = "../foundation/longrunning" } +google-cloud-gax = { version = "0.19.2", path = "../foundation/gax" } +google-cloud-googleapis = { version = "0.16.0", path = "../googleapis", features = ["spanner"]} google-cloud-auth = { optional = true, version = "0.17", path="../foundation/auth", default-features=false } diff --git a/spanner/src/admin/database/mod.rs b/spanner/src/admin/database/mod.rs index 37d574f1..6bec37d7 100644 --- a/spanner/src/admin/database/mod.rs +++ b/spanner/src/admin/database/mod.rs @@ -40,6 +40,7 @@ mod tests { extra_statements: vec!["CREATE TABLE Tbl (ID STRING(MAX)) PRIMARY KEY(ID)".to_string()], encryption_config: None, database_dialect: DatabaseDialect::GoogleStandardSql.into(), + proto_descriptors: vec![], }; let creation_result = match client.create_database(request, None).await { @@ -131,6 +132,7 @@ mod tests { database: database.name.to_string(), statements: vec!["CREATE TABLE Tbl1 (ID INT64) PRIMARY KEY(ID)".to_string()], operation_id: "".to_string(), + proto_descriptors: vec![], }; let update_result = match client.update_database_ddl(request, None).await { diff --git a/spanner/src/admin/instance/mod.rs b/spanner/src/admin/instance/mod.rs index f260dca6..df0a1b84 100644 --- a/spanner/src/admin/instance/mod.rs +++ b/spanner/src/admin/instance/mod.rs @@ -44,11 +44,13 @@ mod tests { display_name: "test-instance-ut".to_string(), node_count: 0, processing_units: 0, + autoscaling_config: None, state: 0, labels: 
Default::default(), endpoint_uris: vec![], create_time: None, update_time: None, + edition: 0, }), }; @@ -108,6 +110,7 @@ mod tests { page_size: 1, page_token: "".to_string(), filter: "".to_string(), + instance_deadline: None, }; match client.list_instances(request, None).await { diff --git a/spanner/src/apiv1/mod.rs b/spanner/src/apiv1/mod.rs index aaa8d02e..c1d81958 100644 --- a/spanner/src/apiv1/mod.rs +++ b/spanner/src/apiv1/mod.rs @@ -47,6 +47,7 @@ mod tests { let request = BeginTransactionRequest { session: session.name.to_string(), options: Option::from(TransactionOptions { + exclude_txn_from_change_streams: false, mode: Option::from(transaction_options::Mode::ReadOnly(transaction_options::ReadOnly { return_read_timestamp: false, timestamp_bound: None, @@ -61,6 +62,7 @@ mod tests { let request = BeginTransactionRequest { session: session.name.to_string(), options: Some(TransactionOptions { + exclude_txn_from_change_streams: false, mode: Some(transaction_options::Mode::ReadWrite(transaction_options::ReadWrite::default())), }), request_options: None, @@ -189,6 +191,7 @@ mod tests { seqno: 0, query_options: None, request_options: None, + directed_read_options: None, data_boost_enabled: false, }; match client.execute_sql(request, None).await { @@ -216,6 +219,7 @@ mod tests { seqno: 0, query_options: None, request_options: None, + directed_read_options: None, data_boost_enabled: false, }; @@ -251,6 +255,7 @@ mod tests { let request = BeginTransactionRequest { session: session.name.to_string(), options: Option::from(TransactionOptions { + exclude_txn_from_change_streams: false, mode: Option::from(transaction_options::Mode::ReadOnly(transaction_options::ReadOnly { return_read_timestamp: false, timestamp_bound: None, @@ -383,6 +388,9 @@ mod tests { request_options: None, limit: 0, data_boost_enabled: false, + order_by: 0, + directed_read_options: None, + lock_hint: 0, }; match client.read(request, None).await { @@ -414,6 +422,9 @@ mod tests { request_options: None, 
limit: 0, data_boost_enabled: false, + order_by: 0, + directed_read_options: None, + lock_hint: 0, }; match client.streaming_read(request, None).await { @@ -463,6 +474,7 @@ mod tests { transaction_tag: "".to_string(), }), return_commit_stats: false, + max_commit_delay: None, }; match client.commit(request, None).await { diff --git a/spanner/src/apiv1/spanner_client.rs b/spanner/src/apiv1/spanner_client.rs index 0766e64b..9d4952f9 100644 --- a/spanner/src/apiv1/spanner_client.rs +++ b/spanner/src/apiv1/spanner_client.rs @@ -25,6 +25,7 @@ pub(crate) fn ping_query_request(session_name: impl Into) -> ExecuteSqlR seqno: 0, query_options: None, request_options: None, + directed_read_options: None, data_boost_enabled: false, } } diff --git a/spanner/src/client.rs b/spanner/src/client.rs index 0bf4c080..b0f0e9d5 100644 --- a/spanner/src/client.rs +++ b/spanner/src/client.rs @@ -381,6 +381,7 @@ impl Client { Some(ro), |session| async { let tx = commit_request::Transaction::SingleUseTransaction(TransactionOptions { + exclude_txn_from_change_streams: false, mode: Some(transaction_options::Mode::ReadWrite(transaction_options::ReadWrite::default())), }); match commit(session, ms.clone(), tx, options.clone()).await { diff --git a/spanner/src/session.rs b/spanner/src/session.rs index d5a0542f..9f3a970e 100644 --- a/spanner/src/session.rs +++ b/spanner/src/session.rs @@ -1099,6 +1099,7 @@ mod tests { seqno: 0, query_options: None, request_options: None, + directed_read_options: None, data_boost_enabled: false, }, None, diff --git a/spanner/src/statement.rs b/spanner/src/statement.rs index 1c336495..17659a16 100644 --- a/spanner/src/statement.rs +++ b/spanner/src/statement.rs @@ -69,6 +69,7 @@ where struct_type: None, //TODO support PG Numeric type_annotation: TypeAnnotationCode::Unspecified.into(), + proto_type_fqn: "".to_string(), } } @@ -247,6 +248,7 @@ where }) .collect(), }), + proto_type_fqn: "".to_string(), } } } @@ -302,6 +304,7 @@ where array_element_type: 
Some(Box::new(T::get_type())), struct_type: None, type_annotation: TypeAnnotationCode::Unspecified.into(), + proto_type_fqn: "".to_string(), } } } diff --git a/spanner/src/transaction.rs b/spanner/src/transaction.rs index bafe2c69..6314f511 100644 --- a/spanner/src/transaction.rs +++ b/spanner/src/transaction.rs @@ -146,6 +146,7 @@ impl Transaction { query_options: options.optimizer_options, request_options: Transaction::create_request_options(options.call_options.priority), data_boost_enabled: false, + directed_read_options: None, }; let session = self.session.as_mut().unwrap().deref_mut(); let reader = StatementReader { @@ -205,6 +206,9 @@ impl Transaction { partition_token: vec![], request_options: Transaction::create_request_options(options.call_options.priority), data_boost_enabled: false, + order_by: 0, + directed_read_options: None, + lock_hint: 0, }; let session = self.as_mut_session(); diff --git a/spanner/src/transaction_ro.rs b/spanner/src/transaction_ro.rs index 8caa8d57..3525b47c 100644 --- a/spanner/src/transaction_ro.rs +++ b/spanner/src/transaction_ro.rs @@ -6,8 +6,9 @@ use time::OffsetDateTime; use google_cloud_gax::grpc::Status; use google_cloud_googleapis::spanner::v1::{ - transaction_options, transaction_selector, BeginTransactionRequest, ExecuteSqlRequest, PartitionOptions, - PartitionQueryRequest, PartitionReadRequest, ReadRequest, TransactionOptions, TransactionSelector, + transaction_options, transaction_selector, BeginTransactionRequest, DirectedReadOptions, ExecuteSqlRequest, + PartitionOptions, PartitionQueryRequest, PartitionReadRequest, ReadRequest, TransactionOptions, + TransactionSelector, }; use crate::key::KeySet; @@ -58,6 +59,7 @@ impl ReadOnlyTransaction { sequence_number: AtomicI64::new(0), transaction_selector: TransactionSelector { selector: Some(transaction_selector::Selector::SingleUse(TransactionOptions { + exclude_txn_from_change_streams: false, mode: Some(transaction_options::Mode::ReadOnly(tb.into())), })), }, @@ -75,6 
+77,7 @@ impl ReadOnlyTransaction { let request = BeginTransactionRequest { session: session.session.name.to_string(), options: Some(TransactionOptions { + exclude_txn_from_change_streams: false, mode: Some(transaction_options::Mode::ReadOnly(tb.into())), }), request_options: Transaction::create_request_options(options.priority), @@ -149,7 +152,7 @@ impl BatchReadOnlyTransaction { columns: &[&str], keys: impl Into + Clone, ) -> Result>, Status> { - self.partition_read_with_option(table, columns, keys, None, ReadOptions::default(), false) + self.partition_read_with_option(table, columns, keys, None, ReadOptions::default(), false, None) .await } @@ -157,6 +160,7 @@ impl BatchReadOnlyTransaction { /// the database. These partitions can be executed across multiple processes, /// even across different machines. The partition size and count hints can be /// configured using PartitionOptions. + #[allow(clippy::too_many_arguments)] pub async fn partition_read_with_option( &mut self, table: &str, @@ -165,6 +169,7 @@ impl BatchReadOnlyTransaction { po: Option, ro: ReadOptions, data_boost_enabled: bool, + directed_read_options: Option, ) -> Result>, Status> { let columns: Vec = columns.iter().map(|x| x.to_string()).collect(); let inner_keyset = keys.into().inner; @@ -200,7 +205,10 @@ impl BatchReadOnlyTransaction { resume_token: vec![], partition_token: x.partition_token, request_options: Transaction::create_request_options(ro.call_options.priority), + directed_read_options: directed_read_options.clone(), data_boost_enabled, + order_by: 0, + lock_hint: 0, }, }, }) @@ -212,7 +220,7 @@ impl BatchReadOnlyTransaction { /// partition_query returns a list of Partitions that can be used to execute a query against the database. 
pub async fn partition_query(&mut self, stmt: Statement) -> Result>, Status> { - self.partition_query_with_option(stmt, None, QueryOptions::default(), false) + self.partition_query_with_option(stmt, None, QueryOptions::default(), false, None) .await } @@ -223,6 +231,7 @@ impl BatchReadOnlyTransaction { po: Option, qo: QueryOptions, data_boost_enabled: bool, + directed_read_options: Option, ) -> Result>, Status> { let request = PartitionQueryRequest { session: self.get_session_name(), @@ -262,6 +271,7 @@ impl BatchReadOnlyTransaction { query_options: qo.optimizer_options.clone(), request_options: Transaction::create_request_options(qo.call_options.priority), data_boost_enabled, + directed_read_options: directed_read_options.clone(), }, }, }) diff --git a/spanner/src/transaction_rw.rs b/spanner/src/transaction_rw.rs index 15572fbc..f7161366 100644 --- a/spanner/src/transaction_rw.rs +++ b/spanner/src/transaction_rw.rs @@ -130,7 +130,10 @@ impl ReadWriteTransaction { ) -> Result { let request = BeginTransactionRequest { session: session.session.name.to_string(), - options: Some(TransactionOptions { mode: Some(mode) }), + options: Some(TransactionOptions { + exclude_txn_from_change_streams: false, + mode: Some(mode), + }), request_options: Transaction::create_request_options(options.priority), }; let result = session.spanner_client.begin_transaction(request, options.retry).await; @@ -176,6 +179,7 @@ impl ReadWriteTransaction { seqno: self.sequence_number.fetch_add(1, Ordering::Relaxed), query_options: options.optimizer_options, request_options: Transaction::create_request_options(options.call_options.priority), + directed_read_options: None, }; let session = self.as_mut_session(); @@ -327,6 +331,7 @@ pub(crate) async fn commit( transaction: Some(tx), request_options: Transaction::create_request_options(commit_options.call_options.priority), return_commit_stats: commit_options.return_commit_stats, + max_commit_delay: None, }; let result = session .spanner_client diff 
--git a/spanner/tests/change_stream_test.rs b/spanner/tests/change_stream_test.rs index 557e38ff..44c07b96 100644 --- a/spanner/tests/change_stream_test.rs +++ b/spanner/tests/change_stream_test.rs @@ -174,6 +174,7 @@ async fn test_read_change_stream() { database: db.to_string(), statements: vec!["CREATE CHANGE STREAM UserItemChangeStream FOR UserItem".to_string()], operation_id: "".to_string(), + proto_descriptors: vec![], }, None, ) @@ -209,6 +210,7 @@ async fn test_read_change_stream() { database: db.to_string(), statements: vec!["DROP CHANGE STREAM UserItemChangeStream".to_string()], operation_id: "".to_string(), + proto_descriptors: vec![], }, None, )