From 743da3d38fd3a54c39e1a71f0c5a80e0fb3cccac Mon Sep 17 00:00:00 2001 From: ByteBaker <42913098+ByteBaker@users.noreply.github.com> Date: Wed, 25 Sep 2024 09:53:58 +0530 Subject: [PATCH] chore: add docs, part of #37 - add pragma `#![warn(missing_docs)]` to the following - `arrow-flight` - `arrow-integration-test` - `arrow-integration-testing` --- arrow-flight/examples/flight_sql_server.rs | 3 +- arrow-flight/src/bin/flight_sql_client.rs | 3 +- arrow-flight/src/decode.rs | 7 +- arrow-flight/src/encode.rs | 3 +- arrow-flight/src/error.rs | 2 + arrow-flight/src/lib.rs | 5 + .../src/sql/arrow.flight.protocol.sql.rs | 464 ++++++++++-------- arrow-flight/src/sql/client.rs | 4 +- arrow-flight/src/sql/metadata/sql_info.rs | 13 +- arrow-flight/src/sql/metadata/xdbc_info.rs | 39 +- arrow-flight/src/sql/mod.rs | 7 +- arrow-flight/src/utils.rs | 7 +- arrow-flight/tests/flight_sql_client.rs | 5 +- arrow-integration-test/src/lib.rs | 34 ++ .../auth_basic_proto.rs | 3 + .../integration_test.rs | 3 + .../src/flight_client_scenarios/middleware.rs | 3 + .../mod.rs} | 2 + .../auth_basic_proto.rs | 4 + .../integration_test.rs | 9 +- .../src/flight_server_scenarios/middleware.rs | 4 + .../mod.rs} | 3 + arrow-integration-testing/src/lib.rs | 18 +- arrow-json/src/writer.rs | 9 +- arrow-schema/src/field.rs | 4 +- arrow/tests/array_cast.rs | 2 +- 26 files changed, 417 insertions(+), 243 deletions(-) rename arrow-integration-testing/src/{flight_client_scenarios.rs => flight_client_scenarios/mod.rs} (93%) rename arrow-integration-testing/src/{flight_server_scenarios.rs => flight_server_scenarios/mod.rs} (91%) diff --git a/arrow-flight/examples/flight_sql_server.rs b/arrow-flight/examples/flight_sql_server.rs index 81afecf8562..dd3a3943dd9 100644 --- a/arrow-flight/examples/flight_sql_server.rs +++ b/arrow-flight/examples/flight_sql_server.rs @@ -19,6 +19,7 @@ use arrow_flight::sql::server::PeekableFlightDataStream; use arrow_flight::sql::DoPutPreparedStatementResult; use base64::prelude::BASE64_STANDARD; use base64::Engine; +use core::str; use futures::{stream, Stream, TryStreamExt}; use once_cell::sync::Lazy; use prost::Message; @@ -168,7 +169,7 @@ impl FlightSqlService for FlightSqlServiceImpl { let bytes = BASE64_STANDARD .decode(base64) .map_err(|e| status!("authorization not decodable", e))?; - let str = String::from_utf8(bytes).map_err(|e| status!("authorization not parsable", e))?; + let str = str::from_utf8(&bytes).map_err(|e| status!("authorization not parsable", e))?; let parts: Vec<_> = str.split(':').collect(); let (user, pass) = match parts.as_slice() { [user, pass] => (user, pass), diff --git a/arrow-flight/src/bin/flight_sql_client.rs b/arrow-flight/src/bin/flight_sql_client.rs index c334b95a9a9..1fbf81f3b52 100644 --- a/arrow-flight/src/bin/flight_sql_client.rs +++ b/arrow-flight/src/bin/flight_sql_client.rs @@ -26,6 +26,7 @@ use arrow_flight::{ }; use arrow_schema::Schema; use clap::{Parser, Subcommand}; +use core::str; use futures::TryStreamExt; use tonic::{ metadata::MetadataMap, @@ -421,7 +422,7 @@ fn log_metadata(map: &MetadataMap, what: &'static str) { "{}: {}={}", what, k.as_str(), - String::from_utf8_lossy(v.as_ref()), + str::from_utf8(v.as_ref()).unwrap(), ); } } diff --git a/arrow-flight/src/decode.rs b/arrow-flight/src/decode.rs index 5561f256ce0..7bafc384306 100644 --- a/arrow-flight/src/decode.rs +++ b/arrow-flight/src/decode.rs @@ -388,11 +388,14 @@ struct FlightStreamState { /// FlightData and the decoded payload (Schema, RecordBatch), if any #[derive(Debug)] pub struct 
DecodedFlightData { + /// The original FlightData message pub inner: FlightData, + /// The decoded payload pub payload: DecodedPayload, } impl DecodedFlightData { + /// Create a new DecodedFlightData with no payload pub fn new_none(inner: FlightData) -> Self { Self { inner, @@ -400,6 +403,7 @@ impl DecodedFlightData { } } + /// Create a new DecodedFlightData with a [`Schema`] payload pub fn new_schema(inner: FlightData, schema: SchemaRef) -> Self { Self { inner, @@ -407,6 +411,7 @@ impl DecodedFlightData { } } + /// Create a new [`DecodedFlightData`] with a [`RecordBatch`] payload pub fn new_record_batch(inner: FlightData, batch: RecordBatch) -> Self { Self { inner, @@ -414,7 +419,7 @@ impl DecodedFlightData { } } - /// return the metadata field of the inner flight data + /// Return the metadata field of the inner flight data pub fn app_metadata(&self) -> Bytes { self.inner.app_metadata.clone() } diff --git a/arrow-flight/src/encode.rs b/arrow-flight/src/encode.rs index 59fa8afd58d..55bc9240321 100644 --- a/arrow-flight/src/encode.rs +++ b/arrow-flight/src/encode.rs @@ -144,6 +144,7 @@ impl Default for FlightDataEncoderBuilder { } impl FlightDataEncoderBuilder { + /// Create a new [`FlightDataEncoderBuilder`]. pub fn new() -> Self { Self::default() } @@ -1403,7 +1404,7 @@ mod tests { let input_rows = batch.num_rows(); let split = split_batch_for_grpc_response(batch.clone(), max_flight_data_size_bytes); - let sizes: Vec<_> = split.iter().map(|batch| batch.num_rows()).collect(); + let sizes: Vec<_> = split.iter().map(RecordBatch::num_rows).collect(); let output_rows: usize = sizes.iter().sum(); assert_eq!(sizes, expected_sizes, "mismatch for {batch:?}"); diff --git a/arrow-flight/src/error.rs b/arrow-flight/src/error.rs index ba979ca9f7a..499706e1ede 100644 --- a/arrow-flight/src/error.rs +++ b/arrow-flight/src/error.rs @@ -37,6 +37,7 @@ pub enum FlightError { } impl FlightError { + /// Generate a new `FlightError::ProtocolError` variant. pub fn protocol(message: impl Into<String>) -> Self { Self::ProtocolError(message.into()) } @@ -98,6 +99,7 @@ impl From<FlightError> for tonic::Status { } } +/// Result type for the Apache Arrow Flight crate pub type Result<T, E = FlightError> = std::result::Result<T, E>; #[cfg(test)] diff --git a/arrow-flight/src/lib.rs b/arrow-flight/src/lib.rs index ff9e387dab0..29a0d6acfa4 100644 --- a/arrow-flight/src/lib.rs +++ b/arrow-flight/src/lib.rs @@ -37,6 +37,7 @@ //! //!
[Flight SQL]: https://arrow.apache.org/docs/format/FlightSql.html #![allow(rustdoc::invalid_html_tags)] +#![warn(missing_docs)] use arrow_ipc::{convert, writer, writer::EncodedData, writer::IpcWriteOptions}; use arrow_schema::{ArrowError, Schema}; @@ -52,6 +53,8 @@ type ArrowResult = std::result::Result; #[allow(clippy::all)] mod gen { + // Since this file is auto-generated, we suppress all warnings + #![allow(missing_docs)] include!("arrow.flight.protocol.rs"); } @@ -125,6 +128,7 @@ use flight_descriptor::DescriptorType; /// SchemaAsIpc represents a pairing of a `Schema` with IpcWriteOptions pub struct SchemaAsIpc<'a> { + /// Data type representing a schema and its IPC write options pub pair: (&'a Schema, &'a IpcWriteOptions), } @@ -682,6 +686,7 @@ impl PollInfo { } impl<'a> SchemaAsIpc<'a> { + /// Create a new `SchemaAsIpc` from a `Schema` and `IpcWriteOptions` pub fn new(schema: &'a Schema, options: &'a IpcWriteOptions) -> Self { SchemaAsIpc { pair: (schema, options), diff --git a/arrow-flight/src/sql/arrow.flight.protocol.sql.rs b/arrow-flight/src/sql/arrow.flight.protocol.sql.rs index 7a37a0b2885..28d37595ac0 100644 --- a/arrow-flight/src/sql/arrow.flight.protocol.sql.rs +++ b/arrow-flight/src/sql/arrow.flight.protocol.sql.rs @@ -200,9 +200,7 @@ pub struct CommandGetTables { /// - "%" means to match any substring with 0 or more characters. /// - "_" means to match any one character. #[prost(string, optional, tag = "3")] - pub table_name_filter_pattern: ::core::option::Option< - ::prost::alloc::string::String, - >, + pub table_name_filter_pattern: ::core::option::Option<::prost::alloc::string::String>, /// /// Specifies a filter of table types which must match. /// The table types depend on vendor/implementation. It is usually used to separate tables from views or system tables. @@ -553,24 +551,19 @@ pub struct ActionEndTransactionRequest { #[prost(bytes = "bytes", tag = "1")] pub transaction_id: ::prost::bytes::Bytes, /// Whether to commit/rollback the given transaction. - #[prost(enumeration = "action_end_transaction_request::EndTransaction", tag = "2")] + #[prost( + enumeration = "action_end_transaction_request::EndTransaction", + tag = "2" + )] pub action: i32, } /// Nested message and enum types in `ActionEndTransactionRequest`. pub mod action_end_transaction_request { - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] + /// The action to take at the end of a transaction. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum EndTransaction { + /// Unspecified action. Unspecified = 0, /// Commit the transaction. Commit = 1, @@ -620,17 +613,7 @@ pub struct ActionEndSavepointRequest { } /// Nested message and enum types in `ActionEndSavepointRequest`. pub mod action_end_savepoint_request { - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum EndSavepoint { Unspecified = 0, @@ -778,9 +761,8 @@ pub struct CommandPreparedStatementUpdate { pub struct CommandStatementIngest { /// The behavior for handling the table definition. #[prost(message, optional, tag = "1")] - pub table_definition_options: ::core::option::Option< - command_statement_ingest::TableDefinitionOptions, - >, + pub table_definition_options: + ::core::option::Option, /// The table to load data into. 
#[prost(string, tag = "2")] pub table: ::prost::alloc::string::String, @@ -802,21 +784,21 @@ pub struct CommandStatementIngest { pub transaction_id: ::core::option::Option<::prost::bytes::Bytes>, /// Backend-specific options. #[prost(map = "string, string", tag = "1000")] - pub options: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, + pub options: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, } /// Nested message and enum types in `CommandStatementIngest`. pub mod command_statement_ingest { /// Options for table definition behavior #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct TableDefinitionOptions { + /// The action to take if the target table does not exist #[prost( enumeration = "table_definition_options::TableNotExistOption", tag = "1" )] pub if_not_exist: i32, + /// The action to take if the target table already exists #[prost(enumeration = "table_definition_options::TableExistsOption", tag = "2")] pub if_exists: i32, } @@ -824,15 +806,7 @@ pub mod command_statement_ingest { pub mod table_definition_options { /// The action to take if the target table does not exist #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration + Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration, )] #[repr(i32)] pub enum TableNotExistOption { @@ -867,15 +841,7 @@ pub mod command_statement_ingest { } /// The action to take if the target table already exists #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration + Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration, )] #[repr(i32)] pub enum TableExistsOption { @@ -968,7 +934,7 @@ pub struct ActionCancelQueryRequest { #[prost(bytes = "bytes", tag = "1")] pub info: ::prost::bytes::Bytes, } -/// + /// The result of cancelling a query. /// /// The result should be wrapped in a google.protobuf.Any message. @@ -976,23 +942,18 @@ pub struct ActionCancelQueryRequest { /// This command is deprecated since 13.0.0. Use the "CancelFlightInfo" /// action with DoAction instead. #[derive(Clone, Copy, PartialEq, ::prost::Message)] +// #[deprecated( +// since = "13.0.0", +// note = "Use the 'CancelFlightInfo' action with DoAction instead." +// )] pub struct ActionCancelQueryResult { + /// The result of the cancellation. #[prost(enumeration = "action_cancel_query_result::CancelResult", tag = "1")] pub result: i32, } /// Nested message and enum types in `ActionCancelQueryResult`. pub mod action_cancel_query_result { - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum CancelResult { /// The cancellation status is unknown. 
Servers should avoid using @@ -1660,24 +1621,16 @@ impl SqlInfo { Self::FlightSqlServerReadOnly => "FLIGHT_SQL_SERVER_READ_ONLY", Self::FlightSqlServerSql => "FLIGHT_SQL_SERVER_SQL", Self::FlightSqlServerSubstrait => "FLIGHT_SQL_SERVER_SUBSTRAIT", - Self::FlightSqlServerSubstraitMinVersion => { - "FLIGHT_SQL_SERVER_SUBSTRAIT_MIN_VERSION" - } - Self::FlightSqlServerSubstraitMaxVersion => { - "FLIGHT_SQL_SERVER_SUBSTRAIT_MAX_VERSION" - } + Self::FlightSqlServerSubstraitMinVersion => "FLIGHT_SQL_SERVER_SUBSTRAIT_MIN_VERSION", + Self::FlightSqlServerSubstraitMaxVersion => "FLIGHT_SQL_SERVER_SUBSTRAIT_MAX_VERSION", Self::FlightSqlServerTransaction => "FLIGHT_SQL_SERVER_TRANSACTION", Self::FlightSqlServerCancel => "FLIGHT_SQL_SERVER_CANCEL", Self::FlightSqlServerBulkIngestion => "FLIGHT_SQL_SERVER_BULK_INGESTION", Self::FlightSqlServerIngestTransactionsSupported => { "FLIGHT_SQL_SERVER_INGEST_TRANSACTIONS_SUPPORTED" } - Self::FlightSqlServerStatementTimeout => { - "FLIGHT_SQL_SERVER_STATEMENT_TIMEOUT" - } - Self::FlightSqlServerTransactionTimeout => { - "FLIGHT_SQL_SERVER_TRANSACTION_TIMEOUT" - } + Self::FlightSqlServerStatementTimeout => "FLIGHT_SQL_SERVER_STATEMENT_TIMEOUT", + Self::FlightSqlServerTransactionTimeout => "FLIGHT_SQL_SERVER_TRANSACTION_TIMEOUT", Self::SqlDdlCatalog => "SQL_DDL_CATALOG", Self::SqlDdlSchema => "SQL_DDL_SCHEMA", Self::SqlDdlTable => "SQL_DDL_TABLE", @@ -1696,15 +1649,11 @@ impl SqlInfo { Self::SqlSupportsColumnAliasing => "SQL_SUPPORTS_COLUMN_ALIASING", Self::SqlNullPlusNullIsNull => "SQL_NULL_PLUS_NULL_IS_NULL", Self::SqlSupportsConvert => "SQL_SUPPORTS_CONVERT", - Self::SqlSupportsTableCorrelationNames => { - "SQL_SUPPORTS_TABLE_CORRELATION_NAMES" - } + Self::SqlSupportsTableCorrelationNames => "SQL_SUPPORTS_TABLE_CORRELATION_NAMES", Self::SqlSupportsDifferentTableCorrelationNames => { "SQL_SUPPORTS_DIFFERENT_TABLE_CORRELATION_NAMES" } - Self::SqlSupportsExpressionsInOrderBy => { - "SQL_SUPPORTS_EXPRESSIONS_IN_ORDER_BY" - } + Self::SqlSupportsExpressionsInOrderBy => "SQL_SUPPORTS_EXPRESSIONS_IN_ORDER_BY", Self::SqlSupportsOrderByUnrelated => "SQL_SUPPORTS_ORDER_BY_UNRELATED", Self::SqlSupportedGroupBy => "SQL_SUPPORTED_GROUP_BY", Self::SqlSupportsLikeEscapeClause => "SQL_SUPPORTS_LIKE_ESCAPE_CLAUSE", @@ -1725,9 +1674,7 @@ impl SqlInfo { Self::SqlSelectForUpdateSupported => "SQL_SELECT_FOR_UPDATE_SUPPORTED", Self::SqlStoredProceduresSupported => "SQL_STORED_PROCEDURES_SUPPORTED", Self::SqlSupportedSubqueries => "SQL_SUPPORTED_SUBQUERIES", - Self::SqlCorrelatedSubqueriesSupported => { - "SQL_CORRELATED_SUBQUERIES_SUPPORTED" - } + Self::SqlCorrelatedSubqueriesSupported => "SQL_CORRELATED_SUBQUERIES_SUPPORTED", Self::SqlSupportedUnions => "SQL_SUPPORTED_UNIONS", Self::SqlMaxBinaryLiteralLength => "SQL_MAX_BINARY_LITERAL_LENGTH", Self::SqlMaxCharLiteralLength => "SQL_MAX_CHAR_LITERAL_LENGTH", @@ -1800,15 +1747,11 @@ impl SqlInfo { } "FLIGHT_SQL_SERVER_TRANSACTION" => Some(Self::FlightSqlServerTransaction), "FLIGHT_SQL_SERVER_CANCEL" => Some(Self::FlightSqlServerCancel), - "FLIGHT_SQL_SERVER_BULK_INGESTION" => { - Some(Self::FlightSqlServerBulkIngestion) - } + "FLIGHT_SQL_SERVER_BULK_INGESTION" => Some(Self::FlightSqlServerBulkIngestion), "FLIGHT_SQL_SERVER_INGEST_TRANSACTIONS_SUPPORTED" => { Some(Self::FlightSqlServerIngestTransactionsSupported) } - "FLIGHT_SQL_SERVER_STATEMENT_TIMEOUT" => { - Some(Self::FlightSqlServerStatementTimeout) - } + "FLIGHT_SQL_SERVER_STATEMENT_TIMEOUT" => Some(Self::FlightSqlServerStatementTimeout), "FLIGHT_SQL_SERVER_TRANSACTION_TIMEOUT" => 
{ Some(Self::FlightSqlServerTransactionTimeout) } @@ -1830,21 +1773,15 @@ impl SqlInfo { "SQL_SUPPORTS_COLUMN_ALIASING" => Some(Self::SqlSupportsColumnAliasing), "SQL_NULL_PLUS_NULL_IS_NULL" => Some(Self::SqlNullPlusNullIsNull), "SQL_SUPPORTS_CONVERT" => Some(Self::SqlSupportsConvert), - "SQL_SUPPORTS_TABLE_CORRELATION_NAMES" => { - Some(Self::SqlSupportsTableCorrelationNames) - } + "SQL_SUPPORTS_TABLE_CORRELATION_NAMES" => Some(Self::SqlSupportsTableCorrelationNames), "SQL_SUPPORTS_DIFFERENT_TABLE_CORRELATION_NAMES" => { Some(Self::SqlSupportsDifferentTableCorrelationNames) } - "SQL_SUPPORTS_EXPRESSIONS_IN_ORDER_BY" => { - Some(Self::SqlSupportsExpressionsInOrderBy) - } + "SQL_SUPPORTS_EXPRESSIONS_IN_ORDER_BY" => Some(Self::SqlSupportsExpressionsInOrderBy), "SQL_SUPPORTS_ORDER_BY_UNRELATED" => Some(Self::SqlSupportsOrderByUnrelated), "SQL_SUPPORTED_GROUP_BY" => Some(Self::SqlSupportedGroupBy), "SQL_SUPPORTS_LIKE_ESCAPE_CLAUSE" => Some(Self::SqlSupportsLikeEscapeClause), - "SQL_SUPPORTS_NON_NULLABLE_COLUMNS" => { - Some(Self::SqlSupportsNonNullableColumns) - } + "SQL_SUPPORTS_NON_NULLABLE_COLUMNS" => Some(Self::SqlSupportsNonNullableColumns), "SQL_SUPPORTED_GRAMMAR" => Some(Self::SqlSupportedGrammar), "SQL_ANSI92_SUPPORTED_LEVEL" => Some(Self::SqlAnsi92SupportedLevel), "SQL_SUPPORTS_INTEGRITY_ENHANCEMENT_FACILITY" => { @@ -1857,15 +1794,11 @@ impl SqlInfo { "SQL_CATALOG_AT_START" => Some(Self::SqlCatalogAtStart), "SQL_SCHEMAS_SUPPORTED_ACTIONS" => Some(Self::SqlSchemasSupportedActions), "SQL_CATALOGS_SUPPORTED_ACTIONS" => Some(Self::SqlCatalogsSupportedActions), - "SQL_SUPPORTED_POSITIONED_COMMANDS" => { - Some(Self::SqlSupportedPositionedCommands) - } + "SQL_SUPPORTED_POSITIONED_COMMANDS" => Some(Self::SqlSupportedPositionedCommands), "SQL_SELECT_FOR_UPDATE_SUPPORTED" => Some(Self::SqlSelectForUpdateSupported), "SQL_STORED_PROCEDURES_SUPPORTED" => Some(Self::SqlStoredProceduresSupported), "SQL_SUPPORTED_SUBQUERIES" => Some(Self::SqlSupportedSubqueries), - "SQL_CORRELATED_SUBQUERIES_SUPPORTED" => { - Some(Self::SqlCorrelatedSubqueriesSupported) - } + "SQL_CORRELATED_SUBQUERIES_SUPPORTED" => Some(Self::SqlCorrelatedSubqueriesSupported), "SQL_SUPPORTED_UNIONS" => Some(Self::SqlSupportedUnions), "SQL_MAX_BINARY_LITERAL_LENGTH" => Some(Self::SqlMaxBinaryLiteralLength), "SQL_MAX_CHAR_LITERAL_LENGTH" => Some(Self::SqlMaxCharLiteralLength), @@ -1888,9 +1821,7 @@ impl SqlInfo { "SQL_MAX_TABLE_NAME_LENGTH" => Some(Self::SqlMaxTableNameLength), "SQL_MAX_TABLES_IN_SELECT" => Some(Self::SqlMaxTablesInSelect), "SQL_MAX_USERNAME_LENGTH" => Some(Self::SqlMaxUsernameLength), - "SQL_DEFAULT_TRANSACTION_ISOLATION" => { - Some(Self::SqlDefaultTransactionIsolation) - } + "SQL_DEFAULT_TRANSACTION_ISOLATION" => Some(Self::SqlDefaultTransactionIsolation), "SQL_TRANSACTIONS_SUPPORTED" => Some(Self::SqlTransactionsSupported), "SQL_SUPPORTED_TRANSACTIONS_ISOLATION_LEVELS" => { Some(Self::SqlSupportedTransactionsIsolationLevels) @@ -1960,12 +1891,18 @@ impl SqlSupportedTransaction { } } } + +/// Whether the SQL is case-sensitive, and if so, how. 
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum SqlSupportedCaseSensitivity { + /// Unknown/not indicated/no support SqlCaseSensitivityUnknown = 0, + /// Case-insensitive SqlCaseSensitivityCaseInsensitive = 1, + /// Uppercase case-sensitive SqlCaseSensitivityUppercase = 2, + /// Lowercase case-sensitive SqlCaseSensitivityLowercase = 3, } impl SqlSupportedCaseSensitivity { @@ -1976,9 +1913,7 @@ impl SqlSupportedCaseSensitivity { pub fn as_str_name(&self) -> &'static str { match self { Self::SqlCaseSensitivityUnknown => "SQL_CASE_SENSITIVITY_UNKNOWN", - Self::SqlCaseSensitivityCaseInsensitive => { - "SQL_CASE_SENSITIVITY_CASE_INSENSITIVE" - } + Self::SqlCaseSensitivityCaseInsensitive => "SQL_CASE_SENSITIVITY_CASE_INSENSITIVE", Self::SqlCaseSensitivityUppercase => "SQL_CASE_SENSITIVITY_UPPERCASE", Self::SqlCaseSensitivityLowercase => "SQL_CASE_SENSITIVITY_LOWERCASE", } } @@ -1996,14 +1931,27 @@ impl SqlSupportedCaseSensitivity { } } } + +/// How NULL values are ordered in the result set. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum SqlNullOrdering { + /// NULL values are sorted higher than non-NULL values. + /// + /// For instance, when sorting in ascending order, NULL values will appear at the end. SqlNullsSortedHigh = 0, + /// NULL values are sorted lower than non-NULL values. + /// + /// For instance, when sorting in ascending order, NULL values will appear at the start. SqlNullsSortedLow = 1, + /// Regardless of the sort order, + /// NULL values are sorted at the start of the result set. SqlNullsSortedAtStart = 2, + /// Regardless of the sort order, + /// NULL values are sorted at the end of the result set. SqlNullsSortedAtEnd = 3, } + impl SqlNullOrdering { /// String value of the enum field names used in the ProtoBuf definition. /// @@ -2028,13 +1976,19 @@ impl SqlNullOrdering { } } } + +/// Which SQL grammar is supported by the server. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum SupportedSqlGrammar { + /// The server supports the minimum SQL grammar. SqlMinimumGrammar = 0, + /// The server supports the core SQL grammar. SqlCoreGrammar = 1, + /// The server supports the extended SQL grammar. SqlExtendedGrammar = 2, } + impl SupportedSqlGrammar { /// String value of the enum field names used in the ProtoBuf definition. /// @@ -2057,6 +2011,7 @@ impl SupportedSqlGrammar { } } } + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum SupportedAnsi92SqlGrammarLevel { @@ -2086,11 +2041,16 @@ impl SupportedAnsi92SqlGrammarLevel { } } } + +/// The level of support for SQL outer 'JOIN' clauses. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum SqlOuterJoinsSupportLevel { + /// SQL outer joins are unsupported. SqlJoinsUnsupported = 0, + /// SQL outer joins are supported, but with limitations. SqlLimitedOuterJoins = 1, + /// SQL outer joins are fully supported. SqlFullOuterJoins = 2, } impl SqlOuterJoinsSupportLevel { @@ -2115,10 +2075,16 @@ impl SqlOuterJoinsSupportLevel { } } } + +/// The level of support for SQL 'GROUP BY' clauses. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum SqlSupportedGroupBy { + /// SQL 'GROUP BY' clauses are unsupported.
SqlGroupByUnrelated = 0, + /// SQL 'GROUP BY' clauses are supported, + /// even allowing columns not in the SELECT clause + /// to be used in the 'GROUP BY' clause. SqlGroupByBeyondSelect = 1, } impl SqlSupportedGroupBy { @@ -2141,11 +2107,16 @@ impl SqlSupportedGroupBy { } } } + +/// The context where an SQL element is supported. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum SqlSupportedElementActions { + /// SQL elements are supported in procedure calls. SqlElementInProcedureCalls = 0, + /// SQL elements are supported in index definitions. SqlElementInIndexDefinitions = 1, + /// SQL elements are supported in privilege definitions. SqlElementInPrivilegeDefinitions = 2, } impl SqlSupportedElementActions { @@ -2157,29 +2128,29 @@ impl SqlSupportedElementActions { match self { Self::SqlElementInProcedureCalls => "SQL_ELEMENT_IN_PROCEDURE_CALLS", Self::SqlElementInIndexDefinitions => "SQL_ELEMENT_IN_INDEX_DEFINITIONS", - Self::SqlElementInPrivilegeDefinitions => { - "SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS" - } + Self::SqlElementInPrivilegeDefinitions => "SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS", } } /// Creates an enum from field names used in the ProtoBuf definition. pub fn from_str_name(value: &str) -> ::core::option::Option { match value { "SQL_ELEMENT_IN_PROCEDURE_CALLS" => Some(Self::SqlElementInProcedureCalls), - "SQL_ELEMENT_IN_INDEX_DEFINITIONS" => { - Some(Self::SqlElementInIndexDefinitions) - } - "SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS" => { - Some(Self::SqlElementInPrivilegeDefinitions) - } + "SQL_ELEMENT_IN_INDEX_DEFINITIONS" => Some(Self::SqlElementInIndexDefinitions), + "SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS" => Some(Self::SqlElementInPrivilegeDefinitions), _ => None, } } } + +/// Represents specific SQL commands that can operate on a "positioned" row within a result set. +/// +/// Positioned commands allow for operations on a particular row identified by a cursor. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum SqlSupportedPositionedCommands { + /// SQL positioned DELETE commands are supported. SqlPositionedDelete = 0, + /// SQL positioned UPDATE commands are supported. SqlPositionedUpdate = 1, } impl SqlSupportedPositionedCommands { @@ -2202,14 +2173,21 @@ impl SqlSupportedPositionedCommands { } } } + +/// The context in which subqueries are supported. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum SqlSupportedSubqueries { + /// SQL subqueries are supported in comparisons. SqlSubqueriesInComparisons = 0, + /// SQL subqueries are supported in EXISTS clauses. SqlSubqueriesInExists = 1, + /// SQL subqueries are supported in IN clauses. SqlSubqueriesInIns = 2, + /// SQL subqueries are supported in quantified expressions. SqlSubqueriesInQuantifieds = 3, } + impl SqlSupportedSubqueries { /// String value of the enum field names used in the ProtoBuf definition. /// @@ -2234,12 +2212,17 @@ impl SqlSupportedSubqueries { } } } + +/// Supported types of SQL unions. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum SqlSupportedUnions { + /// SQL 'UNION' is supported. SqlUnion = 0, + /// SQL 'UNION ALL' is supported. SqlUnionAll = 1, } + impl SqlSupportedUnions { /// String value of the enum field names used in the ProtoBuf definition. 
/// @@ -2260,15 +2243,30 @@ impl SqlSupportedUnions { } } } + +/// Represents various levels of transaction isolation in SQL. +/// +/// Transaction isolation levels control the visibility of changes +/// made by one transaction to other concurrent transactions. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum SqlTransactionIsolationLevel { + /// No transaction isolation. SqlTransactionNone = 0, + /// Lowest level of transaction isolation. + /// + /// Uncommitted transactions can be read by other transactions. SqlTransactionReadUncommitted = 1, + /// Committed transactions can be read by other transactions. SqlTransactionReadCommitted = 2, + /// Repeatable reads are guaranteed within a transaction. SqlTransactionRepeatableRead = 3, + /// Highest level of transaction isolation. + /// + /// Serializable transactions are guaranteed to be isolated. SqlTransactionSerializable = 4, } + impl SqlTransactionIsolationLevel { /// String value of the enum field names used in the ProtoBuf definition. /// @@ -2287,9 +2285,7 @@ impl SqlTransactionIsolationLevel { pub fn from_str_name(value: &str) -> ::core::option::Option { match value { "SQL_TRANSACTION_NONE" => Some(Self::SqlTransactionNone), - "SQL_TRANSACTION_READ_UNCOMMITTED" => { - Some(Self::SqlTransactionReadUncommitted) - } + "SQL_TRANSACTION_READ_UNCOMMITTED" => Some(Self::SqlTransactionReadUncommitted), "SQL_TRANSACTION_READ_COMMITTED" => Some(Self::SqlTransactionReadCommitted), "SQL_TRANSACTION_REPEATABLE_READ" => Some(Self::SqlTransactionRepeatableRead), "SQL_TRANSACTION_SERIALIZABLE" => Some(Self::SqlTransactionSerializable), @@ -2297,11 +2293,16 @@ impl SqlTransactionIsolationLevel { } } } + +/// Types of SQL transactions supported by the server. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum SqlSupportedTransactions { + /// Unspecified transaction support. SqlTransactionUnspecified = 0, + /// DDL transactions are supported. SqlDataDefinitionTransactions = 1, + /// DML transactions are supported. SqlDataManipulationTransactions = 2, } impl SqlSupportedTransactions { @@ -2320,24 +2321,39 @@ impl SqlSupportedTransactions { pub fn from_str_name(value: &str) -> ::core::option::Option { match value { "SQL_TRANSACTION_UNSPECIFIED" => Some(Self::SqlTransactionUnspecified), - "SQL_DATA_DEFINITION_TRANSACTIONS" => { - Some(Self::SqlDataDefinitionTransactions) - } - "SQL_DATA_MANIPULATION_TRANSACTIONS" => { - Some(Self::SqlDataManipulationTransactions) - } + "SQL_DATA_DEFINITION_TRANSACTIONS" => Some(Self::SqlDataDefinitionTransactions), + "SQL_DATA_MANIPULATION_TRANSACTIONS" => Some(Self::SqlDataManipulationTransactions), _ => None, } } } + +/// Type that defines various types of result sets that a SQL database can return. +/// +/// These result set types determine how the results of a query can be accessed and +/// manipulated, particularly in terms of navigation (e.g., moving forward, backward) +/// and sensitivity to changes made to the underlying data. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum SqlSupportedResultSetType { + /// Indicates that the type of the result set is not specified. + /// This may be used when the application does not require + /// specific navigation or sensitivity guarantees, or when + /// the database does not provide a distinct result set type. 
SqlResultSetTypeUnspecified = 0, + /// Result set type that only supports forward navigation and + /// the data must be read through in a single pass. SqlResultSetTypeForwardOnly = 1, + /// Result set type that supports scrolling through the data + /// in both directions, but the data is not sensitive to changes + /// made to the underlying data. SqlResultSetTypeScrollInsensitive = 2, + /// Result set type that supports scrolling through the data + /// in both directions and changes if the underlying data changes + /// as a result of other transactions. SqlResultSetTypeScrollSensitive = 3, } + impl SqlSupportedResultSetType { /// String value of the enum field names used in the ProtoBuf definition. /// @@ -2347,12 +2363,8 @@ impl SqlSupportedResultSetType { match self { Self::SqlResultSetTypeUnspecified => "SQL_RESULT_SET_TYPE_UNSPECIFIED", Self::SqlResultSetTypeForwardOnly => "SQL_RESULT_SET_TYPE_FORWARD_ONLY", - Self::SqlResultSetTypeScrollInsensitive => { - "SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE" - } - Self::SqlResultSetTypeScrollSensitive => { - "SQL_RESULT_SET_TYPE_SCROLL_SENSITIVE" - } + Self::SqlResultSetTypeScrollInsensitive => "SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE", + Self::SqlResultSetTypeScrollSensitive => "SQL_RESULT_SET_TYPE_SCROLL_SENSITIVE", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -2363,20 +2375,26 @@ impl SqlSupportedResultSetType { "SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE" => { Some(Self::SqlResultSetTypeScrollInsensitive) } - "SQL_RESULT_SET_TYPE_SCROLL_SENSITIVE" => { - Some(Self::SqlResultSetTypeScrollSensitive) - } + "SQL_RESULT_SET_TYPE_SCROLL_SENSITIVE" => Some(Self::SqlResultSetTypeScrollSensitive), _ => None, } } } + +/// Defines the level of concurrency supported by the SQL server for result sets. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum SqlSupportedResultSetConcurrency { + /// Concurrency level is not specified. SqlResultSetConcurrencyUnspecified = 0, + /// Concurrency level that only supports + /// read-only access to the result set. SqlResultSetConcurrencyReadOnly = 1, + /// Concurrency level that supports + /// read-write access to the result set. SqlResultSetConcurrencyUpdatable = 2, } + impl SqlSupportedResultSetConcurrency { /// String value of the enum field names used in the ProtoBuf definition. /// @@ -2384,15 +2402,9 @@ impl SqlSupportedResultSetConcurrency { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - Self::SqlResultSetConcurrencyUnspecified => { - "SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED" - } - Self::SqlResultSetConcurrencyReadOnly => { - "SQL_RESULT_SET_CONCURRENCY_READ_ONLY" - } - Self::SqlResultSetConcurrencyUpdatable => { - "SQL_RESULT_SET_CONCURRENCY_UPDATABLE" - } + Self::SqlResultSetConcurrencyUnspecified => "SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED", + Self::SqlResultSetConcurrencyReadOnly => "SQL_RESULT_SET_CONCURRENCY_READ_ONLY", + Self::SqlResultSetConcurrencyUpdatable => "SQL_RESULT_SET_CONCURRENCY_UPDATABLE", } } /// Creates an enum from field names used in the ProtoBuf definition. 
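(Editorial aside, not part of the patch: the `as_str_name`/`from_str_name` pairs in these generated hunks are exact inverses, which is useful to know when documenting them. A minimal sketch, assuming the enum is re-exported at `arrow_flight::sql`:)

```rust
use arrow_flight::sql::SqlSupportedResultSetType;

fn main() {
    let t = SqlSupportedResultSetType::SqlResultSetTypeForwardOnly;
    // `as_str_name` yields the ProtoBuf identifier for the variant...
    let name = t.as_str_name();
    assert_eq!(name, "SQL_RESULT_SET_TYPE_FORWARD_ONLY");
    // ...and `from_str_name` maps it back, returning `None` for unknown names.
    assert_eq!(SqlSupportedResultSetType::from_str_name(name), Some(t));
    assert_eq!(SqlSupportedResultSetType::from_str_name("NOT_A_NAME"), None);
}
```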
@@ -2401,40 +2413,59 @@ impl SqlSupportedResultSetConcurrency { "SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED" => { Some(Self::SqlResultSetConcurrencyUnspecified) } - "SQL_RESULT_SET_CONCURRENCY_READ_ONLY" => { - Some(Self::SqlResultSetConcurrencyReadOnly) - } - "SQL_RESULT_SET_CONCURRENCY_UPDATABLE" => { - Some(Self::SqlResultSetConcurrencyUpdatable) - } + "SQL_RESULT_SET_CONCURRENCY_READ_ONLY" => Some(Self::SqlResultSetConcurrencyReadOnly), + "SQL_RESULT_SET_CONCURRENCY_UPDATABLE" => Some(Self::SqlResultSetConcurrencyUpdatable), _ => None, } } } + +/// Defines the various data types that the SQL server can convert to and from. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum SqlSupportsConvert { + /// 'BIGINT' data type. SqlConvertBigint = 0, + /// 'BINARY' data type. SqlConvertBinary = 1, + /// 'BIT' data type. SqlConvertBit = 2, + /// 'CHAR' data type. SqlConvertChar = 3, + /// 'DATE' data type. SqlConvertDate = 4, + /// 'DECIMAL' data type. SqlConvertDecimal = 5, + /// 'FLOAT' data type. SqlConvertFloat = 6, + /// 'INTEGER' data type. SqlConvertInteger = 7, + /// 'INTERVAL DAY TIME' data type. SqlConvertIntervalDayTime = 8, + /// 'INTERVAL YEAR MONTH' data type. SqlConvertIntervalYearMonth = 9, + /// 'LONGVARBINARY' data type. SqlConvertLongvarbinary = 10, + /// 'LONGVARCHAR' data type. SqlConvertLongvarchar = 11, + /// 'NUMERIC' data type. SqlConvertNumeric = 12, + /// 'REAL' data type. SqlConvertReal = 13, + /// 'SMALLINT' data type. SqlConvertSmallint = 14, + /// 'TIME' data type. SqlConvertTime = 15, + /// 'TIMESTAMP' data type. SqlConvertTimestamp = 16, + /// 'TINYINT' data type. SqlConvertTinyint = 17, + /// 'VARBINARY' data type. SqlConvertVarbinary = 18, + /// 'VARCHAR' data type. SqlConvertVarchar = 19, } + impl SqlSupportsConvert { /// String value of the enum field names used in the ProtoBuf definition. /// @@ -2491,35 +2522,59 @@ impl SqlSupportsConvert { } } } -/// * + /// The JDBC/ODBC-defined type of any object. /// All the values here are the same as in the JDBC and ODBC specs. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum XdbcDataType { + /// Unknown data type. XdbcUnknownType = 0, + /// A single character. XdbcChar = 1, + /// A numeric value. XdbcNumeric = 2, + /// A decimal value. XdbcDecimal = 3, + /// A 32-bit integer. XdbcInteger = 4, + /// A 16-bit integer. XdbcSmallint = 5, + /// A floating-point number. XdbcFloat = 6, + /// A real number. XdbcReal = 7, + /// A double-precision floating-point number. XdbcDouble = 8, + /// A date and time value. XdbcDatetime = 9, + /// An interval value. XdbcInterval = 10, + /// A variable-length character string. XdbcVarchar = 12, + /// A date value. XdbcDate = 91, + /// A time value. XdbcTime = 92, + /// A timestamp value. XdbcTimestamp = 93, + /// A long character string. XdbcLongvarchar = -1, + /// A binary value. XdbcBinary = -2, + /// A variable-length binary string. XdbcVarbinary = -3, + /// A long binary value. XdbcLongvarbinary = -4, + /// A 64-bit integer. XdbcBigint = -5, + /// An 8-bit integer. XdbcTinyint = -6, + /// A single bit value. XdbcBit = -7, + /// A fixed-length Unicode character string. XdbcWchar = -8, + /// A variable-length Unicode character string. XdbcWvarchar = -9, } impl XdbcDataType { @@ -2586,37 +2641,64 @@ impl XdbcDataType { } } } -/// * + /// Detailed subtype information for XDBC_TYPE_DATETIME and XDBC_TYPE_INTERVAL.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum XdbcDatetimeSubcode { + /// Unknown datetime unit. XdbcSubcodeUnknown = 0, + /// Year. XdbcSubcodeYear = 1, + /// Time. XdbcSubcodeTime = 2, + /// Timestamp. XdbcSubcodeTimestamp = 3, + /// Time with timezone. XdbcSubcodeTimeWithTimezone = 4, + /// Timestamp with timezone. XdbcSubcodeTimestampWithTimezone = 5, + /// Second. XdbcSubcodeSecond = 6, + /// Year to month. XdbcSubcodeYearToMonth = 7, + /// Day to hour. XdbcSubcodeDayToHour = 8, + /// Day to minute. XdbcSubcodeDayToMinute = 9, + /// Day to second. XdbcSubcodeDayToSecond = 10, + /// Hour to minute. XdbcSubcodeHourToMinute = 11, + /// Hour to second. XdbcSubcodeHourToSecond = 12, + /// Minute to second. XdbcSubcodeMinuteToSecond = 13, + /// Interval year. XdbcSubcodeIntervalYear = 101, + /// Interval month. XdbcSubcodeIntervalMonth = 102, + /// Interval day. XdbcSubcodeIntervalDay = 103, + /// Interval hour. XdbcSubcodeIntervalHour = 104, + /// Interval minute. XdbcSubcodeIntervalMinute = 105, + /// Interval second. XdbcSubcodeIntervalSecond = 106, + /// Interval year to month. XdbcSubcodeIntervalYearToMonth = 107, + /// Interval day to hour. XdbcSubcodeIntervalDayToHour = 108, + /// Interval day to minute. XdbcSubcodeIntervalDayToMinute = 109, + /// Interval day to second. XdbcSubcodeIntervalDayToSecond = 110, + /// Interval hour to minute. XdbcSubcodeIntervalHourToMinute = 111, + /// Interval hour to second. XdbcSubcodeIntervalHourToSecond = 112, + /// Interval minute to second. XdbcSubcodeIntervalMinuteToSecond = 113, } impl XdbcDatetimeSubcode { @@ -2631,9 +2713,7 @@ impl XdbcDatetimeSubcode { Self::XdbcSubcodeTime => "XDBC_SUBCODE_TIME", Self::XdbcSubcodeTimestamp => "XDBC_SUBCODE_TIMESTAMP", Self::XdbcSubcodeTimeWithTimezone => "XDBC_SUBCODE_TIME_WITH_TIMEZONE", - Self::XdbcSubcodeTimestampWithTimezone => { - "XDBC_SUBCODE_TIMESTAMP_WITH_TIMEZONE" - } + Self::XdbcSubcodeTimestampWithTimezone => "XDBC_SUBCODE_TIMESTAMP_WITH_TIMEZONE", Self::XdbcSubcodeSecond => "XDBC_SUBCODE_SECOND", Self::XdbcSubcodeYearToMonth => "XDBC_SUBCODE_YEAR_TO_MONTH", Self::XdbcSubcodeDayToHour => "XDBC_SUBCODE_DAY_TO_HOUR", @@ -2652,15 +2732,9 @@ impl XdbcDatetimeSubcode { Self::XdbcSubcodeIntervalDayToHour => "XDBC_SUBCODE_INTERVAL_DAY_TO_HOUR", Self::XdbcSubcodeIntervalDayToMinute => "XDBC_SUBCODE_INTERVAL_DAY_TO_MINUTE", Self::XdbcSubcodeIntervalDayToSecond => "XDBC_SUBCODE_INTERVAL_DAY_TO_SECOND", - Self::XdbcSubcodeIntervalHourToMinute => { - "XDBC_SUBCODE_INTERVAL_HOUR_TO_MINUTE" - } - Self::XdbcSubcodeIntervalHourToSecond => { - "XDBC_SUBCODE_INTERVAL_HOUR_TO_SECOND" - } - Self::XdbcSubcodeIntervalMinuteToSecond => { - "XDBC_SUBCODE_INTERVAL_MINUTE_TO_SECOND" - } + Self::XdbcSubcodeIntervalHourToMinute => "XDBC_SUBCODE_INTERVAL_HOUR_TO_MINUTE", + Self::XdbcSubcodeIntervalHourToSecond => "XDBC_SUBCODE_INTERVAL_HOUR_TO_SECOND", + Self::XdbcSubcodeIntervalMinuteToSecond => "XDBC_SUBCODE_INTERVAL_MINUTE_TO_SECOND", } } /// Creates an enum from field names used in the ProtoBuf definition. 
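(Another hedged sketch, not part of the patch: when one of these names arrives over the wire, a `None` from `from_str_name` pairs naturally with the `FlightError::protocol` helper documented earlier in this diff.)

```rust
use arrow_flight::error::FlightError;
use arrow_flight::sql::XdbcDatetimeSubcode;

// Parse a ProtoBuf-style name, turning an unrecognized value into a
// FlightError::ProtocolError instead of panicking.
fn parse_subcode(name: &str) -> Result<XdbcDatetimeSubcode, FlightError> {
    XdbcDatetimeSubcode::from_str_name(name)
        .ok_or_else(|| FlightError::protocol(format!("unknown XDBC subcode: {name}")))
}

fn main() {
    assert!(parse_subcode("XDBC_SUBCODE_TIME").is_ok());
    assert!(parse_subcode("XDBC_SUBCODE_BOGUS").is_err());
}
```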
@@ -2671,9 +2745,7 @@ impl XdbcDatetimeSubcode { "XDBC_SUBCODE_YEAR" => Some(Self::XdbcSubcodeYear), "XDBC_SUBCODE_TIME" => Some(Self::XdbcSubcodeTime), "XDBC_SUBCODE_TIMESTAMP" => Some(Self::XdbcSubcodeTimestamp), "XDBC_SUBCODE_TIME_WITH_TIMEZONE" => Some(Self::XdbcSubcodeTimeWithTimezone), - "XDBC_SUBCODE_TIMESTAMP_WITH_TIMEZONE" => { - Some(Self::XdbcSubcodeTimestampWithTimezone) - } + "XDBC_SUBCODE_TIMESTAMP_WITH_TIMEZONE" => Some(Self::XdbcSubcodeTimestampWithTimezone), "XDBC_SUBCODE_SECOND" => Some(Self::XdbcSubcodeSecond), "XDBC_SUBCODE_YEAR_TO_MONTH" => Some(Self::XdbcSubcodeYearToMonth), "XDBC_SUBCODE_DAY_TO_HOUR" => Some(Self::XdbcSubcodeDayToHour), @@ -2688,24 +2760,12 @@ impl XdbcDatetimeSubcode { "XDBC_SUBCODE_INTERVAL_HOUR" => Some(Self::XdbcSubcodeIntervalHour), "XDBC_SUBCODE_INTERVAL_MINUTE" => Some(Self::XdbcSubcodeIntervalMinute), "XDBC_SUBCODE_INTERVAL_SECOND" => Some(Self::XdbcSubcodeIntervalSecond), - "XDBC_SUBCODE_INTERVAL_YEAR_TO_MONTH" => { - Some(Self::XdbcSubcodeIntervalYearToMonth) - } - "XDBC_SUBCODE_INTERVAL_DAY_TO_HOUR" => { - Some(Self::XdbcSubcodeIntervalDayToHour) - } - "XDBC_SUBCODE_INTERVAL_DAY_TO_MINUTE" => { - Some(Self::XdbcSubcodeIntervalDayToMinute) - } - "XDBC_SUBCODE_INTERVAL_DAY_TO_SECOND" => { - Some(Self::XdbcSubcodeIntervalDayToSecond) - } - "XDBC_SUBCODE_INTERVAL_HOUR_TO_MINUTE" => { - Some(Self::XdbcSubcodeIntervalHourToMinute) - } - "XDBC_SUBCODE_INTERVAL_HOUR_TO_SECOND" => { - Some(Self::XdbcSubcodeIntervalHourToSecond) - } + "XDBC_SUBCODE_INTERVAL_YEAR_TO_MONTH" => Some(Self::XdbcSubcodeIntervalYearToMonth), + "XDBC_SUBCODE_INTERVAL_DAY_TO_HOUR" => Some(Self::XdbcSubcodeIntervalDayToHour), + "XDBC_SUBCODE_INTERVAL_DAY_TO_MINUTE" => Some(Self::XdbcSubcodeIntervalDayToMinute), + "XDBC_SUBCODE_INTERVAL_DAY_TO_SECOND" => Some(Self::XdbcSubcodeIntervalDayToSecond), + "XDBC_SUBCODE_INTERVAL_HOUR_TO_MINUTE" => Some(Self::XdbcSubcodeIntervalHourToMinute), + "XDBC_SUBCODE_INTERVAL_HOUR_TO_SECOND" => Some(Self::XdbcSubcodeIntervalHourToSecond), "XDBC_SUBCODE_INTERVAL_MINUTE_TO_SECOND" => { Some(Self::XdbcSubcodeIntervalMinuteToSecond) } @@ -2713,16 +2773,15 @@ } } } + +/// Represents nullability rules for columns. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum Nullable { - /// * /// Indicates that the fields does not allow the use of null values. NullabilityNoNulls = 0, - /// * /// Indicates that the fields allow the use of null values. NullabilityNullable = 1, - /// * /// Indicates that nullability of the fields cannot be determined. NullabilityUnknown = 2, } @@ -2748,6 +2807,8 @@ impl Nullable { } } } + +/// Represents whether a column can be used in a 'WHERE' clause. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum Searchable { @@ -2755,17 +2816,15 @@ pub enum Searchable { /// Indicates that column cannot be used in a WHERE clause. None = 0, /// * - /// Indicates that the column can be used in a WHERE clause if it is using a - /// LIKE operator. + /// Indicates that the column can be used in a WHERE clause + /// if it is using a LIKE operator. Char = 1, - /// * - /// Indicates that the column can be used In a WHERE clause with any - /// operator other than LIKE. + /// Indicates that the column can be used in a WHERE clause + /// with any operator other than LIKE. /// - /// - Allowed operators: comparison, quantified comparison, BETWEEN, - /// DISTINCT, IN, MATCH, and UNIQUE.
+ /// - Allowed operators: comparison, quantified comparison, + /// BETWEEN, DISTINCT, IN, MATCH, and UNIQUE. Basic = 2, - /// * /// Indicates that the column can be used in a WHERE clause using any operator. Full = 3, } @@ -2793,13 +2852,20 @@ impl Searchable { } } } + +/// Represents the action to take when a referenced row is updated or deleted. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum UpdateDeleteRules { + /// Deletion of a row will cause the deletion of rows in related tables. Cascade = 0, + /// Deletion will be restricted if the row is referenced by other rows. Restrict = 1, + /// Deletion will set the foreign key column values to NULL. SetNull = 2, + /// Deletion is allowed and no action is taken. NoAction = 3, + /// Deletion will set the foreign key column values to their default values. SetDefault = 4, } impl UpdateDeleteRules { diff --git a/arrow-flight/src/sql/client.rs b/arrow-flight/src/sql/client.rs index ef52aa27ef5..e45e505b2b6 100644 --- a/arrow-flight/src/sql/client.rs +++ b/arrow-flight/src/sql/client.rs @@ -695,9 +695,11 @@ fn flight_error_to_arrow_error(err: FlightError) -> ArrowError { } } -// A polymorphic structure to natively represent different types of data contained in `FlightData` +/// A polymorphic structure to natively represent different types of data contained in `FlightData` pub enum ArrowFlightData { + /// A record batch RecordBatch(RecordBatch), + /// A schema Schema(Schema), } diff --git a/arrow-flight/src/sql/metadata/sql_info.rs b/arrow-flight/src/sql/metadata/sql_info.rs index 97304d3c872..2ea30df7fc2 100644 --- a/arrow-flight/src/sql/metadata/sql_info.rs +++ b/arrow-flight/src/sql/metadata/sql_info.rs @@ -331,7 +331,7 @@ impl SqlInfoUnionBuilder { /// /// Servers construct - usually static - [`SqlInfoData`] via the [`SqlInfoDataBuilder`], /// and build responses using [`CommandGetSqlInfo::into_builder`] -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Default)] pub struct SqlInfoDataBuilder { /// Use BTreeMap to ensure the values are sorted by value as /// to make output consistent infos: BTreeMap<u32, SqlInfoValue>, } -impl Default for SqlInfoDataBuilder { - fn default() -> Self { - Self::new() - } -} - impl SqlInfoDataBuilder { + /// Create a new SQL info builder pub fn new() -> Self { - Self { - infos: BTreeMap::new(), - } + Self::default() } /// register the specific sql metadata item diff --git a/arrow-flight/src/sql/metadata/xdbc_info.rs b/arrow-flight/src/sql/metadata/xdbc_info.rs index 2e635d3037b..485bedaebfb 100644 --- a/arrow-flight/src/sql/metadata/xdbc_info.rs +++ b/arrow-flight/src/sql/metadata/xdbc_info.rs @@ -41,24 +41,43 @@ use crate::sql::{CommandGetXdbcTypeInfo, Nullable, Searchable, XdbcDataType, Xdb /// Data structure representing type information for xdbc types.
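///
/// As an illustrative sketch (not part of this patch): because the struct
/// derives `Default`, a caller could fill in only the fields of interest;
/// the values below are hypothetical placeholders.
/// ```ignore
/// let int_info = XdbcTypeInfo {
///     type_name: "INTEGER".to_string(),
///     data_type: XdbcDataType::XdbcInteger,
///     ..Default::default()
/// };
/// ```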
#[derive(Debug, Clone, Default)] pub struct XdbcTypeInfo { + /// The name of the type pub type_name: String, + /// The data type of the type pub data_type: XdbcDataType, + /// The column size of the type pub column_size: Option<i32>, + /// The prefix of the type pub literal_prefix: Option<String>, + /// The suffix of the type pub literal_suffix: Option<String>, + /// The create parameters of the type pub create_params: Option<Vec<String>>, + /// The nullability of the type pub nullable: Nullable, + /// Whether the type is case sensitive pub case_sensitive: bool, + /// Whether the type is searchable pub searchable: Searchable, + /// Whether the type is unsigned pub unsigned_attribute: Option<bool>, + /// Whether the type has fixed precision and scale pub fixed_prec_scale: bool, + /// Whether the type is auto-incrementing pub auto_increment: Option<bool>, + /// The local type name of the type pub local_type_name: Option<String>, + /// The minimum scale of the type pub minimum_scale: Option<i32>, + /// The maximum scale of the type pub maximum_scale: Option<i32>, + /// The SQL data type of the type pub sql_data_type: XdbcDataType, + /// The optional datetime subcode of the type pub datetime_subcode: Option<XdbcDatetimeSubcode>, + /// The number precision radix of the type pub num_prec_radix: Option<i32>, + /// The interval precision of the type pub interval_precision: Option<i32>, } @@ -93,16 +112,6 @@ impl XdbcTypeInfoData { } } -pub struct XdbcTypeInfoDataBuilder { - infos: Vec<XdbcTypeInfo>, -} - -impl Default for XdbcTypeInfoDataBuilder { - fn default() -> Self { - Self::new() - } -} - /// A builder for [`XdbcTypeInfoData`] which is used to create [`CommandGetXdbcTypeInfo`] responses. /// /// # Example /// ``` @@ -138,6 +147,16 @@ impl Default for XdbcTypeInfoDataBuilder { /// // to access the underlying record batch /// let batch = info_list.record_batch(None); /// ``` +pub struct XdbcTypeInfoDataBuilder { + infos: Vec<XdbcTypeInfo>, +} + +impl Default for XdbcTypeInfoDataBuilder { + fn default() -> Self { + Self::new() + } +} + impl XdbcTypeInfoDataBuilder { /// Create a new instance of [`XdbcTypeInfoDataBuilder`]. pub fn new() -> Self { diff --git a/arrow-flight/src/sql/mod.rs b/arrow-flight/src/sql/mod.rs index 453f608d353..82ee62512e6 100644 --- a/arrow-flight/src/sql/mod.rs +++ b/arrow-flight/src/sql/mod.rs @@ -163,7 +163,9 @@ macro_rules! prost_message_ext { /// ``` #[derive(Clone, Debug, PartialEq)] pub enum Command { - $($name($name),)* + $( + #[doc = concat!(stringify!($name), " variant")] + $name($name),)* /// Any message that is not any FlightSQL command.
Unknown(Any), @@ -297,10 +299,12 @@ pub struct Any { } impl Any { + /// Checks whether the message is of type `M` pub fn is<M: ProstMessageExt>(&self) -> bool { M::type_url() == self.type_url } + /// Unpacks the contents of the message if it is of type `M` pub fn unpack<M: ProstMessageExt>(&self) -> Result<Option<M>, ArrowError> { if !self.is::<M>() { return Ok(None); } @@ -310,6 +314,7 @@ impl Any { Ok(Some(m)) } + /// Packs a message into an [`Any`] message pub fn pack<M: ProstMessageExt>(message: &M) -> Result<Any, ArrowError> { Ok(message.as_any()) } diff --git a/arrow-flight/src/utils.rs b/arrow-flight/src/utils.rs index 37d7ff9e729..f6129ddfe24 100644 --- a/arrow-flight/src/utils.rs +++ b/arrow-flight/src/utils.rs @@ -160,9 +160,12 @@ pub fn batches_to_flight_data( dictionaries.extend(encoded_dictionaries.into_iter().map(Into::into)); flight_data.push(encoded_batch.into()); } - let mut stream = vec![schema_flight_data]; + + let mut stream = Vec::with_capacity(1 + dictionaries.len() + flight_data.len()); + + stream.push(schema_flight_data); stream.extend(dictionaries); stream.extend(flight_data); - let flight_data: Vec<_> = stream.into_iter().collect(); + let flight_data = stream; Ok(flight_data) } diff --git a/arrow-flight/tests/flight_sql_client.rs b/arrow-flight/tests/flight_sql_client.rs index 349da062a82..24a3c7b28c6 100644 --- a/arrow-flight/tests/flight_sql_client.rs +++ b/arrow-flight/tests/flight_sql_client.rs @@ -32,6 +32,7 @@ use arrow_flight::sql::{ TableNotExistOption, }; use arrow_flight::Action; +use core::str; use futures::{StreamExt, TryStreamExt}; use std::collections::HashMap; use std::sync::Arc; @@ -183,13 +184,13 @@ impl FlightSqlService for FlightSqlServiceImpl { query: ActionEndTransactionRequest, _request: Request<Action>, ) -> Result<(), Status> { - let transaction_id = String::from_utf8(query.transaction_id.to_vec()) + let transaction_id = str::from_utf8(&query.transaction_id) .map_err(|_| Status::invalid_argument("Invalid transaction id"))?; if self .transactions .lock() .await - .remove(&transaction_id) + .remove(transaction_id) .is_none() { return Err(Status::invalid_argument("Transaction id not found")); diff --git a/arrow-integration-test/src/lib.rs b/arrow-integration-test/src/lib.rs index d1486fd5a15..ea5b545f2e8 100644 --- a/arrow-integration-test/src/lib.rs +++ b/arrow-integration-test/src/lib.rs @@ -21,6 +21,7 @@ //! //!
This is not a canonical format, but provides a human-readable way of verifying language implementations +#![warn(missing_docs)] use arrow_buffer::{IntervalDayTime, IntervalMonthDayNano, ScalarBuffer}; use hex::decode; use num::BigInt; @@ -49,8 +50,11 @@ pub use schema::*; /// See #[derive(Deserialize, Serialize, Debug)] pub struct ArrowJson { + /// The Arrow schema for the JSON file pub schema: ArrowJsonSchema, + /// The `RecordBatch`es in the JSON file pub batches: Vec<ArrowJsonBatch>, + /// The dictionaries in the JSON file #[serde(skip_serializing_if = "Option::is_none")] pub dictionaries: Option<Vec<ArrowJsonDictionaryBatch>>, } @@ -60,7 +64,9 @@ pub struct ArrowJson { /// Fields are left as JSON `Value` as they vary by `DataType` #[derive(Deserialize, Serialize, Debug)] pub struct ArrowJsonSchema { + /// An array of JSON fields pub fields: Vec<ArrowJsonField>, + /// An array of metadata key-value pairs #[serde(skip_serializing_if = "Option::is_none")] pub metadata: Option<Vec<HashMap<String, String>>>, } @@ -68,13 +74,20 @@ pub struct ArrowJsonSchema { /// Fields are left as JSON `Value` as they vary by `DataType` #[derive(Deserialize, Serialize, Debug)] pub struct ArrowJsonField { + /// The name of the field pub name: String, + /// The data type of the field, + /// which can be any valid JSON value #[serde(rename = "type")] pub field_type: Value, + /// Whether the field is nullable pub nullable: bool, + /// The child fields pub children: Vec<ArrowJsonField>, + /// The dictionary for the field #[serde(skip_serializing_if = "Option::is_none")] pub dictionary: Option<ArrowJsonFieldDictionary>, + /// The metadata for the field, if any #[serde(skip_serializing_if = "Option::is_none")] pub metadata: Option<Value>, } @@ -115,20 +128,28 @@ impl From<&Field> for ArrowJsonField { } } +/// Represents a dictionary-encoded field in the Arrow JSON format #[derive(Deserialize, Serialize, Debug)] pub struct ArrowJsonFieldDictionary { + /// A unique identifier for the dictionary pub id: i64, + /// The type of the dictionary index #[serde(rename = "indexType")] pub index_type: DictionaryIndexType, + /// Whether the dictionary is ordered #[serde(rename = "isOrdered")] pub is_ordered: bool, } +/// Type of an index for a dictionary-encoded field in the Arrow JSON format #[derive(Deserialize, Serialize, Debug)] pub struct DictionaryIndexType { + /// The name of the dictionary index type pub name: String, + /// Whether the dictionary index type is signed #[serde(rename = "isSigned")] pub is_signed: bool, + /// The bit width of the dictionary index type #[serde(rename = "bitWidth")] pub bit_width: i64, } @@ -137,6 +158,7 @@ pub struct DictionaryIndexType { #[derive(Deserialize, Serialize, Debug, Clone)] pub struct ArrowJsonBatch { count: usize, + /// The columns in the record batch pub columns: Vec<ArrowJsonColumn>, } @@ -144,7 +166,9 @@ pub struct ArrowJsonBatch { #[derive(Deserialize, Serialize, Debug, Clone)] #[allow(non_snake_case)] pub struct ArrowJsonDictionaryBatch { + /// The unique identifier for the dictionary pub id: i64, + /// The data for the dictionary pub data: ArrowJsonBatch, } @@ -152,15 +176,21 @@ pub struct ArrowJsonDictionaryBatch { #[derive(Deserialize, Serialize, Clone, Debug)] pub struct ArrowJsonColumn { name: String, + /// The number of elements in the column pub count: usize, + /// The validity bitmap to determine null values #[serde(rename = "VALIDITY")] pub validity: Option<Vec<u8>>, + /// The data values in the column #[serde(rename = "DATA")] pub data: Option<Vec<Value>>, + /// The offsets for variable-sized data types #[serde(rename = "OFFSET")] pub offset: Option<Vec<Value>>, // leaving as Value as 64-bit offsets are strings + /// The type id for union types
#[serde(rename = "TYPE_ID")] pub type_id: Option>, + /// The children columns for nested types pub children: Option>, } @@ -189,6 +219,7 @@ impl ArrowJson { Ok(true) } + /// Convert the stored dictionaries to `Vec[RecordBatch]` pub fn get_record_batches(&self) -> Result> { let schema = self.schema.to_arrow_schema()?; @@ -275,6 +306,7 @@ impl ArrowJsonField { } } +/// Generates a [`RecordBatch`] from an Arrow JSON batch, given a schema pub fn record_batch_from_json( schema: &Schema, json_batch: ArrowJsonBatch, @@ -877,6 +909,7 @@ pub fn array_from_json( } } +/// Construct a [`DictionaryArray`] from a partially typed JSON column pub fn dictionary_array_from_json( field: &Field, json_col: ArrowJsonColumn, @@ -965,6 +998,7 @@ fn create_null_buf(json_col: &ArrowJsonColumn) -> Buffer { } impl ArrowJsonBatch { + /// Convert a [`RecordBatch`] to an [`ArrowJsonBatch`] pub fn from_batch(batch: &RecordBatch) -> ArrowJsonBatch { let mut json_batch = ArrowJsonBatch { count: batch.num_rows(), diff --git a/arrow-integration-testing/src/flight_client_scenarios/auth_basic_proto.rs b/arrow-integration-testing/src/flight_client_scenarios/auth_basic_proto.rs index 376e31e1555..34c3c7706df 100644 --- a/arrow-integration-testing/src/flight_client_scenarios/auth_basic_proto.rs +++ b/arrow-integration-testing/src/flight_client_scenarios/auth_basic_proto.rs @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. +//! Scenario for testing basic auth. + use crate::{AUTH_PASSWORD, AUTH_USERNAME}; use arrow_flight::{flight_service_client::FlightServiceClient, BasicAuth, HandshakeRequest}; @@ -27,6 +29,7 @@ type Result = std::result::Result; type Client = FlightServiceClient; +/// Run a scenario that tests basic auth. pub async fn run_scenario(host: &str, port: u16) -> Result { let url = format!("http://{host}:{port}"); let mut client = FlightServiceClient::connect(url).await?; diff --git a/arrow-integration-testing/src/flight_client_scenarios/integration_test.rs b/arrow-integration-testing/src/flight_client_scenarios/integration_test.rs index 1a6c4e28a76..c8289ff446a 100644 --- a/arrow-integration-testing/src/flight_client_scenarios/integration_test.rs +++ b/arrow-integration-testing/src/flight_client_scenarios/integration_test.rs @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. +//! Integration tests for the Flight client. + use crate::open_json_file; use std::collections::HashMap; @@ -40,6 +42,7 @@ type Result = std::result::Result; type Client = FlightServiceClient; +/// Run a scenario that uploads data to a Flight server and then downloads it back pub async fn run_scenario(host: &str, port: u16, path: &str) -> Result { let url = format!("http://{host}:{port}"); diff --git a/arrow-integration-testing/src/flight_client_scenarios/middleware.rs b/arrow-integration-testing/src/flight_client_scenarios/middleware.rs index 3b71edf446a..b826ad45605 100644 --- a/arrow-integration-testing/src/flight_client_scenarios/middleware.rs +++ b/arrow-integration-testing/src/flight_client_scenarios/middleware.rs @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. +//! Scenario for testing middleware. + use arrow_flight::{ flight_descriptor::DescriptorType, flight_service_client::FlightServiceClient, FlightDescriptor, }; @@ -24,6 +26,7 @@ use tonic::{Request, Status}; type Error = Box; type Result = std::result::Result; +/// Run a scenario that tests middleware. 
diff --git a/arrow-integration-testing/src/flight_client_scenarios/middleware.rs b/arrow-integration-testing/src/flight_client_scenarios/middleware.rs
index 3b71edf446a..b826ad45605 100644
--- a/arrow-integration-testing/src/flight_client_scenarios/middleware.rs
+++ b/arrow-integration-testing/src/flight_client_scenarios/middleware.rs
@@ -15,6 +15,8 @@
 // specific language governing permissions and limitations
 // under the License.
 
+//! Scenario for testing middleware.
+
 use arrow_flight::{
     flight_descriptor::DescriptorType, flight_service_client::FlightServiceClient, FlightDescriptor,
 };
@@ -24,6 +26,7 @@ use tonic::{Request, Status};
 type Error = Box<dyn std::error::Error + Send + Sync + 'static>;
 type Result<T = (), E = Error> = std::result::Result<T, E>;
 
+/// Run a scenario that tests middleware.
 pub async fn run_scenario(host: &str, port: u16) -> Result {
     let url = format!("http://{host}:{port}");
     let conn = tonic::transport::Endpoint::new(url)?.connect().await?;
diff --git a/arrow-integration-testing/src/flight_client_scenarios.rs b/arrow-integration-testing/src/flight_client_scenarios/mod.rs
similarity index 93%
rename from arrow-integration-testing/src/flight_client_scenarios.rs
rename to arrow-integration-testing/src/flight_client_scenarios/mod.rs
index 66cced5f4c2..c5794433764 100644
--- a/arrow-integration-testing/src/flight_client_scenarios.rs
+++ b/arrow-integration-testing/src/flight_client_scenarios/mod.rs
@@ -15,6 +15,8 @@
 // specific language governing permissions and limitations
 // under the License.
 
+//! Collection of utilities for testing the Flight client.
+
 pub mod auth_basic_proto;
 pub mod integration_test;
 pub mod middleware;
diff --git a/arrow-integration-testing/src/flight_server_scenarios/auth_basic_proto.rs b/arrow-integration-testing/src/flight_server_scenarios/auth_basic_proto.rs
index 20d86895366..5462e5bd674 100644
--- a/arrow-integration-testing/src/flight_server_scenarios/auth_basic_proto.rs
+++ b/arrow-integration-testing/src/flight_server_scenarios/auth_basic_proto.rs
@@ -15,6 +15,8 @@
 // specific language governing permissions and limitations
 // under the License.
 
+//! Basic auth test for the Flight server.
+
 use std::pin::Pin;
 use std::sync::Arc;
 
@@ -35,6 +37,7 @@ use prost::Message;
 
 use crate::{AUTH_PASSWORD, AUTH_USERNAME};
 
+/// Set up a Flight server that tests basic auth.
 pub async fn scenario_setup(port: u16) -> Result {
     let service = AuthBasicProtoScenarioImpl {
         username: AUTH_USERNAME.into(),
@@ -52,6 +55,7 @@ pub async fn scenario_setup(port: u16) -> Result {
     Ok(())
 }
 
+/// Flight service implementation for testing basic auth.
 #[derive(Clone)]
 pub struct AuthBasicProtoScenarioImpl {
     username: Arc<str>,
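Both the client and server auth scenarios exchange a `BasicAuth` message inside the Flight handshake. A minimal sketch of building that request, assuming the generated `BasicAuth` and `HandshakeRequest` types keep their current field layout (the `protocol_version` of 0 is a placeholder value):

```rust
use arrow_flight::{BasicAuth, HandshakeRequest};
use prost::Message;

fn handshake_request(username: &str, password: &str) -> HandshakeRequest {
    let auth = BasicAuth {
        username: username.into(),
        password: password.into(),
    };
    // prost-encode the credentials into the opaque handshake payload
    let mut payload = Vec::new();
    auth.encode(&mut payload)
        .expect("encoding into a Vec cannot fail");
    HandshakeRequest {
        protocol_version: 0,
        payload: payload.into(),
    }
}
```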
{key}")))?; let options = arrow::ipc::writer::IpcWriteOptions::default(); diff --git a/arrow-integration-testing/src/flight_server_scenarios/middleware.rs b/arrow-integration-testing/src/flight_server_scenarios/middleware.rs index e8d9c521bb9..6685d45dffa 100644 --- a/arrow-integration-testing/src/flight_server_scenarios/middleware.rs +++ b/arrow-integration-testing/src/flight_server_scenarios/middleware.rs @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. +//! Middleware test for the Flight server. + use std::pin::Pin; use arrow_flight::{ @@ -31,6 +33,7 @@ type TonicStream = Pin + Send + Sync + 'static>>; type Error = Box; type Result = std::result::Result; +/// Run a scenario that tests middleware. pub async fn scenario_setup(port: u16) -> Result { let service = MiddlewareScenarioImpl {}; let svc = FlightServiceServer::new(service); @@ -44,6 +47,7 @@ pub async fn scenario_setup(port: u16) -> Result { Ok(()) } +/// Middleware interceptor for testing #[derive(Clone, Default)] pub struct MiddlewareScenarioImpl {} diff --git a/arrow-integration-testing/src/flight_server_scenarios.rs b/arrow-integration-testing/src/flight_server_scenarios/mod.rs similarity index 91% rename from arrow-integration-testing/src/flight_server_scenarios.rs rename to arrow-integration-testing/src/flight_server_scenarios/mod.rs index 48d4e604568..3833e1c6335 100644 --- a/arrow-integration-testing/src/flight_server_scenarios.rs +++ b/arrow-integration-testing/src/flight_server_scenarios/mod.rs @@ -15,6 +15,7 @@ // specific language governing permissions and limitations // under the License. +//! Collection of utilities for testing the Flight server. use std::net::SocketAddr; use arrow_flight::{FlightEndpoint, Location, Ticket}; @@ -27,6 +28,7 @@ pub mod middleware; type Error = Box; type Result = std::result::Result; +/// Listen on a port and return the address pub async fn listen_on(port: u16) -> Result { let addr: SocketAddr = format!("0.0.0.0:{port}").parse()?; @@ -36,6 +38,7 @@ pub async fn listen_on(port: u16) -> Result { Ok(addr) } +/// Create a FlightEndpoint with a ticket and location pub fn endpoint(ticket: &str, location_uri: impl Into) -> FlightEndpoint { FlightEndpoint { ticket: Some(Ticket { diff --git a/arrow-integration-testing/src/lib.rs b/arrow-integration-testing/src/lib.rs index 4ce7b06a188..ba8e3876c3e 100644 --- a/arrow-integration-testing/src/lib.rs +++ b/arrow-integration-testing/src/lib.rs @@ -17,6 +17,7 @@ //! 
diff --git a/arrow-integration-testing/src/lib.rs b/arrow-integration-testing/src/lib.rs
index 4ce7b06a188..ba8e3876c3e 100644
--- a/arrow-integration-testing/src/lib.rs
+++ b/arrow-integration-testing/src/lib.rs
@@ -17,6 +17,7 @@
 //! Common code used in the integration test binaries
+#![warn(missing_docs)]
 
 use serde_json::Value;
 
 use arrow::array::{Array, StructArray};
@@ -42,7 +43,9 @@ pub const AUTH_PASSWORD: &str = "flight";
 pub mod flight_client_scenarios;
 pub mod flight_server_scenarios;
 
+/// An Arrow file in JSON format
 pub struct ArrowFile {
+    /// The schema of the file
     pub schema: Schema,
     // we can evolve this into a concrete Arrow type
     // this is temporarily not being read from
@@ -51,12 +54,14 @@ pub struct ArrowFile {
 }
 
 impl ArrowFile {
+    /// Read a single [`RecordBatch`] from the file
     pub fn read_batch(&self, batch_num: usize) -> Result<RecordBatch> {
         let b = self.arrow_json["batches"].get(batch_num).unwrap();
         let json_batch: ArrowJsonBatch = serde_json::from_value(b.clone()).unwrap();
         record_batch_from_json(&self.schema, json_batch, Some(&self.dictionaries))
     }
 
+    /// Read all [`RecordBatch`]es from the file
     pub fn read_batches(&self) -> Result<Vec<RecordBatch>> {
         self.arrow_json["batches"]
             .as_array()
@@ -70,7 +75,7 @@ impl ArrowFile {
     }
 }
 
-// Canonicalize the names of map fields in a schema
+/// Canonicalize the names of map fields in a schema
 pub fn canonicalize_schema(schema: &Schema) -> Schema {
     let fields = schema
         .fields()
@@ -107,6 +112,7 @@ pub fn canonicalize_schema(schema: &Schema) -> Schema {
     Schema::new(fields).with_metadata(schema.metadata().clone())
 }
 
+/// Read an Arrow file in JSON format
 pub fn open_json_file(json_name: &str) -> Result<ArrowFile> {
     let json_file = File::open(json_name)?;
     let reader = BufReader::new(json_file);
@@ -157,10 +163,7 @@ pub fn read_gzip_json(version: &str, path: &str) -> ArrowJson {
     arrow_json
 }
 
-//
-// C Data Integration entrypoints
-//
-
+/// C Data Integration entrypoint to export the schema from a JSON file
 fn cdata_integration_export_schema_from_json(
     c_json_name: *const i8,
     out: *mut FFI_ArrowSchema,
@@ -173,6 +176,7 @@
     Ok(())
 }
 
+/// C Data Integration entrypoint to export a batch from a JSON file
 fn cdata_integration_export_batch_from_json(
     c_json_name: *const i8,
     batch_num: c_int,
@@ -263,6 +267,7 @@ pub unsafe extern "C" fn arrow_rs_free_error(c_error: *mut i8) {
     }
 }
 
+/// C ABI entrypoint to export an Arrow schema from a JSON file
 #[no_mangle]
 pub extern "C" fn arrow_rs_cdata_integration_export_schema_from_json(
     c_json_name: *const i8,
@@ -272,6 +277,7 @@
     result_to_c_error(&r)
 }
 
+/// C ABI entrypoint to compare an Arrow schema against a JSON file
 #[no_mangle]
 pub extern "C" fn arrow_rs_cdata_integration_import_schema_and_compare_to_json(
     c_json_name: *const i8,
@@ -281,6 +287,7 @@
     result_to_c_error(&r)
 }
 
+/// C ABI entrypoint to export a [`RecordBatch`] from a JSON file
 #[no_mangle]
 pub extern "C" fn arrow_rs_cdata_integration_export_batch_from_json(
     c_json_name: *const i8,
@@ -291,6 +298,7 @@
     result_to_c_error(&r)
 }
 
+/// C ABI entrypoint to compare a [`RecordBatch`] against a JSON file
 #[no_mangle]
 pub extern "C" fn arrow_rs_cdata_integration_import_batch_and_compare_to_json(
     c_json_name: *const i8,
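The `arrow_rs_*` entrypoints above are normally invoked from C by the cross-language integration harness, but the contract is visible from Rust as well. A hedged sketch (the file name is a placeholder, and the pointer cast is only needed because the signedness of `c_char` varies by platform):

```rust
use std::ffi::CString;

use arrow::ffi::FFI_ArrowSchema;
use arrow_integration_testing::{
    arrow_rs_cdata_integration_export_schema_from_json, arrow_rs_free_error,
};

fn main() {
    let json_name = CString::new("generated_primitive.json").unwrap();
    let mut out = FFI_ArrowSchema::empty();

    // A null return signals success; otherwise it points at a C string
    // describing the error, which must be released with arrow_rs_free_error
    let err = arrow_rs_cdata_integration_export_schema_from_json(
        json_name.as_ptr() as *const i8,
        &mut out,
    );
    if !err.is_null() {
        unsafe { arrow_rs_free_error(err) };
    }
}
```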
diff --git a/arrow-json/src/writer.rs b/arrow-json/src/writer.rs
index 86d2e88d99f..d973206ccf7 100644
--- a/arrow-json/src/writer.rs
+++ b/arrow-json/src/writer.rs
@@ -397,6 +397,7 @@ where
 
 #[cfg(test)]
 mod tests {
+    use core::str;
     use std::fs::{read_to_string, File};
     use std::io::{BufReader, Seek};
     use std::sync::Arc;
@@ -1111,7 +1112,7 @@ mod tests {
             }
         }
 
-        let result = String::from_utf8(buf).unwrap();
+        let result = str::from_utf8(&buf).unwrap();
         let expected = read_to_string(test_file).unwrap();
         for (r, e) in result.lines().zip(expected.lines()) {
             let mut expected_json = serde_json::from_str::<Value>(e).unwrap();
@@ -1150,7 +1151,7 @@ mod tests {
     fn json_writer_empty() {
         let mut writer = ArrayWriter::new(vec![] as Vec<u8>);
         writer.finish().unwrap();
-        assert_eq!(String::from_utf8(writer.into_inner()).unwrap(), "");
+        assert_eq!(str::from_utf8(&writer.into_inner()).unwrap(), "");
     }
 
     #[test]
@@ -1279,7 +1280,7 @@
             writer.write(&batch).unwrap();
         }
 
-        let result = String::from_utf8(buf).unwrap();
+        let result = str::from_utf8(&buf).unwrap();
         let expected = read_to_string(test_file).unwrap();
         for (r, e) in result.lines().zip(expected.lines()) {
             let mut expected_json = serde_json::from_str::<Value>(e).unwrap();
@@ -1321,7 +1322,7 @@
             writer.write_batches(&batches).unwrap();
         }
 
-        let result = String::from_utf8(buf).unwrap();
+        let result = str::from_utf8(&buf).unwrap();
         let expected = read_to_string(test_file).unwrap();
         // result is eq to 2 same batches
         let expected = format!("{expected}\n{expected}");
diff --git a/arrow-schema/src/field.rs b/arrow-schema/src/field.rs
index fc4852a3d37..b532ea8616b 100644
--- a/arrow-schema/src/field.rs
+++ b/arrow-schema/src/field.rs
@@ -610,14 +610,14 @@ mod test {
     #[test]
     fn test_new_with_string() {
         // Fields should allow owned Strings to support reuse
-        let s = String::from("c1");
+        let s = "c1";
         Field::new(s, DataType::Int64, false);
     }
 
     #[test]
     fn test_new_dict_with_string() {
         // Fields should allow owned Strings to support reuse
-        let s = String::from("c1");
+        let s = "c1";
         Field::new_dict(s, DataType::Int64, false, 4, false);
     }
 
diff --git a/arrow/tests/array_cast.rs b/arrow/tests/array_cast.rs
index 0fd89cc2bff..64dec5a2bc0 100644
--- a/arrow/tests/array_cast.rs
+++ b/arrow/tests/array_cast.rs
@@ -179,7 +179,7 @@ fn test_can_cast_types() {
 /// Create instances of arrays with varying types for cast tests
 fn get_arrays_of_all_types() -> Vec<ArrayRef> {
-    let tz_name = String::from("+08:00");
+    let tz_name = "+08:00";
     let binary_data: Vec<&[u8]> = vec![b"foo", b"bar"];
     vec![
         Arc::new(BinaryArray::from(binary_data.clone())),