diff --git a/Cargo.toml b/Cargo.toml
index de79afeba..f8e46de5e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -29,16 +29,16 @@ members = [
 resolver = "2"
 
 [workspace.dependencies]
-arrow = { version = "45.0.0" }
-arrow-flight = { version = "45.0.0", features = ["flight-sql-experimental"] }
-arrow-schema = { version = "45.0.0", default-features = false }
+arrow = { version = "46.0.0" }
+arrow-flight = { version = "46.0.0", features = ["flight-sql-experimental"] }
+arrow-schema = { version = "46.0.0", default-features = false }
 configure_me = { version = "0.4.0" }
 configure_me_codegen = { version = "0.4.4" }
-datafusion = "30.0.0"
-datafusion-cli = "30.0.0"
-datafusion-proto = "30.0.0"
-object_store = "0.6.1"
-sqlparser = "0.36.1"
+datafusion = "31.0.0"
+datafusion-cli = "31.0.0"
+datafusion-proto = "31.0.0"
+object_store = "0.7.0"
+sqlparser = "0.37.0"
 tonic = { version = "0.9" }
 tonic-build = { version = "0.9", default-features = false, features = [
     "transport",
diff --git a/ballista/client/src/context.rs b/ballista/client/src/context.rs
index 640ee77a1..a0671acf2 100644
--- a/ballista/client/src/context.rs
+++ b/ballista/client/src/context.rs
@@ -616,6 +616,8 @@ mod tests {
             infinite_source: false,
             insert_mode: datafusion::datasource::listing::ListingTableInsertMode::Error,
+            file_type_write_options: None,
+            single_file: false,
         };
 
         let table_paths = listing_table
diff --git a/ballista/core/src/cache_layer/mod.rs b/ballista/core/src/cache_layer/mod.rs
index ef511eb4f..86e52395f 100644
--- a/ballista/core/src/cache_layer/mod.rs
+++ b/ballista/core/src/cache_layer/mod.rs
@@ -41,7 +41,7 @@ mod tests {
     use futures::TryStreamExt;
     use object_store::local::LocalFileSystem;
     use object_store::path::Path;
-    use object_store::{GetResult, ObjectStore};
+    use object_store::{GetResultPayload, ObjectStore};
     use std::io::Write;
     use std::sync::Arc;
     use tempfile::NamedTempFile;
@@ -71,8 +71,8 @@ mod tests {
             source_object_store.clone(),
         ));
         let actual_source = source_object_store.get(&source_location).await.unwrap();
-        match actual_source {
-            GetResult::File(file, _) => {
+        match actual_source.payload {
+            GetResultPayload::File(file, _) => {
                 assert_eq!(test_bytes.len(), file.metadata()?.len() as usize);
             }
             _ => {
@@ -99,13 +99,13 @@ mod tests {
             source_object_store_with_key.clone(),
         );
         let actual_cache = cache_object_store.get(&source_location).await.unwrap();
-        match actual_cache {
-            GetResult::File(_, _) => {
+        match actual_cache.payload {
+            GetResultPayload::File(_, _) => {
                 return Err(BallistaError::General(
                     "Data stream instead of file should be returned".to_string(),
                 ))
             }
-            GetResult::Stream(s) => {
+            GetResultPayload::Stream(s) => {
                 let mut buf: Vec<u8> = vec![];
                 s.try_fold(&mut buf, |acc, part| async move {
                     let mut part: Vec<u8> = part.into();
diff --git a/ballista/scheduler/src/flight_sql.rs b/ballista/scheduler/src/flight_sql.rs
index df28c886e..942930a17 100644
--- a/ballista/scheduler/src/flight_sql.rs
+++ b/ballista/scheduler/src/flight_sql.rs
@@ -446,8 +446,8 @@ impl FlightSqlServiceImpl {
     > {
         type FlightResult = Result<FlightData, Status>;
         let (tx, rx): (Sender<FlightResult>, Receiver<FlightResult>) = channel(2);
-        let schema = (*rb.schema()).clone();
-        let flights = batches_to_flight_data(schema, vec![rb])
+        let schema = rb.schema();
+        let flights = batches_to_flight_data(&schema, vec![rb])
             .map_err(|_| Status::internal("Error encoding batches".to_string()))?;
         for flight in flights {
             tx.send(Ok(flight))
diff --git a/benchmarks/src/bin/tpch.rs b/benchmarks/src/bin/tpch.rs
index b708b557b..60e6ee6d3 100644
--- a/benchmarks/src/bin/tpch.rs
+++ b/benchmarks/src/bin/tpch.rs
@@ -846,6 +846,8 @@ async fn get_table(
         file_sort_order: vec![],
         infinite_source: false,
         insert_mode: ListingTableInsertMode::Error,
+        file_type_write_options: None,
+        single_file: false,
     };
 
     let url = ListingTableUrl::parse(path)?;
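
The most invasive part of this upgrade is the object_store 0.7 change reflected in the cache_layer tests: `ObjectStore::get` now returns a `GetResult` struct, and the variants that used to live directly on the old `GetResult` enum moved to its `payload` field as `GetResultPayload`. A minimal sketch of the new pattern, separate from this PR, assuming object_store 0.7 plus tokio and futures as dependencies; the location below is a placeholder:

```rust
// Illustrative only: shows the object_store 0.7 `GetResult`/`GetResultPayload`
// shape that the cache_layer tests in this diff migrate to.
use futures::TryStreamExt;
use object_store::{local::LocalFileSystem, path::Path, GetResultPayload, ObjectStore};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let store = LocalFileSystem::new();
    let location = Path::from("tmp/example.bin"); // hypothetical object path

    // `get` now returns a struct; the old enum variants live on `payload`.
    let result = store.get(&location).await?;
    match result.payload {
        // Local file systems hand back the file handle and its path.
        GetResultPayload::File(file, path) => {
            println!("file {:?}: {} bytes", path, file.metadata()?.len());
        }
        // Remote or caching stores return a stream of byte chunks instead.
        GetResultPayload::Stream(stream) => {
            let body: Vec<u8> = stream
                .try_fold(Vec::new(), |mut acc, chunk| async move {
                    acc.extend_from_slice(&chunk);
                    Ok(acc)
                })
                .await?;
            println!("streamed {} bytes", body.len());
        }
    }
    Ok(())
}
```

The flight_sql.rs hunk is the analogous arrow-flight 46 adjustment: `batches_to_flight_data` now borrows the schema, so the `SchemaRef` returned by `rb.schema()` is passed as `&schema` instead of being cloned into an owned `Schema`.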