From 3b29e2f3b0d151f91f5b75ae87a0c16d72acf54c Mon Sep 17 00:00:00 2001
From: sky <3374614481@qq.com>
Date: Mon, 9 Sep 2024 18:12:28 +0800
Subject: [PATCH 01/15] feat: continue vacuum drop table on per-table cleanup failures

---
 .../src/storages/fuse/operations/handler.rs   |   5 +-
 .../fuse/operations/vacuum_drop_tables.rs     | 174 ++++++++++--------
 .../vacuum_handler/src/vacuum_handler.rs      |  13 +-
 .../interpreter_vacuum_drop_tables.rs         |   9 +-
 4 files changed, 119 insertions(+), 82 deletions(-)

diff --git a/src/query/ee/src/storages/fuse/operations/handler.rs b/src/query/ee/src/storages/fuse/operations/handler.rs
index 7544ad528b3e..b4e19a297724 100644
--- a/src/query/ee/src/storages/fuse/operations/handler.rs
+++ b/src/query/ee/src/storages/fuse/operations/handler.rs
@@ -22,14 +22,13 @@ use databend_common_catalog::table::Table;
 use databend_common_catalog::table_context::TableContext;
 use databend_common_exception::Result;
 use databend_common_storages_fuse::FuseTable;
-use databend_enterprise_vacuum_handler::vacuum_handler::VacuumDropFileInfo;
+use databend_enterprise_vacuum_handler::vacuum_handler::VacuumDropTablesResult;
 use databend_enterprise_vacuum_handler::VacuumHandler;
 use databend_enterprise_vacuum_handler::VacuumHandlerWrapper;
 
 use crate::storages::fuse::do_vacuum;
 use crate::storages::fuse::operations::vacuum_temporary_files::do_vacuum_temporary_files;
 use crate::storages::fuse::vacuum_drop_tables;
-
 pub struct RealVacuumHandler {}
 
 #[async_trait::async_trait]
 impl VacuumHandler for RealVacuumHandler {
@@ -49,7 +48,7 @@ impl VacuumHandler for RealVacuumHandler {
         threads_nums: usize,
         tables: Vec<Arc<dyn Table>>,
         dry_run_limit: Option<usize>,
-    ) -> Result<Option<Vec<VacuumDropFileInfo>>> {
+    ) -> VacuumDropTablesResult {
         vacuum_drop_tables(threads_nums, tables, dry_run_limit).await
     }
 
diff --git a/src/query/ee/src/storages/fuse/operations/vacuum_drop_tables.rs b/src/query/ee/src/storages/fuse/operations/vacuum_drop_tables.rs
index aa62b1c3371f..d67ae15776cc 100644
--- a/src/query/ee/src/storages/fuse/operations/vacuum_drop_tables.rs
+++ b/src/query/ee/src/storages/fuse/operations/vacuum_drop_tables.rs
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+use std::collections::HashSet;
 use std::sync::Arc;
 use std::time::Instant;
 
@@ -21,92 +22,111 @@ use databend_common_exception::Result;
 use databend_common_meta_app::schema::TableInfo;
 use databend_common_storages_fuse::FuseTable;
 use databend_enterprise_vacuum_handler::vacuum_handler::VacuumDropFileInfo;
+use databend_enterprise_vacuum_handler::vacuum_handler::VacuumDropTablesResult;
 use futures_util::TryStreamExt;
 use log::error;
 use log::info;
 use opendal::EntryMode;
 use opendal::Metakey;
 use opendal::Operator;
-
 #[async_backtrace::framed]
 pub async fn do_vacuum_drop_table(
     tables: Vec<(TableInfo, Operator)>,
     dry_run_limit: Option<usize>,
-) -> Result<Option<Vec<VacuumDropFileInfo>>> {
+) -> VacuumDropTablesResult {
     let mut list_files = vec![];
+    let mut failed_dbs = HashSet::new();
+    let mut failed_tables = HashSet::new();
     for (table_info, operator) in tables {
-        let dir = format!(
-            "{}/",
-            FuseTable::parse_storage_prefix_from_table_info(&table_info)?
-        );
-
-        info!(
-            "vacuum drop table {:?} dir {:?}, is_external_table:{:?}",
-            table_info.name,
-            dir,
-            table_info.meta.storage_params.is_some()
-        );
-
-        let start = Instant::now();
-
-        match dry_run_limit {
-            None => {
-                let result = operator.remove_all(&dir).await;
-                if let Err(ref err) = result {
-                    error!("failed to remove all in directory {}: {}", dir, err);
-                }
-                result?;
+        let result =
+            vacuum_drop_single_table(&table_info, operator, dry_run_limit, &mut list_files).await;
+        if result.is_err() {
+            let db_name = table_info.desc.split('.').next().unwrap();
+            let table_id = table_info.ident.table_id;
+            failed_dbs.insert(db_name.to_string());
+            failed_tables.insert(table_id);
+        }
+    }
+    Ok(if dry_run_limit.is_some() {
+        (Some(list_files), failed_dbs, failed_tables)
+    } else {
+        (None, failed_dbs, failed_tables)
+    })
+}
+
+async fn vacuum_drop_single_table(
+    table_info: &TableInfo,
+    operator: Operator,
+    dry_run_limit: Option<usize>,
+    list_files: &mut Vec<VacuumDropFileInfo>,
+) -> Result<()> {
+    let dir = format!(
+        "{}/",
+        FuseTable::parse_storage_prefix_from_table_info(table_info)?
+    );
+
+    info!(
+        "vacuum drop table {:?} dir {:?}, is_external_table:{:?}",
+        table_info.name,
+        dir,
+        table_info.meta.storage_params.is_some()
+    );
+
+    let start = Instant::now();
+
+    match dry_run_limit {
+        None => {
+            let result = operator.remove_all(&dir).await;
+            if let Err(ref err) = result {
+                error!("failed to remove all in directory {}: {}", dir, err);
             }
-            Some(dry_run_limit) => {
-                let mut ds = operator
-                    .lister_with(&dir)
-                    .recursive(true)
-                    .metakey(Metakey::Mode)
-                    .metakey(Metakey::ContentLength)
-                    .await?;
-
-                loop {
-                    let entry = ds.try_next().await;
-                    match entry {
-                        Ok(Some(de)) => {
-                            let meta = de.metadata();
-                            if EntryMode::FILE == meta.mode() {
-                                list_files.push((
-                                    table_info.name.clone(),
-                                    de.name().to_string(),
-                                    meta.content_length(),
-                                ));
-                                if list_files.len() >= dry_run_limit {
-                                    break;
-                                }
+            result?;
+        }
+        Some(dry_run_limit) => {
+            let mut ds = operator
+                .lister_with(&dir)
+                .recursive(true)
+                .metakey(Metakey::Mode)
+                .metakey(Metakey::ContentLength)
+                .await?;
+
+            loop {
+                let entry = ds.try_next().await;
+                match entry {
+                    Ok(Some(de)) => {
+                        let meta = de.metadata();
+                        if EntryMode::FILE == meta.mode() {
+                            list_files.push((
+                                table_info.name.clone(),
+                                de.name().to_string(),
+                                meta.content_length(),
+                            ));
+                            if list_files.len() >= dry_run_limit {
+                                break;
                             }
                         }
-                        Ok(None) => break,
-                        Err(e) => {
-                            if e.kind() == opendal::ErrorKind::NotFound {
-                                info!("target not found, ignored. {}", e);
-                                continue;
-                            } else {
-                                return Err(e.into());
-                            }
+                    }
+                    Ok(None) => break,
+                    Err(e) => {
+                        if e.kind() == opendal::ErrorKind::NotFound {
+                            info!("target not found, ignored. {}", e);
+                            continue;
+                        } else {
+                            return Err(e.into());
                         }
                     }
                 }
             }
-        };
-
-        info!(
-            "vacuum drop table {:?} dir {:?}, cost:{:?}",
-            table_info.name,
-            dir,
-            start.elapsed()
-        );
-    }
-    Ok(if dry_run_limit.is_some() {
-        Some(list_files)
-    } else {
-        None
-    })
+        }
+    };
+
+    info!(
+        "vacuum drop table {:?} dir {:?}, cost:{:?}",
+        table_info.name,
+        dir,
+        start.elapsed()
+    );
+    Ok(())
 }
 
 #[async_backtrace::framed]
 pub async fn vacuum_drop_tables_by_table_info(
     num_threads: usize,
     table_infos: Vec<(TableInfo, Operator)>,
     dry_run_limit: Option<usize>,
-) -> Result<Option<Vec<VacuumDropFileInfo>>> {
+) -> VacuumDropTablesResult {
     let start = Instant::now();
 
     let num_tables = table_infos.len();
@@ -157,17 +177,21 @@ pub async fn vacuum_drop_tables_by_table_info(
     // longer be roll-forward.
         if dry_run_limit.is_some() {
             let mut ret_files = vec![];
-            for file in result {
-                if let Some(files) = file? {
+            for res in result {
+                if let Some(files) = res?.0 {
                     ret_files.extend(files);
                 }
             }
-            Some(ret_files)
+            (Some(ret_files), HashSet::new(), HashSet::new())
         } else {
-            for file in result {
-                let _ = file?;
+            let mut failed_dbs = HashSet::new();
+            let mut failed_tables = HashSet::new();
+            for res in result {
+                let (_, db, tbl) = res?;
+                failed_dbs.extend(db);
+                failed_tables.extend(tbl);
             }
-            None
+            (None, failed_dbs, failed_tables)
         }
     };
 
@@ -185,7 +209,7 @@ pub async fn vacuum_drop_tables(
     threads_nums: usize,
     tables: Vec<Arc<dyn Table>>,
     dry_run_limit: Option<usize>,
-) -> Result<Option<Vec<VacuumDropFileInfo>>> {
+) -> VacuumDropTablesResult {
     let num_tables = tables.len();
     info!("vacuum_drop_tables {} tables", num_tables);
 
diff --git a/src/query/ee_features/vacuum_handler/src/vacuum_handler.rs b/src/query/ee_features/vacuum_handler/src/vacuum_handler.rs
index 0971b32a44a6..b8437918c201 100644
--- a/src/query/ee_features/vacuum_handler/src/vacuum_handler.rs
+++ b/src/query/ee_features/vacuum_handler/src/vacuum_handler.rs
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+use std::collections::HashSet;
 use std::sync::Arc;
 use std::time::Duration;
 
@@ -22,10 +23,16 @@ use databend_common_catalog::table::Table;
 use databend_common_catalog::table_context::TableContext;
 use databend_common_exception::Result;
 use databend_common_storages_fuse::FuseTable;
-
 // (TableName, file, file size)
 pub type VacuumDropFileInfo = (String, String, u64);
 
+// (drop_files, failed_dbs, failed_tables)
+pub type VacuumDropTablesResult = Result<(
+    Option<Vec<VacuumDropFileInfo>>,
+    HashSet<String>,
+    HashSet<u64>,
+)>;
+
 #[async_trait::async_trait]
 pub trait VacuumHandler: Sync + Send {
     async fn do_vacuum(
@@ -41,7 +48,7 @@ pub trait VacuumHandler: Sync + Send {
         threads_nums: usize,
         tables: Vec<Arc<dyn Table>>,
         dry_run_limit: Option<usize>,
-    ) -> Result<Option<Vec<VacuumDropFileInfo>>>;
+    ) -> VacuumDropTablesResult;
 
     async fn do_vacuum_temporary_files(
         &self,
@@ -79,7 +86,7 @@ impl VacuumHandlerWrapper {
         threads_nums: usize,
         tables: Vec<Arc<dyn Table>>,
         dry_run_limit: Option<usize>,
-    ) -> Result<Option<Vec<VacuumDropFileInfo>>> {
+    ) -> VacuumDropTablesResult {
         self.handler
             .do_vacuum_drop_tables(threads_nums, tables, dry_run_limit)
             .await
diff --git a/src/query/service/src/interpreters/interpreter_vacuum_drop_tables.rs b/src/query/service/src/interpreters/interpreter_vacuum_drop_tables.rs
index 1c6ced64c453..eb87f42990cc 100644
--- a/src/query/service/src/interpreters/interpreter_vacuum_drop_tables.rs
+++ b/src/query/service/src/interpreters/interpreter_vacuum_drop_tables.rs
@@ -159,7 +159,7 @@ impl Interpreter for VacuumDropTablesInterpreter {
         let handler = get_vacuum_handler();
         let threads_nums = self.ctx.get_settings().get_max_threads()? as usize;
 
-        let files_opt = handler
+        let (files_opt, failed_dbs, failed_tables) = handler
             .do_vacuum_drop_tables(
                 threads_nums,
                 tables,
@@ -172,6 +172,13 @@ impl Interpreter for VacuumDropTablesInterpreter {
             .await?;
         // gc meta data only when not dry run
         if self.plan.option.dry_run.is_none() {
+            let drop_ids = drop_ids
+                .into_iter()
+                .filter(|id| match id {
+                    DroppedId::Db(_, db_name) => !failed_dbs.contains(db_name),
+                    DroppedId::Table(_, table_id, _) => !failed_tables.contains(table_id),
+                })
+                .collect();
             self.gc_drop_tables(catalog, drop_ids).await?;
         }
 

From c12b23f255d175dc26051f365987566c3285c934 Mon Sep 17 00:00:00 2001
From: sky <3374614481@qq.com>
Date: Mon, 9 Sep 2024 18:41:18 +0800
Subject: [PATCH 02/15] modify ut

---
 .../ee/tests/it/storages/fuse/operations/vacuum.rs | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/src/query/ee/tests/it/storages/fuse/operations/vacuum.rs b/src/query/ee/tests/it/storages/fuse/operations/vacuum.rs
index 333c370f431b..494505b6b228 100644
--- a/src/query/ee/tests/it/storages/fuse/operations/vacuum.rs
+++ b/src/query/ee/tests/it/storages/fuse/operations/vacuum.rs
@@ -305,9 +305,9 @@ async fn test_fuse_do_vacuum_drop_table_deletion_error() -> Result<()> {
     let operator = OperatorBuilder::new(faulty_accessor.clone()).finish();
     let tables = vec![(table_info, operator)];
 
-    let result = do_vacuum_drop_table(tables, None).await;
-    assert!(result.is_err());
-
+    let result = do_vacuum_drop_table(tables, None).await?;
+    assert!(!result.1.is_empty());
+    assert!(!result.2.is_empty());
     // verify that accessor.delete() was called
     assert!(faulty_accessor.hit_delete_operation());
 
@@ -339,7 +339,8 @@ async fn test_fuse_vacuum_drop_tables_in_parallel_with_deletion_error() -> Resul
         assert!(faulty_accessor.hit_delete_operation());
 
         // verify that errors of deletions are not swallowed
-        assert!(result.is_err());
+        assert!(!result.1.is_empty());
+        assert!(!result.2.is_empty());
     }
 
     // Case 2: parallel vacuum dropped tables
@@ -427,8 +428,9 @@ async fn test_fuse_do_vacuum_drop_table_external_storage() -> Result<()> {
     let operator = OperatorBuilder::new(accessor.clone()).finish();
     let tables = vec![(table_info, operator)];
 
-    let result = do_vacuum_drop_table(tables, None).await;
-    assert!(result.is_err());
+    let result = do_vacuum_drop_table(tables, None).await?;
+    assert!(!result.1.is_empty());
+    assert!(!result.2.is_empty());
 
     // verify that accessor.delete() was called
     assert!(!accessor.hit_delete_operation());

From a625abe9236050a223440a661fce616d0264a0d5 Mon Sep 17 00:00:00 2001
From: sky <3374614481@qq.com>
Date: Mon, 9 Sep 2024 19:22:55 +0800
Subject: [PATCH 03/15] fix ut

---
 src/query/ee/tests/it/storages/fuse/operations/vacuum.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/query/ee/tests/it/storages/fuse/operations/vacuum.rs b/src/query/ee/tests/it/storages/fuse/operations/vacuum.rs
index 494505b6b228..b81d8e5d0f8d 100644
--- a/src/query/ee/tests/it/storages/fuse/operations/vacuum.rs
+++ b/src/query/ee/tests/it/storages/fuse/operations/vacuum.rs
@@ -334,7 +334,7 @@ async fn test_fuse_vacuum_drop_tables_in_parallel_with_deletion_error() -> Resul
         // with one table and one thread, `vacuum_drop_tables_by_table_info` will NOT run in parallel
         let tables = vec![table];
         let num_threads = 1;
-        let result = vacuum_drop_tables_by_table_info(num_threads, tables, None).await;
+        let result = vacuum_drop_tables_by_table_info(num_threads, tables, None).await?;
 
         // verify that accessor.delete() was called
         assert!(faulty_accessor.hit_delete_operation());

From e200bea039952cb3614dda7cfc5541d177e40db5 Mon Sep 17 00:00:00 2001
From: sky <3374614481@qq.com>
Date: Mon, 9 Sep 2024 20:05:04 +0800
Subject: [PATCH 04/15] fix ut

---
 src/query/ee/tests/it/storages/fuse/operations/vacuum.rs | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/query/ee/tests/it/storages/fuse/operations/vacuum.rs b/src/query/ee/tests/it/storages/fuse/operations/vacuum.rs
index b81d8e5d0f8d..e6776e7b16ca 100644
--- a/src/query/ee/tests/it/storages/fuse/operations/vacuum.rs
+++ b/src/query/ee/tests/it/storages/fuse/operations/vacuum.rs
@@ -352,11 +352,12 @@ async fn test_fuse_vacuum_drop_tables_in_parallel_with_deletion_error() -> Resul
         // with 2 tables and 2 threads, `vacuum_drop_tables_by_table_info` will run in parallel (one table per thread)
         let tables = vec![table.clone(), table];
         let num_threads = 2;
-        let result = vacuum_drop_tables_by_table_info(num_threads, tables, None).await;
+        let result = vacuum_drop_tables_by_table_info(num_threads, tables, None).await?;
 
         // verify that accessor.delete() was called
         assert!(faulty_accessor.hit_delete_operation());
         // verify that errors of deletions are not swallowed
-        assert!(result.is_err());
+        assert!(!result.1.is_empty());
+        assert!(!result.2.is_empty());
     }
 
     Ok(())

From 1c78d89e93562be2fff10a975e66a250cb24b674 Mon Sep 17 00:00:00 2001
From: sky <3374614481@qq.com>
Date: Tue, 10 Sep 2024 02:17:57 +0800
Subject: [PATCH 05/15] fix db name, gc_drop_tables()

---
 src/meta/api/src/schema_api_impl.rs           | 36 +++++++-------
 src/meta/api/src/schema_api_test_suite.rs     | 10 ++--
 src/meta/app/src/schema/table.rs              | 19 +++++++-
 .../fuse/operations/vacuum_drop_tables.rs     |  2 +-
 .../interpreter_vacuum_drop_tables.rs         | 48 ++++++++++++++-----
 5 files changed, 79 insertions(+), 36 deletions(-)

diff --git a/src/meta/api/src/schema_api_impl.rs b/src/meta/api/src/schema_api_impl.rs
index 24f7139f1fd2..b61d7c18f807 100644
--- a/src/meta/api/src/schema_api_impl.rs
+++ b/src/meta/api/src/schema_api_impl.rs
@@ -2884,6 +2884,7 @@ impl<KV: kvapi::KVApi<Error = MetaError> + ?Sized> SchemaApi for KV {
                     drop_ids.push(DroppedId::Db(
                         db_info.database_id.db_id,
                         db_info.name_ident.database_name().to_string(),
+                        Arc::new(vec![]),
                     ));
                 }
                 if num == left_num {
@@ -2893,27 +2894,30 @@ impl<KV: kvapi::KVApi<Error = MetaError> + ?Sized> SchemaApi for KV {
                     });
                 }
             } else {
-                table_infos.iter().for_each(|(table_info, db_id)| {
-                    if !drop_db {
-                        drop_ids.push(DroppedId::Table(
-                            *db_id,
-                            table_info.ident.table_id,
-                            table_info.name.clone(),
-                        ))
-                    }
-                });
-                drop_table_infos.extend(
-                    table_infos
-                        .into_iter()
-                        .map(|(table_info, _)| table_info)
-                        .collect::<Vec<_>>(),
-                );
                 if drop_db {
                     drop_ids.push(DroppedId::Db(
                         db_info.database_id.db_id,
                         db_info.name_ident.database_name().to_string(),
+                        Arc::new(
+                            table_infos
+                                .iter()
+                                .map(|(table_info, _)| {
+                                    (table_info.ident.table_id, table_info.name.clone())
+                                })
+                                .collect(),
+                        ),
                     ));
+                } else {
+                    table_infos.iter().for_each(|(table_info, db_id)| {
+                        drop_ids.push(DroppedId::Table(
+                            *db_id,
+                            table_info.ident.table_id,
+                            table_info.name.clone(),
+                        ))
+                    });
                 }
+                drop_table_infos
+                    .extend(table_infos.into_iter().map(|(table_info, _)| table_info));
             }
         }
 
@@ -2974,7 +2978,7 @@ impl<KV: kvapi::KVApi<Error = MetaError> + ?Sized> SchemaApi for KV {
     async fn gc_drop_tables(&self, req: GcDroppedTableReq) -> Result<(), KVAppError> {
         for drop_id in req.drop_ids {
             match drop_id {
-                DroppedId::Db(db_id, db_name) => {
+                DroppedId::Db(db_id, db_name, _) => {
                     gc_dropped_db_by_id(self, db_id, &req.tenant, db_name).await?
                 }
                 DroppedId::Table(db_id, table_id, table_name) => {
                     gc_dropped_table_by_id(self, &req.tenant, db_id, table_id, table_name).await?
                 }
diff --git a/src/meta/api/src/schema_api_test_suite.rs b/src/meta/api/src/schema_api_test_suite.rs
index 9d7b6d9a8d34..7d36008f469b 100644
--- a/src/meta/api/src/schema_api_test_suite.rs
+++ b/src/meta/api/src/schema_api_test_suite.rs
@@ -4023,10 +4023,12 @@ impl SchemaApiTestSuite {
             drop_ids_1.push(DroppedId::Db(
                 *res.db_id,
                 db_name.database_name().to_string(),
+                Arc::new(vec![]),
             ));
             drop_ids_2.push(DroppedId::Db(
                 *res.db_id,
                 db_name.database_name().to_string(),
+                Arc::new(vec![]),
             ));
 
             let req = CreateTableReq {
@@ -4063,7 +4065,7 @@ impl SchemaApiTestSuite {
             let res = mt.create_database(create_db_req.clone()).await?;
             let db_id = res.db_id;
 
-            drop_ids_2.push(DroppedId::Db(*db_id, "db2".to_string()));
+            drop_ids_2.push(DroppedId::Db(*db_id, "db2".to_string(), Arc::new(vec![])));
 
             info!("--- create and drop db2.tb1");
             {
@@ -4262,13 +4264,13 @@ impl SchemaApiTestSuite {
                         left_table_id.cmp(right_table_id)
                     }
                 }
-                (DroppedId::Db(left_db_id, _), DroppedId::Db(right_db_id, _)) => {
+                (DroppedId::Db(left_db_id, _, _), DroppedId::Db(right_db_id, _, _)) => {
                     left_db_id.cmp(right_db_id)
                 }
-                (DroppedId::Db(left_db_id, _), DroppedId::Table(right_db_id, _, _)) => {
+                (DroppedId::Db(left_db_id, _, _), DroppedId::Table(right_db_id, _, _)) => {
                     left_db_id.cmp(right_db_id)
                 }
-                (DroppedId::Table(left_db_id, _, _), DroppedId::Db(right_db_id, _)) => {
+                (DroppedId::Table(left_db_id, _, _), DroppedId::Db(right_db_id, _, _)) => {
                     left_db_id.cmp(right_db_id)
                 }
             }
diff --git a/src/meta/app/src/schema/table.rs b/src/meta/app/src/schema/table.rs
index e1de3eacae85..401ef8febb7a 100644
--- a/src/meta/app/src/schema/table.rs
+++ b/src/meta/app/src/schema/table.rs
@@ -25,6 +25,7 @@ use std::time::Duration;
 use anyerror::func_name;
 use chrono::DateTime;
 use chrono::Utc;
+use databend_common_exception::ErrorCode;
 use databend_common_exception::Result;
 use databend_common_expression::FieldIndex;
 use databend_common_expression::TableField;
@@ -201,6 +202,20 @@ pub struct TableInfo {
     pub db_type: DatabaseType,
 }
 
+impl TableInfo {
+    pub fn database_name(&self) -> Result<&str> {
+        if self.engine() != "FUSE" {
+            return Err(ErrorCode::Internal(format!(
+                "Invalid engine: {}",
+                self.engine()
+            )));
+        }
+        let database_name = self.desc.split('.').next().unwrap();
+        let database_name = &database_name[1..database_name.len() - 1];
+        Ok(database_name)
+    }
+}
+
 #[derive(serde::Serialize, serde::Deserialize, Clone, Debug, Eq, PartialEq, Default)]
 pub struct TableStatistics {
     /// Number of rows
@@ -907,8 +922,8 @@ pub struct ListDroppedTableReq {
 
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub enum DroppedId {
-    // db id, db name
-    Db(u64, String),
+    // db id, db name, (table id, table name)s
+    Db(u64, String, Arc<Vec<(u64, String)>>),
     // db id, table id, table name
     Table(u64, u64, String),
 }
diff --git a/src/query/ee/src/storages/fuse/operations/vacuum_drop_tables.rs b/src/query/ee/src/storages/fuse/operations/vacuum_drop_tables.rs
index d67ae15776cc..1167a330c0c7 100644
--- a/src/query/ee/src/storages/fuse/operations/vacuum_drop_tables.rs
+++ b/src/query/ee/src/storages/fuse/operations/vacuum_drop_tables.rs
@@ -41,7 +41,7 @@ pub async fn do_vacuum_drop_table(
         let result =
             vacuum_drop_single_table(&table_info, operator, dry_run_limit, &mut list_files).await;
         if result.is_err() {
-            let db_name = table_info.desc.split('.').next().unwrap();
+            let db_name = table_info.database_name()?;
             let table_id = table_info.ident.table_id;
             failed_dbs.insert(db_name.to_string());
             failed_tables.insert(table_id);
diff --git a/src/query/service/src/interpreters/interpreter_vacuum_drop_tables.rs b/src/query/service/src/interpreters/interpreter_vacuum_drop_tables.rs
index eb87f42990cc..8a0a75c93e4c 100644
--- a/src/query/service/src/interpreters/interpreter_vacuum_drop_tables.rs
+++ b/src/query/service/src/interpreters/interpreter_vacuum_drop_tables.rs
@@ -65,11 +65,11 @@ impl VacuumDropTablesInterpreter {
         let mut drop_db_table_ids = vec![];
         for drop_id in drop_ids {
             match drop_id {
-                DroppedId::Db(db_id, db_name) => {
-                    drop_db_ids.push(DroppedId::Db(db_id, db_name));
+                DroppedId::Db(_, _, _) => {
+                    drop_db_ids.push(drop_id);
                 }
-                DroppedId::Table(db_id, table_id, table_name) => {
-                    drop_db_table_ids.push(DroppedId::Table(db_id, table_id, table_name));
+                DroppedId::Table(_, _, _) => {
+                    drop_db_table_ids.push(drop_id);
                 }
             }
         }
@@ -146,7 +146,7 @@ impl Interpreter for VacuumDropTablesInterpreter {
             "vacuum drop table from db {:?}, get_drop_table_infos return tables: {:?}, drop_ids: {:?}",
             self.plan.database,
             tables.len(),
-            drop_ids.len()
+            drop_ids
         );
 
         // TODO buggy, table as catalog obj should be allowed to drop
@@ -172,13 +172,35 @@ impl Interpreter for VacuumDropTablesInterpreter {
             .await?;
         // gc meta data only when not dry run
         if self.plan.option.dry_run.is_none() {
-            let drop_ids = drop_ids
-                .into_iter()
-                .filter(|id| match id {
-                    DroppedId::Db(_, db_name) => !failed_dbs.contains(db_name),
-                    DroppedId::Table(_, table_id, _) => !failed_tables.contains(table_id),
-                })
-                .collect();
-            self.gc_drop_tables(catalog, drop_ids).await?;
+            let mut success_dropped_ids = vec![];
+            for drop_id in drop_ids {
+                match &drop_id {
+                    DroppedId::Db(db_id, db_name, tables) => {
+                        if !failed_dbs.contains(db_name) {
+                            success_dropped_ids.push(drop_id);
+                        } else {
+                            for (table_id, table_name) in tables.iter() {
+                                if !failed_tables.contains(table_id) {
+                                    success_dropped_ids.push(DroppedId::Table(
+                                        *db_id,
+                                        *table_id,
+                                        table_name.clone(),
+                                    ));
+                                }
+                            }
+                        }
+                    }
+                    DroppedId::Table(_, table_id, _) => {
+                        if !failed_tables.contains(table_id) {
+                            success_dropped_ids.push(drop_id);
+                        }
+                    }
+                }
+            }
+            info!(
+                "failed dbs:{:?}, failed_tables:{:?}, success_drop_ids:{:?}",
+                failed_dbs, failed_tables, success_dropped_ids
+            );
+            self.gc_drop_tables(catalog, success_dropped_ids).await?;
         }
 

From 4b9700795b0656a4dfd387bef38f24fe26e5f74e Mon Sep 17 00:00:00 2001
From: sky <3374614481@qq.com>
Date: Tue, 10 Sep 2024 10:26:44 +0800
Subject: [PATCH 06/15] fix ut

---
 src/meta/api/src/schema_api_test_suite.rs | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/src/meta/api/src/schema_api_test_suite.rs b/src/meta/api/src/schema_api_test_suite.rs
index 7d36008f469b..c6dd719acb01 100644
--- a/src/meta/api/src/schema_api_test_suite.rs
+++ b/src/meta/api/src/schema_api_test_suite.rs
@@ -4275,6 +4275,15 @@ impl SchemaApiTestSuite {
                 }
             }
         }
+        fn is_dropped_id_eq(l: &DroppedId, r: &DroppedId) -> bool {
+            match (l, r) {
+                (
+                    DroppedId::Db(left_db_id, left_db_name, _),
+                    DroppedId::Db(right_db_id, right_db_name, _),
+                ) => left_db_id == right_db_id && left_db_name == right_db_name,
+                _ => l == r,
+            }
+        }
         // case 1: test AllDroppedTables with filter time
         {
             let now = Utc::now();
@@ -4287,7 +4296,10 @@ impl SchemaApiTestSuite {
             // sort drop id by table id
             let mut sort_drop_ids = resp.drop_ids;
             sort_drop_ids.sort_by(cmp_dropped_id);
-            assert_eq!(sort_drop_ids, drop_ids_1);
+            assert_eq!(sort_drop_ids.len(), drop_ids_1.len());
+            for (id1, id2) in sort_drop_ids.iter().zip(drop_ids_1.iter()) {
+                assert!(is_dropped_id_eq(id1, id2));
+            }
 
             let expected: BTreeSet<String> = [
                 "'db1'.'tb1'".to_string(),
@@ -4316,7 +4328,10 @@ impl SchemaApiTestSuite {
             // sort drop id by table id
             let mut sort_drop_ids = resp.drop_ids;
             sort_drop_ids.sort_by(cmp_dropped_id);
-            assert_eq!(sort_drop_ids, drop_ids_2);
+            assert_eq!(sort_drop_ids.len(), drop_ids_2.len());
+            for (id1, id2) in sort_drop_ids.iter().zip(drop_ids_2.iter()) {
+                assert!(is_dropped_id_eq(id1, id2));
+            }
 
             let expected: BTreeSet<String> = [
                 "'db1'.'tb1'".to_string(),

From a6d0de93d2ff250ba2663516679d9e2e65c12b26 Mon Sep 17 00:00:00 2001
From: sky <3374614481@qq.com>
Date: Tue, 10 Sep 2024 11:31:53 +0800
Subject: [PATCH 07/15] add test

---
 .../vacuum_drop_table_continue.result         | 18 ++++++++
 .../01_vacuum/vacuum_drop_table_continue.sh   | 42 +++++++++++++++++++
 2 files changed, 60 insertions(+)
 create mode 100644 tests/suites/5_ee/01_vacuum/vacuum_drop_table_continue.result
 create mode 100755 tests/suites/5_ee/01_vacuum/vacuum_drop_table_continue.sh

diff --git a/tests/suites/5_ee/01_vacuum/vacuum_drop_table_continue.result b/tests/suites/5_ee/01_vacuum/vacuum_drop_table_continue.result
new file mode 100644
index 000000000000..ab6c604174f9
--- /dev/null
+++ b/tests/suites/5_ee/01_vacuum/vacuum_drop_table_continue.result
@@ -0,0 +1,18 @@
+>>>> create or replace database test_vacuum_drop_table_continue
+>>>> create table test_vacuum_drop_table_continue.a(c int) 'fs:///tmp/test_vacuum_drop_table_continue/'
+>>>> create table test_vacuum_drop_table_continue.b(c int)
+>>>> create table test_vacuum_drop_table_continue.c(c int)
+>>>> create table test_vacuum_drop_table_continue.d(c int)
+>>>> insert into test_vacuum_drop_table_continue.a values (1)
+>>>> insert into test_vacuum_drop_table_continue.b values (1)
+>>>> insert into test_vacuum_drop_table_continue.c values (1)
+>>>> insert into test_vacuum_drop_table_continue.d values (1)
+>>>> drop database test_vacuum_drop_table_continue
+>>>> set data_retention_time_in_days=0; vacuum drop table
+>>>> undrop database test_vacuum_drop_table_continue
+>>>> use test_vacuum_drop_table_continue;show tables
+a
+<<<<
+>>>> select * from test_vacuum_drop_table_continue.a
+1
+<<<<
diff --git a/tests/suites/5_ee/01_vacuum/vacuum_drop_table_continue.sh b/tests/suites/5_ee/01_vacuum/vacuum_drop_table_continue.sh
new file mode 100755
index 000000000000..cd2b715d9de3
--- /dev/null
+++ b/tests/suites/5_ee/01_vacuum/vacuum_drop_table_continue.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+. "$CURDIR"/../../../shell_env.sh
+
+stmt "create or replace database test_vacuum_drop_table_continue"
+
+mkdir -p /tmp/test_vacuum_drop_table_continue/
+
+stmt "create table test_vacuum_drop_table_continue.a(c int) 'fs:///tmp/test_vacuum_drop_table_continue/'"
+
+stmt "create table test_vacuum_drop_table_continue.b(c int)"
+
+stmt "create table test_vacuum_drop_table_continue.c(c int)"
+
+stmt "create table test_vacuum_drop_table_continue.d(c int)"
+
+
+stmt "insert into test_vacuum_drop_table_continue.a values (1)"
+
+stmt "insert into test_vacuum_drop_table_continue.b values (1)"
+
+stmt "insert into test_vacuum_drop_table_continue.c values (1)"
+
+stmt "insert into test_vacuum_drop_table_continue.d values (1)"
+
+chmod 444 /tmp/test_vacuum_drop_table_continue/
+
+stmt "drop database test_vacuum_drop_table_continue"
+
+# cannot vacuum the files of table a, but vacuuming should continue for the other tables
+stmt "set data_retention_time_in_days=0; vacuum drop table"
+
+chmod 755 /tmp/test_vacuum_drop_table_continue/
+find /tmp/test_vacuum_drop_table_continue/ -type d -exec chmod 755 {} +
+find /tmp/test_vacuum_drop_table_continue/ -type f -exec chmod 644 {} +
+
+stmt "undrop database test_vacuum_drop_table_continue"
+
+query "use test_vacuum_drop_table_continue;show tables"
+
+query "select * from test_vacuum_drop_table_continue.a"

From faa260694c5b36dd38ed5758f5fe87289b65a48a Mon Sep 17 00:00:00 2001
From: sky <3374614481@qq.com>
Date: Tue, 10 Sep 2024 11:44:03 +0800
Subject: [PATCH 08/15] fix ut

---
 src/meta/app/src/schema/table.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/meta/app/src/schema/table.rs b/src/meta/app/src/schema/table.rs
index 401ef8febb7a..8ee799ed6a71 100644
--- a/src/meta/app/src/schema/table.rs
+++ b/src/meta/app/src/schema/table.rs
@@ -375,7 +375,7 @@ impl Default for TableMeta {
     fn default() -> Self {
         TableMeta {
             schema: Arc::new(TableSchema::empty()),
-            engine: "".to_string(),
+            engine: "FUSE".to_string(),
             engine_options: BTreeMap::new(),
             storage_params: None,
             part_prefix: "".to_string(),

From 6ac552a422edb49841c2e5d99e4676bb744708cc Mon Sep 17 00:00:00 2001
From: sky <3374614481@qq.com>
Date: Tue, 10 Sep 2024 12:02:26 +0800
Subject: [PATCH 09/15] fix test name

---
 ...e_continue.result => 01_003_vacuum_drop_table_continue.result} | 0
 ...rop_table_continue.sh => 01_003_vacuum_drop_table_continue.sh} | 0
 2 files changed, 0 insertions(+), 0 deletions(-)
 rename tests/suites/5_ee/01_vacuum/{vacuum_drop_table_continue.result => 01_003_vacuum_drop_table_continue.result} (100%)
 rename tests/suites/5_ee/01_vacuum/{vacuum_drop_table_continue.sh => 01_003_vacuum_drop_table_continue.sh} (100%)

diff --git a/tests/suites/5_ee/01_vacuum/vacuum_drop_table_continue.result b/tests/suites/5_ee/01_vacuum/01_003_vacuum_drop_table_continue.result
similarity index 100%
rename from tests/suites/5_ee/01_vacuum/vacuum_drop_table_continue.result
rename to tests/suites/5_ee/01_vacuum/01_003_vacuum_drop_table_continue.result
diff --git a/tests/suites/5_ee/01_vacuum/vacuum_drop_table_continue.sh b/tests/suites/5_ee/01_vacuum/01_003_vacuum_drop_table_continue.sh
similarity index 100%
rename from tests/suites/5_ee/01_vacuum/vacuum_drop_table_continue.sh
rename to tests/suites/5_ee/01_vacuum/01_003_vacuum_drop_table_continue.sh

From 93b91cdc9bac3461c156bec2def1bc91f6de7661 Mon Sep 17 00:00:00 2001
From: sky <3374614481@qq.com>
Date: Tue, 10 Sep 2024 13:24:05 +0800
Subject: [PATCH 10/15] fix ut

---
 src/query/ee/tests/it/storages/fuse/operations/vacuum.rs | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/query/ee/tests/it/storages/fuse/operations/vacuum.rs b/src/query/ee/tests/it/storages/fuse/operations/vacuum.rs
index e6776e7b16ca..f0d01c1aa69b 100644
--- a/src/query/ee/tests/it/storages/fuse/operations/vacuum.rs
+++ b/src/query/ee/tests/it/storages/fuse/operations/vacuum.rs
@@ -294,6 +294,7 @@ async fn test_fuse_do_vacuum_drop_table_deletion_error() -> Result<()> {
         .meta
         .options
         .insert(OPT_KEY_DATABASE_ID.to_owned(), "1".to_owned());
+    table_info.desc = "`default`.`t`".to_string();
 
     use test_accessor::AccessorFaultyDeletion;
     // Operator with mocked accessor that will fail on `remove_all`
@@ -321,7 +322,7 @@ async fn test_fuse_vacuum_drop_tables_in_parallel_with_deletion_error() -> Resul
         .meta
         .options
         .insert(OPT_KEY_DATABASE_ID.to_owned(), "1".to_owned());
-
+    table_info.desc = "`default`.`t`".to_string();
     use test_accessor::AccessorFaultyDeletion;
 
     // Case 1: non-parallel vacuum dropped tables
@@ -418,6 +419,7 @@ async fn test_fuse_do_vacuum_drop_table_external_storage() -> Result<()> {
     };
 
     let table_info = TableInfo {
+        desc: "`default`.`t`".to_string(),
         meta,
         ..Default::default()
     };

From 8ab285da6e4817265d57efd0720a983e4423440e Mon Sep 17 00:00:00 2001
From: sky <3374614481@qq.com>
Date: Tue, 10 Sep 2024 15:53:22 +0800
Subject: [PATCH 11/15] refactor DroppedId

---
 src/meta/api/src/schema_api_impl.rs       | 40 +++++------
 src/meta/api/src/schema_api_test_suite.rs | 71 +++++++++++++------
 src/meta/app/src/schema/table.rs          |  7 +-
 .../interpreter_vacuum_drop_tables.rs      |  8 ++-
 4 files changed, 80 insertions(+), 46 deletions(-)

diff --git a/src/meta/api/src/schema_api_impl.rs b/src/meta/api/src/schema_api_impl.rs
index b61d7c18f807..6dcee25ad510 100644
--- a/src/meta/api/src/schema_api_impl.rs
+++ b/src/meta/api/src/schema_api_impl.rs
@@ -2881,11 +2881,11 @@ impl<KV: kvapi::KVApi<Error = MetaError> + ?Sized> SchemaApi for KV {
 
                 // if limit is Some, append DroppedId::Db only when table_infos is empty
                 if drop_db && table_infos.is_empty() {
-                    drop_ids.push(DroppedId::Db(
-                        db_info.database_id.db_id,
-                        db_info.name_ident.database_name().to_string(),
-                        Arc::new(vec![]),
-                    ));
+                    drop_ids.push(DroppedId::Db {
+                        db_id: db_info.database_id.db_id,
+                        db_name: db_info.name_ident.database_name().to_string(),
+                        tables: vec![],
+                    });
                 }
                 if num == left_num {
                     return Ok(ListDroppedTableResp {
@@ -2895,18 +2895,16 @@ impl<KV: kvapi::KVApi<Error = MetaError> + ?Sized> SchemaApi for KV {
                 }
             } else {
                 if drop_db {
-                    drop_ids.push(DroppedId::Db(
-                        db_info.database_id.db_id,
-                        db_info.name_ident.database_name().to_string(),
-                        Arc::new(
-                            table_infos
-                                .iter()
-                                .map(|(table_info, _)| {
-                                    (table_info.ident.table_id, table_info.name.clone())
-                                })
-                                .collect(),
-                        ),
-                    ));
+                    drop_ids.push(DroppedId::Db {
+                        db_id: db_info.database_id.db_id,
+                        db_name: db_info.name_ident.database_name().to_string(),
+                        tables: table_infos
+                            .iter()
+                            .map(|(table_info, _)| {
+                                (table_info.ident.table_id, table_info.name.clone())
+                            })
+                            .collect(),
+                    });
                 } else {
                     table_infos.iter().for_each(|(table_info, db_id)| {
                         drop_ids.push(DroppedId::Table(
                             *db_id,
@@ -2976,9 +2976,11 @@ impl<KV: kvapi::KVApi<Error = MetaError> + ?Sized> SchemaApi for KV {
     async fn gc_drop_tables(&self, req: GcDroppedTableReq) -> Result<(), KVAppError> {
         for drop_id in req.drop_ids {
             match drop_id {
-                DroppedId::Db(db_id, db_name, _) => {
-                    gc_dropped_db_by_id(self, db_id, &req.tenant, db_name).await?
-                }
+                DroppedId::Db {
+                    db_id,
+                    db_name,
+                    tables: _,
+                } => gc_dropped_db_by_id(self, db_id, &req.tenant, db_name).await?,
                 DroppedId::Table(db_id, table_id, table_name) => {
                     gc_dropped_table_by_id(self, &req.tenant, db_id, table_id, table_name).await?
                 }
diff --git a/src/meta/api/src/schema_api_test_suite.rs b/src/meta/api/src/schema_api_test_suite.rs
index c6dd719acb01..4f3e2483c8f3 100644
--- a/src/meta/api/src/schema_api_test_suite.rs
+++ b/src/meta/api/src/schema_api_test_suite.rs
@@ -4020,16 +4020,16 @@ impl SchemaApiTestSuite {
             };
             let res = mt.create_database(req).await?;
 
-            drop_ids_1.push(DroppedId::Db(
-                *res.db_id,
-                db_name.database_name().to_string(),
-                Arc::new(vec![]),
-            ));
-            drop_ids_2.push(DroppedId::Db(
-                *res.db_id,
-                db_name.database_name().to_string(),
-                Arc::new(vec![]),
-            ));
+            drop_ids_1.push(DroppedId::Db {
+                db_id: *res.db_id,
+                db_name: db_name.database_name().to_string(),
+                tables: vec![],
+            });
+            drop_ids_2.push(DroppedId::Db {
+                db_id: *res.db_id,
+                db_name: db_name.database_name().to_string(),
+                tables: vec![],
+            });
 
             let req = CreateTableReq {
@@ -4065,7 +4065,11 @@ impl SchemaApiTestSuite {
             let res = mt.create_database(create_db_req.clone()).await?;
             let db_id = res.db_id;
 
-            drop_ids_2.push(DroppedId::Db(*db_id, "db2".to_string(), Arc::new(vec![])));
+            drop_ids_2.push(DroppedId::Db {
+                db_id: *db_id,
+                db_name: "db2".to_string(),
+                tables: vec![],
+            });
 
             info!("--- create and drop db2.tb1");
             {
@@ -4268,22 +4268,45 @@ impl SchemaApiTestSuite {
                         left_table_id.cmp(right_table_id)
                     }
                 }
-                (DroppedId::Db(left_db_id, _, _), DroppedId::Db(right_db_id, _, _)) => {
-                    left_db_id.cmp(right_db_id)
-                }
-                (DroppedId::Db(left_db_id, _, _), DroppedId::Table(right_db_id, _, _)) => {
-                    left_db_id.cmp(right_db_id)
-                }
-                (DroppedId::Table(left_db_id, _, _), DroppedId::Db(right_db_id, _, _)) => {
-                    left_db_id.cmp(right_db_id)
-                }
+                (
+                    DroppedId::Db {
+                        db_id: left_db_id, ..
+                    },
+                    DroppedId::Db {
+                        db_id: right_db_id, ..
+ }, + ) => left_db_id.cmp(right_db_id), + ( + DroppedId::Db { + db_id: left_db_id, + db_name: _, + tables: _, + }, + DroppedId::Table(right_db_id, _, _), + ) => left_db_id.cmp(right_db_id), + ( + DroppedId::Table(left_db_id, _, _), + DroppedId::Db { + db_id: right_db_id, + db_name: _, + tables: _, + }, + ) => left_db_id.cmp(right_db_id), } } fn is_dropped_id_eq(l: &DroppedId, r: &DroppedId) -> bool { match (l, r) { ( - DroppedId::Db(left_db_id, left_db_name, _), - DroppedId::Db(right_db_id, right_db_name, _), + DroppedId::Db { + db_id: left_db_id, + db_name: left_db_name, + tables: _, + }, + DroppedId::Db { + db_id: right_db_id, + db_name: right_db_name, + tables: _, + }, ) => left_db_id == right_db_id && left_db_name == right_db_name, _ => l == r, } diff --git a/src/meta/app/src/schema/table.rs b/src/meta/app/src/schema/table.rs index 8ee799ed6a71..ab1fe33dbd79 100644 --- a/src/meta/app/src/schema/table.rs +++ b/src/meta/app/src/schema/table.rs @@ -922,8 +922,11 @@ pub struct ListDroppedTableReq { #[derive(Clone, Debug, PartialEq, Eq)] pub enum DroppedId { - // db id, db name, (table id, table name)s - Db(u64, String, Arc>), + Db { + db_id: u64, + db_name: String, + tables: Vec<(u64, String)>, + }, // db id, table id, table name Table(u64, u64, String), } diff --git a/src/query/service/src/interpreters/interpreter_vacuum_drop_tables.rs b/src/query/service/src/interpreters/interpreter_vacuum_drop_tables.rs index 8a0a75c93e4c..a7f1467a9e86 100644 --- a/src/query/service/src/interpreters/interpreter_vacuum_drop_tables.rs +++ b/src/query/service/src/interpreters/interpreter_vacuum_drop_tables.rs @@ -65,7 +65,7 @@ impl VacuumDropTablesInterpreter { let mut drop_db_table_ids = vec![]; for drop_id in drop_ids { match drop_id { - DroppedId::Db(_, _, _) => { + DroppedId::Db { .. 
} => { drop_db_ids.push(drop_id); } DroppedId::Table(_, _, _) => { @@ -175,7 +175,11 @@ impl Interpreter for VacuumDropTablesInterpreter { let mut success_dropped_ids = vec![]; for drop_id in drop_ids { match &drop_id { - DroppedId::Db(db_id, db_name, tables) => { + DroppedId::Db { + db_id, + db_name, + tables, + } => { if !failed_dbs.contains(db_name) { success_dropped_ids.push(drop_id); } else { From 6463bc6272d135eddd3a1c192046e71c087ce72b Mon Sep 17 00:00:00 2001 From: sky <3374614481@qq.com> Date: Tue, 10 Sep 2024 16:44:28 +0800 Subject: [PATCH 12/15] unify code --- src/meta/api/src/schema_api_impl.rs | 61 ++++++++--------------------- 1 file changed, 17 insertions(+), 44 deletions(-) diff --git a/src/meta/api/src/schema_api_impl.rs b/src/meta/api/src/schema_api_impl.rs index 6dcee25ad510..44fcdcc6ef9d 100644 --- a/src/meta/api/src/schema_api_impl.rs +++ b/src/meta/api/src/schema_api_impl.rs @@ -2866,57 +2866,30 @@ impl + ?Sized> SchemaApi for KV { let table_infos = do_get_table_history(self, db_filter, left_num).await?; - // check if reach the limit - if let Some(left_num) = left_num { - let num = min(left_num, table_infos.len()); - for table_info in table_infos.iter().take(num) { - let (table_info, db_id) = table_info; + let limit = left_num.unwrap_or(table_infos.len()); + let this_db_drop_table_infos = &table_infos[..limit]; + + if limit >= this_db_drop_table_infos.len() && drop_db { + drop_ids.push(DroppedId::Db { + db_id: db_info.database_id.db_id, + db_name: db_info.name_ident.database_name().to_string(), + tables: this_db_drop_table_infos + .iter() + .map(|(table_info, _)| { + (table_info.ident.table_id, table_info.name.clone()) + }) + .collect(), + }); + } else { + for (table_info, db_id) in this_db_drop_table_infos { drop_ids.push(DroppedId::Table( *db_id, table_info.ident.table_id, table_info.name.clone(), )); - drop_table_infos.push(table_info.clone()); - } - - // if limit is Some, append DroppedId::Db only when table_infos is empty - if drop_db && table_infos.is_empty() { - drop_ids.push(DroppedId::Db { - db_id: db_info.database_id.db_id, - db_name: db_info.name_ident.database_name().to_string(), - tables: vec![], - }); - } - if num == left_num { - return Ok(ListDroppedTableResp { - drop_table_infos, - drop_ids, - }); - } - } else { - if drop_db { - drop_ids.push(DroppedId::Db { - db_id: db_info.database_id.db_id, - db_name: db_info.name_ident.database_name().to_string(), - tables: table_infos - .iter() - .map(|(table_info, _)| { - (table_info.ident.table_id, table_info.name.clone()) - }) - .collect(), - }); - } else { - table_infos.iter().for_each(|(table_info, db_id)| { - drop_ids.push(DroppedId::Table( - *db_id, - table_info.ident.table_id, - table_info.name.clone(), - )) - }); } - drop_table_infos - .extend(table_infos.into_iter().map(|(table_info, _)| table_info)); } + drop_table_infos.extend(this_db_drop_table_infos); } return Ok(ListDroppedTableResp { From 5e2a9f09c7e802a75c9db6490b47e6d9ab6365d4 Mon Sep 17 00:00:00 2001 From: sky <3374614481@qq.com> Date: Tue, 10 Sep 2024 16:50:32 +0800 Subject: [PATCH 13/15] make lint --- src/meta/api/src/schema_api_impl.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/meta/api/src/schema_api_impl.rs b/src/meta/api/src/schema_api_impl.rs index 44fcdcc6ef9d..b29711691516 100644 --- a/src/meta/api/src/schema_api_impl.rs +++ b/src/meta/api/src/schema_api_impl.rs @@ -2889,7 +2889,11 @@ impl + ?Sized> SchemaApi for KV { )); } } - drop_table_infos.extend(this_db_drop_table_infos); + drop_table_infos.extend( 
+ this_db_drop_table_infos + .iter() + .map(|(table_info, _)| table_info.clone()), + ); } return Ok(ListDroppedTableResp { From 2f114112663e7507842f91c403f5c2de784325b9 Mon Sep 17 00:00:00 2001 From: sky <3374614481@qq.com> Date: Tue, 10 Sep 2024 17:39:40 +0800 Subject: [PATCH 14/15] fix --- src/meta/api/src/schema_api_impl.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/meta/api/src/schema_api_impl.rs b/src/meta/api/src/schema_api_impl.rs index b29711691516..8f18d5971195 100644 --- a/src/meta/api/src/schema_api_impl.rs +++ b/src/meta/api/src/schema_api_impl.rs @@ -2865,15 +2865,13 @@ impl + ?Sized> SchemaApi for KV { }; let table_infos = do_get_table_history(self, db_filter, left_num).await?; + let take_num = left_num.unwrap_or(usize::MAX); - let limit = left_num.unwrap_or(table_infos.len()); - let this_db_drop_table_infos = &table_infos[..limit]; - - if limit >= this_db_drop_table_infos.len() && drop_db { + if drop_db && take_num > table_infos.len() { drop_ids.push(DroppedId::Db { db_id: db_info.database_id.db_id, db_name: db_info.name_ident.database_name().to_string(), - tables: this_db_drop_table_infos + tables: table_infos .iter() .map(|(table_info, _)| { (table_info.ident.table_id, table_info.name.clone()) @@ -2881,7 +2879,7 @@ impl + ?Sized> SchemaApi for KV { .collect(), }); } else { - for (table_info, db_id) in this_db_drop_table_infos { + for (table_info, db_id) in table_infos.iter().take(take_num) { drop_ids.push(DroppedId::Table( *db_id, table_info.ident.table_id, @@ -2890,8 +2888,9 @@ impl + ?Sized> SchemaApi for KV { } } drop_table_infos.extend( - this_db_drop_table_infos + table_infos .iter() + .take(take_num) .map(|(table_info, _)| table_info.clone()), ); } From 9ff306ef599e24cd7e7eabe01d9f51a3d6c34c33 Mon Sep 17 00:00:00 2001 From: sky <3374614481@qq.com> Date: Tue, 10 Sep 2024 18:18:50 +0800 Subject: [PATCH 15/15] add comment --- src/meta/api/src/schema_api_impl.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/meta/api/src/schema_api_impl.rs b/src/meta/api/src/schema_api_impl.rs index 8f18d5971195..43d48471bc7a 100644 --- a/src/meta/api/src/schema_api_impl.rs +++ b/src/meta/api/src/schema_api_impl.rs @@ -2867,6 +2867,7 @@ impl + ?Sized> SchemaApi for KV { let table_infos = do_get_table_history(self, db_filter, left_num).await?; let take_num = left_num.unwrap_or(usize::MAX); + // A DB can be removed only when all its tables are removed. if drop_db && take_num > table_infos.len() { drop_ids.push(DroppedId::Db { db_id: db_info.database_id.db_id,
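A minimal, self-contained sketch of the drop-id filtering that patches 05 and 11 converge on, for readers who want to follow the control flow without the surrounding diff context. The `DroppedId` shape and the `failed_dbs`/`failed_tables` sets below mirror the code above, but this is a simplified stand-in rather than the real crate API, and the ids and names in `main` are hypothetical.

use std::collections::HashSet;

// Simplified stand-in for databend's DroppedId (patch 11 shape).
#[derive(Debug, PartialEq)]
enum DroppedId {
    Db {
        db_id: u64,
        db_name: String,
        // (table id, table name) pairs owned by the dropped database
        tables: Vec<(u64, String)>,
    },
    // db id, table id, table name
    Table(u64, u64, String),
}

// Keep only the drop ids whose storage was actually vacuumed.
// A database whose vacuum partially failed is demoted to per-table drops,
// so meta GC never deletes metadata for data that is still on disk.
fn filter_successful_drops(
    drop_ids: Vec<DroppedId>,
    failed_dbs: &HashSet<String>,
    failed_tables: &HashSet<u64>,
) -> Vec<DroppedId> {
    let mut success_dropped_ids = vec![];
    for drop_id in drop_ids {
        match &drop_id {
            DroppedId::Db { db_id, db_name, tables } => {
                if !failed_dbs.contains(db_name) {
                    success_dropped_ids.push(drop_id);
                } else {
                    // Salvage the tables inside the failed database whose
                    // data was removed successfully.
                    for (table_id, table_name) in tables {
                        if !failed_tables.contains(table_id) {
                            success_dropped_ids.push(DroppedId::Table(
                                *db_id,
                                *table_id,
                                table_name.clone(),
                            ));
                        }
                    }
                }
            }
            DroppedId::Table(_, table_id, _) => {
                if !failed_tables.contains(table_id) {
                    success_dropped_ids.push(drop_id);
                }
            }
        }
    }
    success_dropped_ids
}

fn main() {
    // Hypothetical ids: db1 owns tables 10 and 11; vacuuming table 10 failed
    // (for example, the read-only directory in the shell test above).
    let drop_ids = vec![
        DroppedId::Db {
            db_id: 1,
            db_name: "db1".to_string(),
            tables: vec![(10, "a".to_string()), (11, "b".to_string())],
        },
        DroppedId::Table(2, 20, "t".to_string()),
    ];
    let failed_dbs = HashSet::from(["db1".to_string()]);
    let failed_tables = HashSet::from([10]);

    let kept = filter_successful_drops(drop_ids, &failed_dbs, &failed_tables);
    // db1 survives only as the per-table drop of `b`; the standalone drop of
    // table 20 is unaffected.
    assert_eq!(kept, vec![
        DroppedId::Table(1, 11, "b".to_string()),
        DroppedId::Table(2, 20, "t".to_string()),
    ]);
}

The design choice worth noting: a failed table vacuum no longer aborts the whole statement or blocks meta GC wholesale. A failure is recorded per table and per database, a partially failed database is demoted to per-table decisions, and only metadata whose underlying data provably left storage is garbage-collected, so `undrop` keeps working for everything else. That is exactly what the shell test asserts: after the read-only directory blocks the vacuum of table `a`, the database can still be undropped and `a` still holds its row.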