Skip to content

Commit

Permalink
Remove uses of #[allow(dead_code)] in favor of _identifier (apache#13328)

Browse files Browse the repository at this point in the history

* Remove uses of #[allow(dead_code)] in favor of _identifier

* update comments
  • Loading branch information
ding-young authored and alamb committed Nov 13, 2024
1 parent 9b414e6 commit 67ee5be
Show file tree
Hide file tree
Showing 9 changed files with 29 additions and 34 deletions.
8 changes: 4 additions & 4 deletions datafusion-examples/examples/advanced_parquet_index.rs
Original file line number Diff line number Diff line change
Expand Up @@ -229,9 +229,9 @@ async fn main() -> Result<()> {
/// `file1.parquet` contains values `0..1000`
#[derive(Debug)]
pub struct IndexTableProvider {
/// Where the file is stored (cleanup on drop)
#[allow(dead_code)]
tmpdir: TempDir,
/// Pointer to temporary file storage. Keeping it in scope to prevent temporary folder
/// to be deleted prematurely
_tmpdir: TempDir,
/// The file that is being read.
indexed_file: IndexedFile,
/// The underlying object store
Expand All @@ -250,7 +250,7 @@ impl IndexTableProvider {

Ok(Self {
indexed_file,
tmpdir,
_tmpdir: tmpdir,
object_store,
use_row_selections: AtomicBool::new(false),
})
Expand Down
10 changes: 5 additions & 5 deletions datafusion/core/tests/parquet/external_access_plan.rs
Original file line number Diff line number Diff line change
Expand Up @@ -313,7 +313,7 @@ impl TestFull {
} = self;

let TestData {
temp_file: _,
_temp_file: _,
schema,
file_name,
file_size,
Expand Down Expand Up @@ -361,9 +361,9 @@ impl TestFull {

// Holds necessary data for these tests to reuse the same parquet file
struct TestData {
// field is present as on drop the file is deleted
#[allow(dead_code)]
temp_file: NamedTempFile,
/// Pointer to temporary file storage. Keeping it in scope to prevent temporary folder
/// to be deleted prematurely
_temp_file: NamedTempFile,
schema: SchemaRef,
file_name: String,
file_size: u64,
Expand Down Expand Up @@ -402,7 +402,7 @@ fn get_test_data() -> &'static TestData {
let file_size = temp_file.path().metadata().unwrap().len();

TestData {
temp_file,
_temp_file: temp_file,
schema,
file_name,
file_size,
Expand Down
5 changes: 2 additions & 3 deletions datafusion/core/tests/parquet/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -100,10 +100,9 @@ enum Unit {
/// table "t" registered, pointing at a parquet file made with
/// `make_test_file`
struct ContextWithParquet {
#[allow(dead_code)]
/// temp file parquet data is written to. The file is cleaned up
/// when dropped
file: NamedTempFile,
_file: NamedTempFile,
provider: Arc<dyn TableProvider>,
ctx: SessionContext,
}
Expand Down Expand Up @@ -217,7 +216,7 @@ impl ContextWithParquet {
ctx.register_table("t", provider.clone()).unwrap();

Self {
file,
_file: file,
provider,
ctx,
}
Expand Down
5 changes: 2 additions & 3 deletions datafusion/execution/src/disk_manager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -139,7 +139,7 @@ impl DiskManager {

let dir_index = thread_rng().gen_range(0..local_dirs.len());
Ok(RefCountedTempFile {
parent_temp_dir: Arc::clone(&local_dirs[dir_index]),
_parent_temp_dir: Arc::clone(&local_dirs[dir_index]),
tempfile: Builder::new()
.tempfile_in(local_dirs[dir_index].as_ref())
.map_err(DataFusionError::IoError)?,
Expand All @@ -153,8 +153,7 @@ impl DiskManager {
pub struct RefCountedTempFile {
/// The reference to the directory in which temporary files are created to ensure
/// it is not cleaned up prior to the NamedTempFile
#[allow(dead_code)]
parent_temp_dir: Arc<TempDir>,
_parent_temp_dir: Arc<TempDir>,
tempfile: NamedTempFile,
}

Expand Down
5 changes: 2 additions & 3 deletions datafusion/physical-plan/src/joins/cross_join.rs
Original file line number Diff line number Diff line change
Expand Up @@ -53,8 +53,7 @@ struct JoinLeftData {
merged_batch: RecordBatch,
/// Track memory reservation for merged_batch. Relies on drop
/// semantics to release reservation when JoinLeftData is dropped.
#[allow(dead_code)]
reservation: MemoryReservation,
_reservation: MemoryReservation,
}

#[allow(rustdoc::private_intra_doc_links)]
Expand Down Expand Up @@ -209,7 +208,7 @@ async fn load_left_input(

Ok(JoinLeftData {
merged_batch,
reservation,
_reservation: reservation,
})
}

Expand Down
5 changes: 2 additions & 3 deletions datafusion/physical-plan/src/joins/hash_join.rs
Original file line number Diff line number Diff line change
Expand Up @@ -92,8 +92,7 @@ struct JoinLeftData {
probe_threads_counter: AtomicUsize,
/// Memory reservation that tracks memory used by `hash_map` hash table
/// `batch`. Cleared on drop.
#[allow(dead_code)]
reservation: MemoryReservation,
_reservation: MemoryReservation,
}

impl JoinLeftData {
Expand All @@ -110,7 +109,7 @@ impl JoinLeftData {
batch,
visited_indices_bitmap,
probe_threads_counter,
reservation,
_reservation: reservation,
}
}

Expand Down
7 changes: 3 additions & 4 deletions datafusion/physical-plan/src/joins/nested_loop_join.rs
Original file line number Diff line number Diff line change
Expand Up @@ -69,22 +69,21 @@ struct JoinLeftData {
probe_threads_counter: AtomicUsize,
/// Memory reservation for tracking batch and bitmap
/// Cleared on `JoinLeftData` drop
#[allow(dead_code)]
reservation: MemoryReservation,
_reservation: MemoryReservation,
}

impl JoinLeftData {
fn new(
batch: RecordBatch,
bitmap: SharedBitmapBuilder,
probe_threads_counter: AtomicUsize,
reservation: MemoryReservation,
_reservation: MemoryReservation,
) -> Self {
Self {
batch,
bitmap,
probe_threads_counter,
reservation,
_reservation,
}
}

Expand Down
10 changes: 4 additions & 6 deletions datafusion/physical-plan/src/repartition/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -623,7 +623,7 @@ impl ExecutionPlan for RepartitionExec {
Box::pin(PerPartitionStream {
schema: Arc::clone(&schema_captured),
receiver,
drop_helper: Arc::clone(&abort_helper),
_drop_helper: Arc::clone(&abort_helper),
reservation: Arc::clone(&reservation),
}) as SendableRecordBatchStream
})
Expand Down Expand Up @@ -651,7 +651,7 @@ impl ExecutionPlan for RepartitionExec {
num_input_partitions_processed: 0,
schema: input.schema(),
input: rx.swap_remove(0),
drop_helper: abort_helper,
_drop_helper: abort_helper,
reservation,
}) as SendableRecordBatchStream)
}
Expand Down Expand Up @@ -906,8 +906,7 @@ struct RepartitionStream {
input: DistributionReceiver<MaybeBatch>,

/// Handle to ensure background tasks are killed when no longer needed.
#[allow(dead_code)]
drop_helper: Arc<Vec<SpawnedTask<()>>>,
_drop_helper: Arc<Vec<SpawnedTask<()>>>,

/// Memory reservation.
reservation: SharedMemoryReservation,
Expand Down Expand Up @@ -970,8 +969,7 @@ struct PerPartitionStream {
receiver: DistributionReceiver<MaybeBatch>,

/// Handle to ensure background tasks are killed when no longer needed.
#[allow(dead_code)]
drop_helper: Arc<Vec<SpawnedTask<()>>>,
_drop_helper: Arc<Vec<SpawnedTask<()>>>,

/// Memory reservation.
reservation: SharedMemoryReservation,
Expand Down
8 changes: 5 additions & 3 deletions datafusion/physical-plan/src/sorts/cursor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -156,8 +156,7 @@ pub struct RowValues {

/// Tracks for the memory used by in the `Rows` of this
/// cursor. Freed on drop
#[allow(dead_code)]
reservation: MemoryReservation,
_reservation: MemoryReservation,
}

impl RowValues {
Expand All @@ -173,7 +172,10 @@ impl RowValues {
"memory reservation mismatch"
);
assert!(rows.num_rows() > 0);
Self { rows, reservation }
Self {
rows,
_reservation: reservation,
}
}
}

Expand Down

0 comments on commit 67ee5be

Please sign in to comment.