storage: fix a bug in handling last chunk of blob
Fix a bug in handling last chunk of blob.

Signed-off-by: Jiang Liu <[email protected]>
jiangliu committed Dec 29, 2022
1 parent e0cfa20 commit cdce8f5
Showing 7 changed files with 97 additions and 34 deletions.
rafs/src/fs.rs: 2 changes (1 addition & 1 deletion)

@@ -438,7 +438,7 @@ impl Rafs {
         let batch_size = 1024 * 1024 * 2;

         for blob in &blob_infos {
-            let blob_size = blob.compressed_size();
+            let blob_size = blob.compressed_data_size();
             let count = div_round_up(blob_size, batch_size);

             let mut pre_offset = 0u64;
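The prefetch loop above now batches over compressed_data_size(), i.e. only the chunk-data region, so the trailing tar header and ToC are never queued for prefetch. A minimal sketch of that batching math, assuming a standalone div_round_up like the nydus-utils helper; the (offset, length) pairs are stand-ins for the real prefetch requests sent to the blob device:

// Sketch only: split a blob's chunk-data region into fixed-size prefetch
// batches, mirroring the loop shape above.
fn div_round_up(n: u64, d: u64) -> u64 {
    (n + d - 1) / d
}

fn prefetch_batches(blob_size: u64, batch_size: u64) -> Vec<(u64, u64)> {
    let count = div_round_up(blob_size, batch_size);
    let mut batches = Vec::with_capacity(count as usize);
    let mut pre_offset = 0u64;
    for _ in 0..count {
        // The last batch is clamped so it never reaches past the chunk data.
        let len = batch_size.min(blob_size - pre_offset);
        batches.push((pre_offset, len));
        pre_offset += len;
    }
    batches
}

With a 5 MiB data region and the 2 MiB batch size above, this yields three requests of 2 MiB, 2 MiB, and 1 MiB; batching over compressed_size() instead would add a request covering bytes that are metadata, not chunk data.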
src/bin/nydus-image/core/context.rs: 10 changes (5 additions & 5 deletions)

@@ -461,7 +461,7 @@ impl BlobContext {
             .set_inlined_chunk_digest(features.contains(BlobFeatures::INLINED_CHUNK_DIGEST));
         blob_ctx
             .blob_meta_header
-            .set_cap_inlined_meta(features.contains(BlobFeatures::CAP_INLINED_META));
+            .set_cap_tar_toc(features.contains(BlobFeatures::CAP_TAR_TOC));

         blob_ctx
     }
@@ -583,7 +583,7 @@ impl BlobContext {
     // TODO: check the logic to reset prefetch size
     pub fn set_blob_prefetch_size(&mut self, ctx: &BuildContext) {
-        if (self.compressed_blob_size > 0
+        if (self.uncompressed_blob_size > 0
            || (ctx.conversion_type == ConversionType::EStargzIndexToRef
                && !self.blob_id.is_empty()))
            && ctx.prefetch.policy != PrefetchPolicy::Blob
@@ -652,7 +652,7 @@ impl BlobContext {
     /// Get blob id if the blob has some chunks.
     pub fn blob_id(&mut self) -> Option<String> {
-        if self.compressed_blob_size > 0 {
+        if self.uncompressed_blob_size > 0 {
            Some(self.blob_id.to_string())
        } else {
            None
@@ -1070,9 +1070,9 @@ impl BuildContext {
        features: Features,
    ) -> Self {
        let blob_features = if blob_inline_meta {
-            BlobFeatures::INLINED_META | BlobFeatures::CAP_INLINED_META
+            BlobFeatures::INLINED_META | BlobFeatures::CAP_TAR_TOC
        } else {
-            BlobFeatures::CAP_INLINED_META
+            BlobFeatures::CAP_TAR_TOC
        };
        BuildContext {
            blob_id,
src/bin/nydus-image/main.rs: 3 changes (2 additions & 1 deletion)

@@ -995,9 +995,10 @@ impl Command {
         let mut blob_ids = Vec::new();
         for (idx, blob) in blobs.iter().enumerate() {
             println!(
-                "\t {}: {}, compressed size 0x{:x}, uncompressed size 0x{:x}, chunks: 0x{:x}, features: {}",
+                "\t {}: {}, compressed data size 0x{:x}, compressed file size 0x{:x}, uncompressed file size 0x{:x}, chunks: 0x{:x}, features: {}",
                 idx,
                 blob.blob_id(),
+                blob.compressed_data_size(),
                 blob.compressed_size(),
                 blob.uncompressed_size(),
                 blob.chunk_count(),
src/bin/nydusd/fs_cache.rs: 2 changes (1 addition & 1 deletion)

@@ -510,7 +510,7 @@ impl FsCacheHandler {
            Some(1) => nydus_api::default_batch_size() as u64,
            Some(s) => s as u64,
        };
-        let blob_size = blob_info.compressed_size();
+        let blob_size = blob_info.compressed_data_size();
        let count = (blob_size + size - 1) / size;
        let mut blob_req = Vec::with_capacity(count as usize);
        let mut pre_offset = 0u64;
storage/src/cache/filecache/mod.rs: 2 changes (1 addition & 1 deletion)

@@ -221,7 +221,7 @@ impl FileCacheEntry {
        // Set cache file to its expected size.
        let file_size = file.metadata()?.len();
        let cached_file_size = if mgr.cache_raw_data {
-            blob_info.compressed_size()
+            blob_info.compressed_data_size()
        } else {
            blob_info.uncompressed_size()
        };
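Presizing matters because the cache file is written at chunk offsets: if the raw (compressed) data is cached, the file must hold exactly the compressed chunk data, excluding blob.meta, blob.chunk and the ToC. A hedged sketch of the sizing step, with cache_raw_data and both sizes passed in explicitly as stand-ins for the FileCacheEntry fields:

// Sketch only: pre-size a chunk cache file to its expected final size.
use std::fs::OpenOptions;
use std::io;

fn presize_cache_file(
    path: &str,
    cache_raw_data: bool,
    compressed_data_size: u64,
    uncompressed_size: u64,
) -> io::Result<()> {
    let file = OpenOptions::new().read(true).write(true).create(true).open(path)?;
    let expected = if cache_raw_data { compressed_data_size } else { uncompressed_size };
    if file.metadata()?.len() != expected {
        // set_len() extends with zeros (a sparse hole on most filesystems)
        // or truncates, leaving the file at exactly the expected size.
        file.set_len(expected)?;
    }
    Ok(())
}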
storage/src/device.rs: 22 changes (18 additions & 4 deletions)

@@ -59,8 +59,9 @@ bitflags! {
        const ZRAN = 0x0000_0008;
        /// Chunk digest array is inlined in the data blob.
        const INLINED_CHUNK_DIGEST = 0x0000_0010;
-        /// Inlined-meta capability, used to support backward compatibility for legacy converters.
-        const CAP_INLINED_META = 0x4000_0000;
+        /// Data blob is encoded with a tar header and optionally a ToC.
+        /// It also flags images generated by `nydus-image` v2.2 or newer.
+        const CAP_TAR_TOC = 0x4000_0000;
        /// Rafs V5 image without extended blob table, this is an internal flag.
        const _V5_NO_EXT_BLOB_TABLE = 0x8000_0000;
    }
@@ -199,7 +200,7 @@ impl BlobInfo {
    /// Get the id of the blob, with special handling of the `inlined-meta` case.
    pub fn blob_id(&self) -> String {
        if (self.has_feature(BlobFeatures::INLINED_META) && !self.has_feature(BlobFeatures::ZRAN))
-            || !self.has_feature(BlobFeatures::CAP_INLINED_META)
+            || !self.has_feature(BlobFeatures::CAP_TAR_TOC)
        {
            let guard = self.meta_path.lock().unwrap();
            if !guard.is_empty() {
@@ -214,7 +215,20 @@ impl BlobInfo {
        &self.blob_id
    }

-    /// Get size of the compressed blob.
+    /// Get size of compressed chunk data, not including `blob.meta`, `blob.chunk`, `toc` etc.
+    pub fn compressed_data_size(&self) -> u64 {
+        if self.has_feature(BlobFeatures::ZRAN) {
+            // It's the size of the OCIv1 targz blob.
+            self.compressed_size
+        } else if self.has_feature(BlobFeatures::CAP_TAR_TOC) && self.meta_ci_is_valid() {
+            // There's a tar header between the chunk data and the compression information.
+            self.meta_ci_offset - 0x200
+        } else {
+            self.compressed_size
+        }
+    }
+
+    /// Get size of the compressed blob, including `blob.meta`, `blob.chunk`, `toc` etc.
    pub fn compressed_size(&self) -> u64 {
        self.compressed_size
    }
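The new accessor encodes the v2.2 blob layout: compressed chunk data first, then a 512-byte (0x200) tar header, then the compression information (blob.meta), optionally followed by blob.chunk and the ToC. A worked sketch of the non-ZRAN case under that assumption, with hypothetical field names standing in for BlobInfo's internals (ZRAN blobs keep using the whole targz size, as the diff notes):

// Sketch only: why compressed_data_size() subtracts 0x200. Field names are
// illustrative, not the real BlobInfo layout.
const TAR_HEADER_SIZE: u64 = 0x200;

struct Blob {
    compressed_size: u64, // whole blob file, metadata and ToC included
    meta_ci_offset: u64,  // offset of the compression info (blob.meta)
    cap_tar_toc: bool,    // produced by nydus-image v2.2+
    ci_valid: bool,       // compression info header parsed successfully
}

fn compressed_data_size(b: &Blob) -> u64 {
    if b.cap_tar_toc && b.ci_valid {
        // Chunk data ends right before the tar header that precedes blob.meta.
        b.meta_ci_offset - TAR_HEADER_SIZE
    } else {
        // Legacy blobs contain nothing but chunk data, so both sizes coincide.
        b.compressed_size
    }
}

For example, if blob.meta starts at offset 0x1f8200, chunk data occupies the first 0x1f8000 bytes; treating the full file size as chunk data is how the last chunk of the blob could appear to extend into metadata.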
storage/src/meta/mod.rs: 90 changes (69 additions & 21 deletions)

@@ -283,11 +283,11 @@ impl BlobMetaHeaderOndisk {
    }

    /// Set flag indicating having inlined-meta capability.
-    pub fn set_cap_inlined_meta(&mut self, enable: bool) {
+    pub fn set_cap_tar_toc(&mut self, enable: bool) {
        if enable {
-            self.s_features |= BlobFeatures::CAP_INLINED_META.bits();
+            self.s_features |= BlobFeatures::CAP_TAR_TOC.bits();
        } else {
-            self.s_features &= !BlobFeatures::CAP_INLINED_META.bits();
+            self.s_features &= !BlobFeatures::CAP_TAR_TOC.bits();
        }
    }

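The setter mirrors one feature bit into the on-disk s_features field, which is how the builder's CAP_TAR_TOC choice in context.rs above ends up in the blob. A small usage sketch, assuming BlobMetaHeaderOndisk is default-constructible; the call shape matches the BlobContext site above:

// Sketch only: propagate builder feature bits into the on-disk header,
// mirroring the call site in BlobContext above.
let features = BlobFeatures::INLINED_META | BlobFeatures::CAP_TAR_TOC;
let mut header = BlobMetaHeaderOndisk::default();
header.set_cap_tar_toc(features.contains(BlobFeatures::CAP_TAR_TOC));
// Disabling works symmetrically: the bit is masked out of s_features.
header.set_cap_tar_toc(false);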
@@ -404,7 +404,7 @@ impl BlobMetaInfo {
        let mut state = BlobMetaState {
            blob_index: blob_info.blob_index(),
            blob_features: blob_info.features().bits(),
-            compressed_size: blob_info.compressed_size(),
+            compressed_size: blob_info.compressed_data_size(),
            uncompressed_size: round_up_4k(blob_info.uncompressed_size()),
            chunk_info_array: chunk_infos,
            chunk_digest_array: Default::default(),
@@ -1202,7 +1202,6 @@ impl BlobMetaChunkArray {
            }
        }

-        let mut vec = Vec::with_capacity(128);
        for entry in &chunk_info_array[index..] {
            entry.validate(state)?;
            if !entry.is_zran() {
@@ -1212,8 +1211,8 @@
                }
                if entry.get_zran_index() != zran_last {
                    let ctx = &state.zran_info_array[entry.get_zran_index() as usize];
-                    if count + ctx.out_size() as u64 > batch_size
-                        && entry.uncompressed_offset() > end
+                    if count + ctx.out_size() as u64 >= batch_size
+                        && entry.uncompressed_offset() >= end
                    {
                        return Ok(vec);
                    }
@@ -1223,7 +1222,17 @@
                vec.push(BlobMetaChunk::new(index, state));
                index += 1;
            }
-            return Ok(vec);
+
+            if let Some(c) = vec.last() {
+                if c.uncompressed_end() >= end {
+                    return Ok(vec);
+                }
+            }
+            return Err(einval!(format!(
+                "entry not found index {} chunk_info_array.len {}",
+                index,
+                chunk_info_array.len(),
+            )));
        }

        vec.push(BlobMetaChunk::new(index, state));
@@ -1242,7 +1251,7 @@
                    entry.uncompressed_size(),
                    last_end
                )));
-            } else if last_end >= end && entry.aligned_uncompressed_end() > batch_end {
+            } else if last_end >= end && entry.aligned_uncompressed_end() >= batch_end {
                // Avoid read amplification if the next chunk is too big.
                return Ok(vec);
            }
@@ -1254,11 +1263,25 @@
            }
        }

-        Err(einval!(format!(
-            "entry not found index {} chunk_info_array.len {}",
-            index,
-            chunk_info_array.len(),
-        )))
+        if last_end >= end {
+            Ok(vec)
+        } else {
+            panic!(
+                "entry not found index {} chunk_info_array.len {}, last_end 0x{:x}, end 0x{:x}, blob uncompressed size 0x{:x}",
+                index,
+                chunk_info_array.len(),
+                last_end,
+                end,
+                state.uncompressed_size,
+            )
+            /*
+            Err(einval!(format!(
+                "entry not found index {} chunk_info_array.len {}",
+                index,
+                chunk_info_array.len(),
+            )))
+            */
+        }
    }
}

@@ -1294,7 +1317,6 @@ impl BlobMetaChunkArray {
            }
        }

-        let mut vec = Vec::with_capacity(128);
        for entry in &chunk_info_array[index..] {
            entry.validate(state)?;
            if !entry.is_zran() {
@@ -1314,7 +1336,17 @@
                vec.push(BlobMetaChunk::new(index, state));
                index += 1;
            }
-            return Ok(vec);
+
+            if let Some(c) = vec.last() {
+                if c.uncompressed_end() >= end {
+                    return Ok(vec);
+                }
+            }
+            return Err(einval!(format!(
+                "entry not found index {} chunk_info_array.len {}",
+                index,
+                chunk_info_array.len(),
+            )));
        }

        vec.push(BlobMetaChunk::new(index, state));
@@ -1338,11 +1370,27 @@
            }
        }

-        Err(einval!(format!(
-            "entry not found index {} chunk_info_array.len {}",
-            index,
-            chunk_info_array.len(),
-        )))
+        if last_end >= end {
+            Ok(vec)
+        } else {
+            panic!(
+                "entry not found index {} chunk_info_array.len {}, last_end 0x{:x}, end 0x{:x}, blob compressed size 0x{:x}",
+                index,
+                chunk_info_array.len(),
+                last_end,
+                end,
+                state.compressed_size,
+            )
+            /*
+            Err(einval!(format!(
+                "entry not found index {} chunk_info_array.len {}, last_end 0x{:x}, end 0x{:x}",
+                index,
+                chunk_info_array.len(),
+                last_end,
+                end,
+            )))
+            */
+        }
    }
}
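The substance of the fix is the new tail check in both lookup paths: after the scan loop, the selected chunks must actually cover the requested end, otherwise the result is wrong rather than merely short. A condensed sketch of that invariant over a simplified chunk type; the real code walks chunk_info_array and additionally distinguishes the ZRAN path:

// Sketch only: the last-chunk coverage invariant added above. Chunks are
// half-open [start, end) uncompressed ranges, sorted by offset.
#[derive(Clone, Copy)]
struct Chunk {
    uncompressed_offset: u64,
    uncompressed_end: u64,
}

fn chunks_for_range(chunks: &[Chunk], start: u64, end: u64) -> Result<Vec<Chunk>, String> {
    let mut vec = Vec::new();
    for c in chunks {
        if c.uncompressed_end <= start {
            continue; // entirely before the requested range
        }
        if c.uncompressed_offset >= end {
            break; // entirely after it; the range must already be covered
        }
        vec.push(*c);
    }
    // The fix: don't trust loop exit alone. If the last selected chunk stops
    // short of `end`, e.g. because the last chunk of the blob was mis-sized,
    // report an error instead of silently returning a partial result.
    match vec.last() {
        Some(c) if c.uncompressed_end >= end => Ok(vec),
        _ => Err(format!("entry not found, end 0x{:x} not covered", end)),
    }
}

Previously both paths returned Ok(vec) unconditionally once the loop ended, so a request reaching past the last chunk, which could happen when blob sizes included trailing metadata, produced a silently truncated chunk list.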
