Skip to content

Commit

Permalink
Remove is_dirty parameter from unpin_page and unpin_page_id methods
Browse files Browse the repository at this point in the history
  • Loading branch information
lewiszlw committed Feb 25, 2024
1 parent 27cb7b4 commit 27f1690
Show file tree
Hide file tree
Showing 5 changed files with 47 additions and 52 deletions.
27 changes: 13 additions & 14 deletions bustubx/src/buffer/buffer_pool.rs
Original file line number Diff line number Diff line change
Expand Up @@ -101,14 +101,13 @@ impl BufferPoolManager {
}

// 从缓冲池中取消固定页
pub fn unpin_page_id(&self, page_id: PageId, is_dirty: bool) -> BustubxResult<()> {
pub fn unpin_page_id(&self, page_id: PageId) -> BustubxResult<()> {
if let Some(frame_id) = self.page_table.get(&page_id) {
let page = self.pool[*frame_id].clone();
if page.read().unwrap().pin_count == 0 {
return Ok(());
}
page.write().unwrap().pin_count -= 1;
page.write().unwrap().is_dirty |= is_dirty;
let pin_count = page.read().unwrap().pin_count;
if pin_count == 0 {
self.replacer
Expand All @@ -125,9 +124,9 @@ impl BufferPoolManager {
}
}

pub fn unpin_page(&self, page: Arc<RwLock<Page>>, is_dirty: bool) -> BustubxResult<()> {
pub fn unpin_page(&self, page: Arc<RwLock<Page>>) -> BustubxResult<()> {
let page_id = page.read().unwrap().page_id;
self.unpin_page_id(page_id, is_dirty)
self.unpin_page_id(page_id)
}

// 将缓冲池中指定页写回磁盘
Expand Down Expand Up @@ -235,7 +234,7 @@ mod tests {
let page4 = buffer_pool.new_page();
assert!(page4.is_err());

buffer_pool.unpin_page_id(page1_id, false).unwrap();
buffer_pool.unpin_page_id(page1_id).unwrap();
let page5 = buffer_pool.new_page().unwrap();
let page5_id = page5.read().unwrap().page_id;
assert_eq!(buffer_pool.pool[0].read().unwrap().page_id, page5_id,);
Expand All @@ -256,7 +255,7 @@ mod tests {
let page4 = buffer_pool.new_page();
assert!(page4.is_err());

buffer_pool.unpin_page_id(page1_id, true).unwrap();
buffer_pool.unpin_page_id(page1_id).unwrap();
let page5 = buffer_pool.new_page();
assert!(page5.is_ok());
}
Expand All @@ -271,23 +270,23 @@ mod tests {

let page1 = buffer_pool.new_page().unwrap();
let page1_id = page1.read().unwrap().page_id;
buffer_pool.unpin_page_id(page1_id, true).unwrap();
buffer_pool.unpin_page_id(page1_id).unwrap();

let page2 = buffer_pool.new_page().unwrap();
let page2_id = page2.read().unwrap().page_id;
buffer_pool.unpin_page_id(page2_id, false).unwrap();
buffer_pool.unpin_page_id(page2_id).unwrap();

let page3 = buffer_pool.new_page().unwrap();
let page3_id = page3.read().unwrap().page_id;
buffer_pool.unpin_page_id(page3_id, false).unwrap();
buffer_pool.unpin_page_id(page3_id).unwrap();

let page = buffer_pool.fetch_page(page1_id).unwrap();
assert_eq!(page.read().unwrap().page_id, page1_id);
buffer_pool.unpin_page_id(page1_id, false).unwrap();
buffer_pool.unpin_page_id(page1_id).unwrap();

let page = buffer_pool.fetch_page(page2_id).unwrap();
assert_eq!(page.read().unwrap().page_id, page2_id);
buffer_pool.unpin_page_id(page2_id, false).unwrap();
buffer_pool.unpin_page_id(page2_id).unwrap();

assert_eq!(buffer_pool.replacer.read().unwrap().size(), 3);
}
Expand All @@ -302,15 +301,15 @@ mod tests {

let page1 = buffer_pool.new_page().unwrap();
let page1_id = page1.read().unwrap().page_id;
buffer_pool.unpin_page_id(page1_id, true).unwrap();
buffer_pool.unpin_page_id(page1_id).unwrap();

let page2 = buffer_pool.new_page().unwrap();
let page2_id = page2.read().unwrap().page_id;
buffer_pool.unpin_page_id(page2_id, true).unwrap();
buffer_pool.unpin_page_id(page2_id).unwrap();

let page3 = buffer_pool.new_page().unwrap();
let page3_id = page3.read().unwrap().page_id;
buffer_pool.unpin_page_id(page3_id, false).unwrap();
buffer_pool.unpin_page_id(page3_id).unwrap();

let res = buffer_pool.delete_page(page1_id).unwrap();
assert!(res);
Expand Down
2 changes: 1 addition & 1 deletion bustubx/src/catalog/information.rs
Original file line number Diff line number Diff line change
Expand Up @@ -158,7 +158,7 @@ fn load_table_last_page_id(
loop {
let page = catalog.buffer_pool.fetch_page(page_id)?;
let (table_page, _) = TablePageCodec::decode(page.read().unwrap().data(), schema.clone())?;
catalog.buffer_pool.unpin_page_id(page_id, false)?;
catalog.buffer_pool.unpin_page_id(page_id)?;

if table_page.header.next_page_id == INVALID_PAGE_ID {
return Ok(page_id);
Expand Down
2 changes: 1 addition & 1 deletion bustubx/src/common/util.rs
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,7 @@ pub(crate) fn pretty_format_index_tree(index: &BPlusTreeIndex) -> BustubxResult<
let page = index.buffer_pool.fetch_page(page_id)?;
let (curr_page, _) =
BPlusTreePageCodec::decode(page.read().unwrap().data(), index.key_schema.clone())?;
index.buffer_pool.unpin_page(page, false)?;
index.buffer_pool.unpin_page(page)?;

match curr_page {
BPlusTreePage::Internal(internal_page) => {
Expand Down
48 changes: 23 additions & 25 deletions bustubx/src/storage/index.rs
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@ impl BPlusTreeIndex {
.set_data(page_bytes_to_array(&BPlusTreePageCodec::encode(
&curr_tree_page,
)));
self.buffer_pool.unpin_page(curr_page.clone(), true)?;
self.buffer_pool.unpin_page(curr_page.clone())?;

let curr_page_id = curr_page.read().unwrap().page_id;
if let Some(parent_page_id) = context.read_set.pop_back() {
Expand All @@ -107,7 +107,7 @@ impl BPlusTreeIndex {
parent_page.read().unwrap().data(),
self.key_schema.clone(),
)?;
self.buffer_pool.unpin_page(parent_page.clone(), false)?;
self.buffer_pool.unpin_page(parent_page.clone())?;
parent_tree_page.insert_internalkv(internalkv);

curr_page = parent_page;
Expand All @@ -129,7 +129,7 @@ impl BPlusTreeIndex {
new_root_page.write().unwrap().set_data(page_bytes_to_array(
&BPlusTreeInternalPageCodec::encode(&new_root_internal_page),
));
self.buffer_pool.unpin_page(new_root_page.clone(), true)?;
self.buffer_pool.unpin_page(new_root_page.clone())?;

// 更新root page id
self.root_page_id.store(new_root_page_id, Ordering::SeqCst);
Expand All @@ -145,7 +145,7 @@ impl BPlusTreeIndex {
.set_data(page_bytes_to_array(&BPlusTreePageCodec::encode(
&curr_tree_page,
)));
self.buffer_pool.unpin_page(curr_page, true)?;
self.buffer_pool.unpin_page(curr_page)?;

Ok(())
}
Expand Down Expand Up @@ -248,7 +248,7 @@ impl BPlusTreeIndex {
// 更新root page id
self.root_page_id.store(new_page_id, Ordering::SeqCst);

self.buffer_pool.unpin_page_id(new_page_id, true)?;
self.buffer_pool.unpin_page_id(new_page_id)?;
Ok(())
}

Expand All @@ -268,7 +268,7 @@ impl BPlusTreeIndex {
self.key_schema.clone(),
)?;
let result = leaf_tree_page.look_up(key);
self.buffer_pool.unpin_page(leaf_page, false)?;
self.buffer_pool.unpin_page(leaf_page)?;
Ok(result)
}

Expand All @@ -294,7 +294,7 @@ impl BPlusTreeIndex {
.read_set
.push_back(curr_page.read().unwrap().page_id);
// 释放上一页
self.buffer_pool.unpin_page(curr_page, false)?;
self.buffer_pool.unpin_page(curr_page)?;
// 查找下一页
let next_page_id = internal_page.look_up(key);
let next_page = self.buffer_pool.fetch_page(next_page_id)?;
Expand All @@ -306,7 +306,7 @@ impl BPlusTreeIndex {
curr_tree_page = next_tree_page;
}
BPlusTreePage::Leaf(_leaf_page) => {
self.buffer_pool.unpin_page(curr_page.clone(), false)?;
self.buffer_pool.unpin_page(curr_page.clone())?;
return Ok(Some(curr_page));
}
}
Expand All @@ -333,7 +333,7 @@ impl BPlusTreeIndex {
new_page.write().unwrap().set_data(page_bytes_to_array(
&BPlusTreeLeafPageCodec::encode(&new_leaf_page),
));
self.buffer_pool.unpin_page_id(new_page_id, true)?;
self.buffer_pool.unpin_page_id(new_page_id)?;

Ok((new_leaf_page.key_at(0).clone(), new_page_id))
}
Expand All @@ -348,7 +348,7 @@ impl BPlusTreeIndex {
new_page.write().unwrap().set_data(page_bytes_to_array(
&BPlusTreeInternalPageCodec::encode(&new_internal_page),
));
self.buffer_pool.unpin_page_id(new_page_id, true)?;
self.buffer_pool.unpin_page_id(new_page_id)?;

let min_leafkv = self.find_subtree_min_leafkv(new_page_id)?;
Ok((min_leafkv.0, new_page_id))
Expand Down Expand Up @@ -445,15 +445,15 @@ impl BPlusTreeIndex {
page.write()
.unwrap()
.set_data(page_bytes_to_array(&BPlusTreePageCodec::encode(&tree_page)));
self.buffer_pool.unpin_page_id(page_id, true)?;
self.buffer_pool.unpin_page_id(page_id)?;

borrowed_page
.write()
.unwrap()
.set_data(page_bytes_to_array(&BPlusTreePageCodec::encode(
&borrowed_tree_page,
)));
self.buffer_pool.unpin_page_id(borrowed_page_id, true)?;
self.buffer_pool.unpin_page_id(borrowed_page_id)?;

// 更新父节点
let parent_page = self.buffer_pool.fetch_page(parent_page_id)?;
Expand All @@ -466,7 +466,7 @@ impl BPlusTreeIndex {
parent_page.write().unwrap().set_data(page_bytes_to_array(
&BPlusTreeInternalPageCodec::encode(&parent_internal_page),
));
self.buffer_pool.unpin_page_id(parent_page_id, true)?;
self.buffer_pool.unpin_page_id(parent_page_id)?;
Ok(true)
}

Expand All @@ -480,7 +480,7 @@ impl BPlusTreeIndex {
parent_page.read().unwrap().data(),
self.key_schema.clone(),
)?;
self.buffer_pool.unpin_page_id(parent_page_id, false)?;
self.buffer_pool.unpin_page_id(parent_page_id)?;
Ok(parent_page.sibling_page_ids(child_page_id))
}

Expand Down Expand Up @@ -532,10 +532,10 @@ impl BPlusTreeIndex {
.set_data(page_bytes_to_array(&BPlusTreePageCodec::encode(
&left_tree_page,
)));
self.buffer_pool.unpin_page_id(left_page_id, true)?;
self.buffer_pool.unpin_page_id(left_page_id)?;

// 删除右边页
self.buffer_pool.unpin_page_id(right_page_id, false)?;
self.buffer_pool.unpin_page_id(right_page_id)?;
self.buffer_pool.delete_page(right_page_id)?;

// 更新父节点
Expand All @@ -552,14 +552,14 @@ impl BPlusTreeIndex {
{
self.root_page_id.store(left_page_id, Ordering::SeqCst);
// 删除旧的根节点
self.buffer_pool.unpin_page_id(parent_page_id, false)?;
self.buffer_pool.unpin_page_id(parent_page_id)?;
self.buffer_pool.delete_page(parent_page_id)?;
Ok(left_page_id)
} else {
parent_page.write().unwrap().set_data(page_bytes_to_array(
&BPlusTreeInternalPageCodec::encode(&parent_internal_page),
));
self.buffer_pool.unpin_page_id(parent_page_id, true)?;
self.buffer_pool.unpin_page_id(parent_page_id)?;
Ok(parent_page_id)
}
}
Expand All @@ -578,7 +578,7 @@ impl BPlusTreeIndex {
let curr_page = self.buffer_pool.fetch_page(page_id)?;
let (mut curr_tree_page, _) =
BPlusTreePageCodec::decode(curr_page.read().unwrap().data(), self.key_schema.clone())?;
self.buffer_pool.unpin_page(curr_page.clone(), false)?;
self.buffer_pool.unpin_page(curr_page.clone())?;
loop {
match curr_tree_page {
BPlusTreePage::Internal(internal_page) => {
Expand All @@ -594,7 +594,7 @@ impl BPlusTreeIndex {
self.key_schema.clone(),
)?
.0;
self.buffer_pool.unpin_page(next_page, false)?;
self.buffer_pool.unpin_page(next_page)?;
}
BPlusTreePage::Leaf(leaf_page) => {
let index = if min_or_max {
Expand All @@ -614,7 +614,7 @@ impl BPlusTreeIndex {
.fetch_page(self.root_page_id.load(Ordering::SeqCst))?;
let (mut curr_tree_page, _) =
BPlusTreePageCodec::decode(curr_page.read().unwrap().data(), self.key_schema.clone())?;
self.buffer_pool.unpin_page(curr_page.clone(), false)?;
self.buffer_pool.unpin_page(curr_page.clone())?;
loop {
match curr_tree_page {
BPlusTreePage::Internal(internal_page) => {
Expand All @@ -625,7 +625,7 @@ impl BPlusTreeIndex {
self.key_schema.clone(),
)?
.0;
self.buffer_pool.unpin_page(next_page, false)?;
self.buffer_pool.unpin_page(next_page)?;
}
BPlusTreePage::Leaf(leaf_page) => {
return Ok(leaf_page);
Expand Down Expand Up @@ -668,9 +668,7 @@ impl TreeIndexIterator {
self.index.key_schema.clone(),
)?
.0;
self.index
.buffer_pool
.unpin_page(next_page.clone(), false)?;
self.index.buffer_pool.unpin_page(next_page.clone())?;
Ok(true)
}
}
Expand Down
20 changes: 9 additions & 11 deletions bustubx/src/storage/table_heap.rs
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ impl TableHeap {
.write()
.unwrap()
.set_data(page_bytes_to_array(&TablePageCodec::encode(&table_page)));
buffer_pool.unpin_page_id(first_page_id, true)?;
buffer_pool.unpin_page_id(first_page_id)?;

Ok(Self {
schema,
Expand Down Expand Up @@ -89,7 +89,7 @@ impl TableHeap {
.set_data(page_bytes_to_array(&TablePageCodec::encode(
&last_table_page,
)));
self.buffer_pool.unpin_page_id(last_page_id, true)?;
self.buffer_pool.unpin_page_id(last_page_id)?;

// Update last_page_id.
last_page_id = next_page_id;
Expand All @@ -106,7 +106,7 @@ impl TableHeap {
.set_data(page_bytes_to_array(&TablePageCodec::encode(
&last_table_page,
)));
self.buffer_pool.unpin_page_id(last_page_id, true)?;
self.buffer_pool.unpin_page_id(last_page_id)?;

// Map the slot_id to a Rid and return
Ok(Rid::new(last_page_id, slot_id as u32))
Expand All @@ -121,7 +121,7 @@ impl TableHeap {
page.write()
.unwrap()
.set_data(page_bytes_to_array(&TablePageCodec::encode(&table_page)));
self.buffer_pool.unpin_page_id(rid.page_id, true)?;
self.buffer_pool.unpin_page_id(rid.page_id)?;
Ok(())
}

Expand All @@ -130,7 +130,7 @@ impl TableHeap {
let (table_page, _) =
TablePageCodec::decode(page.read().unwrap().data(), self.schema.clone())?;
let result = table_page.tuple(rid.slot_num as u16)?;
self.buffer_pool.unpin_page_id(rid.page_id, false)?;
self.buffer_pool.unpin_page_id(rid.page_id)?;
Ok(result)
}

Expand All @@ -139,7 +139,7 @@ impl TableHeap {
let (table_page, _) =
TablePageCodec::decode(page.read().unwrap().data(), self.schema.clone())?;
let result = table_page.tuple_meta(rid.slot_num as u16)?;
self.buffer_pool.unpin_page_id(rid.page_id, false)?;
self.buffer_pool.unpin_page_id(rid.page_id)?;
Ok(result)
}

Expand All @@ -151,9 +151,7 @@ impl TableHeap {
.expect("Can not fetch page");
let (table_page, _) =
TablePageCodec::decode(page.read().unwrap().data(), self.schema.clone()).unwrap();
self.buffer_pool
.unpin_page_id(first_page_id, false)
.unwrap();
self.buffer_pool.unpin_page_id(first_page_id).unwrap();
if table_page.header.num_tuples == 0 {
// TODO 忽略删除的tuple
None
Expand All @@ -169,7 +167,7 @@ impl TableHeap {
.expect("Can not fetch page");
let (table_page, _) =
TablePageCodec::decode(page.read().unwrap().data(), self.schema.clone()).unwrap();
self.buffer_pool.unpin_page_id(rid.page_id, false).unwrap();
self.buffer_pool.unpin_page_id(rid.page_id).unwrap();
let next_rid = table_page.get_next_rid(&rid);
if next_rid.is_some() {
return next_rid;
Expand All @@ -185,7 +183,7 @@ impl TableHeap {
let (next_table_page, _) =
TablePageCodec::decode(next_page.read().unwrap().data(), self.schema.clone()).unwrap();
self.buffer_pool
.unpin_page_id(table_page.header.next_page_id, false)
.unpin_page_id(table_page.header.next_page_id)
.unwrap();
if next_table_page.header.num_tuples == 0 {
// TODO 忽略删除的tuple
Expand Down

0 comments on commit 27f1690

Please sign in to comment.