row_purge_remove_clust_if_poss():
static MY_ATTRIBUTE((warn_unused_result)) bool row_purge_remove_clust_if_poss(
purge_node_t *node) /*!< in/out: row purge node */
{
/** First try to remove this rec from the leaf page only */
if (row_purge_remove_clust_if_poss_low(node, BTR_MODIFY_LEAF)) {
return (true);
}
/** Otherwise delete this rec with a full B-tree modification, retrying a limited number of times */
for (ulint n_tries = 0; n_tries < BTR_CUR_RETRY_DELETE_N_TIMES; n_tries++) {
if (row_purge_remove_clust_if_poss_low(
node, BTR_MODIFY_TREE | BTR_LATCH_FOR_DELETE)) {
return (true);
}
os_thread_sleep(BTR_CUR_RETRY_SLEEP_TIME);
}
return (false);
}
/**===========row_purge_remove_clust_if_poss_low=============*/
static MY_ATTRIBUTE((warn_unused_result)) bool row_purge_remove_clust_if_poss_low(
purge_node_t *node, /*!< in/out: row purge node */
ulint mode) /*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE */
{
...
/** Get the clustered index */
index = node->table->first_index();
...
/** Reposition the persistent cursor on this rec in the B-tree; if it cannot be found, the rec has probably already been removed, so exit */
if (!row_purge_reposition_pcur(mode, node, &mtr)) {
/* The record was already removed. */
goto func_exit;
}
/** Fetch the rec and its offsets */
rec = btr_pcur_get_rec(&node->pcur);
offsets = rec_get_offsets(rec, index, offsets_, ULINT_UNDEFINED, &heap);
/** Check that the roll_ptr of the undo record we are purging matches the
roll_ptr stored in the B-tree rec. If they differ, the record has been
updated again in the meantime, so exit. */
if (node->roll_ptr != row_get_rec_roll_ptr(rec, index, offsets)) {
/* Someone else has modified the record later: do not remove */
goto func_exit;
}
...
/** Try to delete this rec from the leaf page only; if that is not possible, delete it with a full B-tree modification */
if (mode == BTR_MODIFY_LEAF) {
success =
btr_cur_optimistic_delete(btr_pcur_get_btr_cur(&node->pcur), 0, &mtr);
} else {
dberr_t err;
ut_ad(mode == (BTR_MODIFY_TREE | BTR_LATCH_FOR_DELETE));
btr_cur_pessimistic_delete(&err, FALSE, btr_pcur_get_btr_cur(&node->pcur),
0, false, node->trx_id, node->undo_no,
node->rec_type, &mtr);
...
}
}
func_exit:
...
return (success);
}
Since btr_cur_optimistic_delete() and btr_cur_pessimistic_delete() were already covered earlier, when we discussed how row_purge_remove_sec_if_poss() removes secondary index entries, they are not repeated here.
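For reference, the optimistic-then-pessimistic retry pattern used by row_purge_remove_clust_if_poss() above boils down to the following standalone sketch. The stub functions and constants here are illustrative placeholders only, not the real InnoDB symbols (those are btr_cur_optimistic_delete(), btr_cur_pessimistic_delete(), BTR_CUR_RETRY_DELETE_N_TIMES and BTR_CUR_RETRY_SLEEP_TIME):
#include <chrono>
#include <cstdio>
#include <thread>
/* Illustrative placeholder values; the real constants live in the InnoDB
source as BTR_CUR_RETRY_DELETE_N_TIMES / BTR_CUR_RETRY_SLEEP_TIME. */
static const int kRetryTimes = 100;
static const std::chrono::microseconds kRetrySleep(10000);
/* Hypothetical stubs standing in for btr_cur_optimistic_delete() and
btr_cur_pessimistic_delete(). */
static bool optimistic_delete() { return false; /* e.g. the leaf page would underflow */ }
static bool pessimistic_delete() { return true; /* may reorganize the tree */ }
/* Same shape as row_purge_remove_clust_if_poss(): try the cheap leaf-only
delete first, then fall back to a full tree modification, retrying a
bounded number of times with a short sleep between attempts. */
static bool remove_if_poss() {
  if (optimistic_delete()) {
    return true;
  }
  for (int n_tries = 0; n_tries < kRetryTimes; n_tries++) {
    if (pessimistic_delete()) {
      return true;
    }
    std::this_thread::sleep_for(kRetrySleep);
  }
  return false;
}
int main() {
  std::printf("removed=%d\n", remove_if_poss());
  return 0;
}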
This completes the walkthrough of the operations needed to purge a TRX_UNDO_DEL_MARK_REC undo log record.
TRX_UNDO_UPD_EXIST_REC
The entry point for purging a TRX_UNDO_UPD_EXIST_REC undo log record is row_purge_upd_exist_or_extern(), which removes the outdated secondary index entries and frees any externally stored fields. Its flow is as follows:
static void row_purge_upd_exist_or_extern_func(
#ifdef UNIV_DEBUG
const que_thr_t *thr, /*!< in: query thread */
#endif /* UNIV_DEBUG */
purge_node_t *node, /*!< in: row purge node */
trx_undo_rec_t *undo_rec) /*!< in: record to purge */
{
...
/** If rec_type is TRX_UNDO_UPD_DEL_REC, the rec was previously delete-marked
and its secondary index entries have already been removed. If cmpl_info
contains UPD_NODE_NO_ORD_CHANGE, no secondary index was modified. In either
case, skip the secondary indexes. */
if (node->rec_type == TRX_UNDO_UPD_DEL_REC ||
(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE)) {
goto skip_secondaries;
}
...
/** Iterate over all secondary indexes of this rec */
while (node->index != NULL) {
/** Skip secondary indexes that may be corrupted, as well as indexes on uncommitted virtual columns */
dict_table_skip_corrupt_index(node->index);
row_purge_skip_uncommitted_virtual_index(node->index);
if (!node->index) {
break;
}
/** If the update changed an ordering field of this index, build the index entry as it was before the update */
if (row_upd_changes_ord_field_binary(node->index, node->update, thr, NULL,
NULL)) {
/* Build the older version of the index entry */
dtuple_t *entry = row_build_index_entry_low(node->row, NULL, node->index,
heap, ROW_BUILD_FOR_PURGE);
/** Remove the old entry from the secondary index */
row_purge_remove_sec_if_poss(node, node->index, entry);
...
}
node->index = node->index->next();
}
mem_heap_free(heap);
skip_secondaries:
/* Free possible externally stored fields */
for (ulint i = 0; i < upd_get_n_fields(node->update); i++) {
const upd_field_t *ufield = upd_get_nth_field(node->update, i);
if (dfield_is_ext(&ufield->new_val)) {
buf_block_t *block;
...
/* We use the fact that new_val points to
undo_rec and get thus the offset of
dfield data inside the undo record. Then we
can calculate from node->roll_ptr the file
address of the new_val data */
internal_offset =
((const byte *)dfield_get_data(&ufield->new_val)) - undo_rec;
ut_a(internal_offset < UNIV_PAGE_SIZE);
trx_undo_decode_roll_ptr(node->roll_ptr, &is_insert, &rseg_id, &page_no,
&offset);
/* If table is temp then it can't have its undo log
residing in rollback segment with REDO log enabled. */
bool is_temp = node->table->is_temporary();
undo_space_id = trx_rseg_id_to_space_id(rseg_id, is_temp);
mtr_start(&mtr);
/* We have to acquire an SX-latch to the clustered
index tree (exclude other tree changes) */
index = node->table->first_index();
mtr_sx_lock(dict_index_get_lock(index), &mtr);
/* NOTE: we must also acquire an X-latch to the
root page of the tree. We will need it when we
free pages from the tree. If the tree is of height 1,
the tree X-latch does NOT protect the root page,
because it is also a leaf page. Since we will have a
latch on an undo log page, we would break the
latching order if we would only later latch the
root page of such a tree! */
btr_root_get(index, &mtr);
block = buf_page_get(page_id_t(undo_space_id, page_no), univ_page_size,
RW_X_LATCH, &mtr);
buf_block_dbg_add_level(block, SYNC_TRX_UNDO_PAGE);
data_field = buf_block_get_frame(block) + offset + internal_offset;
ut_a(dfield_get_len(&ufield->new_val) >= BTR_EXTERN_FIELD_REF_SIZE);
byte *field_ref = data_field + dfield_get_len(&ufield->new_val) -
BTR_EXTERN_FIELD_REF_SIZE;
lob::BtrContext btr_ctx(&mtr, NULL, index, NULL, NULL, block);
lob::DeleteContext ctx(btr_ctx, field_ref, 0, false);
lob::ref_t lobref(field_ref);
/** Purge the externally stored (BLOB) data */
lob::purge(&ctx, index, node->modifier_trx_id,
trx_undo_rec_get_undo_no(undo_rec), lobref, node->rec_type,
ufield);
mtr_commit(&mtr);
}
}
}
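The cleanup of externally stored fields above relies on trx_undo_decode_roll_ptr() to turn node->roll_ptr back into a location inside the undo log. Based on InnoDB's 7-byte roll pointer layout (a 1-bit insert flag, a 7-bit rollback segment id, a 32-bit undo page number and a 16-bit byte offset within that page), the decoding can be sketched as the following standalone snippet; it is a simplified illustration, not the actual InnoDB function:
#include <cstdint>
#include <cstdio>
/* Simplified stand-in for InnoDB's roll_ptr_t. */
using roll_ptr_t = std::uint64_t;
/* Decode a 7-byte (56-bit) roll pointer into its components:
   bit 55      : is_insert flag
   bits 48..54 : rollback segment id (7 bits)
   bits 16..47 : undo page number (32 bits)
   bits  0..15 : byte offset of the undo record within the page */
static void decode_roll_ptr(roll_ptr_t roll_ptr, bool *is_insert,
                            std::uint32_t *rseg_id, std::uint32_t *page_no,
                            std::uint32_t *offset) {
  *offset = static_cast<std::uint32_t>(roll_ptr & 0xFFFF);
  *page_no = static_cast<std::uint32_t>((roll_ptr >> 16) & 0xFFFFFFFF);
  *rseg_id = static_cast<std::uint32_t>((roll_ptr >> 48) & 0x7F);
  *is_insert = (roll_ptr >> 55) != 0;
}
int main() {
  /* Hypothetical roll pointer value, for illustration only. */
  roll_ptr_t roll_ptr = (roll_ptr_t{0} << 55) | (roll_ptr_t{3} << 48) |
                        (roll_ptr_t{1234} << 16) | 272;
  bool is_insert;
  std::uint32_t rseg_id, page_no, offset;
  decode_roll_ptr(roll_ptr, &is_insert, &rseg_id, &page_no, &offset);
  std::printf("is_insert=%d rseg_id=%u page_no=%u offset=%u\n", is_insert,
              rseg_id, page_no, offset);
  return 0;
}
With page_no and offset in hand, plus the undo tablespace id derived from rseg_id, the purge code can fetch the undo page and compute data_field = page frame + offset + internal_offset, exactly as in the listing above.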
Since row_purge_remove_sec_if_poss() was already covered when we discussed how row_purge_del_mark() purges TRX_UNDO_DEL_MARK_REC undo logs, it is not repeated here.
Summary
This concludes our walkthrough of how undo logs are purged. How BLOB data is cleaned up may be covered in a later article.