Oracle 的 DBMS_DATAPUMP 是一个强大的工具,用于高效地将数据从一个数据库导出到另一个数据库,或者在同一个数据库内移动数据。它是 Oracle 提供的一种数据泵(Data Pump)技术,可用于实现数据迁移、备份和恢复等操作。
LightDB 24.1 开始支持该特性,测试如下:
create database testdb01 with owner = lightdb encoding = 'UTF8' lightdb_syntax_compatible_type = 'oracle'; alter user lightdb with password 'lightdb'; lightdb@postgres=# \c testdb01 You are now connected to database "testdb01" as user "lightdb". compatible type: oracle create schema at01; grant usage on schema at01 to public; create table at01.q1(id int,vals varchar2(32)); insert into at01.q1(id,vals) select generate_series, '#' || generate_series from generate_series(1, 10000000); create table at01.w1(id int,vals varchar2(32)); insert into at01.w1(id,vals) select generate_series, '#' || generate_series from generate_series(1, 10000000); create table at01.e1(id int,vals varchar2(32)); insert into at01.e1(id,vals) select generate_series, '#' || generate_series from generate_series(1, 10000000); create table at01.r1(id int,vals varchar2(32)); insert into at01.r1(id,vals) select generate_series, '#' || generate_series from generate_series(1, 10000000); create schema bt01; grant usage on schema bt01 to public; create table bt01.a1(id int,vals varchar2(32)); insert into bt01.a1(id,vals) select generate_series, '#' || generate_series from generate_series(1, 10000000); create table bt01.s1(id int,vals varchar2(32)); insert into bt01.s1(id,vals) select generate_series, '#' || generate_series from generate_series(1, 10000000); create table bt01.d1(id int,vals varchar2(32)); insert into bt01.d1(id,vals) select generate_series, '#' || generate_series from generate_series(1, 10000000); create table bt01.f1(id int,vals varchar2(32)); insert into bt01.f1(id,vals) select generate_series, '#' || generate_series from generate_series(1, 10000000); select count(*) from at01.q1; select count(*) from at01.w1; select count(*) from at01.e1; select count(*) from at01.r1; select count(*) from bt01.a1; select count(*) from bt01.s1; select count(*) from bt01.d1; select count(*) from bt01.f1; create or replace directory dmpdir as '/home/lightdb/datapump'; select * from utl_file.utl_file_dir; 
select dbms_output.serveroutput('t'); declare hdl numeric; jbs varchar2; begin dbms_output.put_line('step 001 ' || systimestamp); hdl := dbms_datapump.open(operation => 'EXPORT', job_mode => 'TABLE', remote_link => NULL, job_name => 'EXAMPLE001', version => 'LATEST'); dbms_output.put_line('handle = ' || hdl); dbms_output.put_line('step 002 ' || systimestamp); dbms_datapump.add_file(handle => hdl, filename => 'example001.dmp', directory => 'DMPDIR'); dbms_datapump.add_file(handle => hdl, filename => 'example001.log', directory => 'DMPDIR', filesize => '', filetype => dbms_datapump.ku$_file_type_log_file); dbms_output.put_line('step 003 ' || systimestamp); dbms_datapump.metadata_filter(handle => hdl, name => 'SCHEMA_EXPR', value => 'LIKE ''%bt%'''); dbms_datapump.metadata_filter(handle => hdl, name => 'NAME_EXPR', value => 'LIKE ''%1%'''); dbms_datapump.set_parameter(handle => hdl, name => 'COMPRESSION', value => 'ALL'); dbms_datapump.set_parallel(handle => hdl, degree => 2); dbms_output.put_line('step 004 ' || systimestamp); dbms_datapump.start_job(handle => hdl); dbms_output.put_line('step 005 ' || systimestamp); dbms_datapump.wait_for_job(handle => hdl,job_state => jbs); dbms_output.put_line('step 006 ' || systimestamp); dbms_datapump.stop_job(handle => hdl); dbms_output.put_line('step 007 ' || systimestamp); end; / ERROR: job already exists HINT: select a different job name, or stop the currently executing job and re-try the operation CONTEXT: PL/oraSQL function dbms_datapump.open(varchar2,varchar2,varchar2,varchar2,varchar2) line 28 at RAISE PL/oraSQL function inline_code_block line 7 at assignment
复制
先停止已存在的 EXAMPLE001 作业(job),再重新提交导出任务:
lightdb@testdb01=# call dbms_datapump.stop_job(handle => dbms_datapump.attach(job_name => 'EXAMPLE001', job_owner => 'lightdb')); CALL lightdb@testdb01=# declare lightdb@testdb01$# hdl numeric; lightdb@testdb01$# jbs varchar2; lightdb@testdb01$# begin lightdb@testdb01$# dbms_output.put_line('step 001 ' || systimestamp); lightdb@testdb01$# -- remote_link: 如果此参数的值非空,则它提供远程数据库的数据库链接名称,该数据库将成为当前作业的数据和元数据源。 lightdb@testdb01$# hdl := dbms_datapump.open(operation => 'EXPORT', job_mode => 'TABLE', remote_link => NULL, job_name => 'EXAMPLE001', version => 'LATEST'); lightdb@testdb01$# dbms_output.put_line('handle = ' || hdl); lightdb@testdb01$# lightdb@testdb01$# dbms_output.put_line('step 002 ' || systimestamp); lightdb@testdb01$# dbms_datapump.add_file(handle => hdl, filename => 'example001.dmp', directory => 'DMPDIR'); lightdb@testdb01$# dbms_datapump.add_file(handle => hdl, filename => 'example001.log', directory => 'DMPDIR', filesize => '', filetype => dbms_datapump.ku$_file_type_log_file); lightdb@testdb01$# lightdb@testdb01$# dbms_output.put_line('step 003 ' || systimestamp); lightdb@testdb01$# dbms_datapump.metadata_filter(handle => hdl, name => 'SCHEMA_EXPR', value => 'LIKE ''%bt%'''); lightdb@testdb01$# dbms_datapump.metadata_filter(handle => hdl, name => 'NAME_EXPR', value => 'LIKE ''%1%'''); lightdb@testdb01$# dbms_datapump.set_parameter(handle => hdl, name => 'COMPRESSION', value => 'ALL'); lightdb@testdb01$# dbms_datapump.set_parallel(handle => hdl, degree => 2); lightdb@testdb01$# lightdb@testdb01$# dbms_output.put_line('step 004 ' || systimestamp); lightdb@testdb01$# dbms_datapump.start_job(handle => hdl); lightdb@testdb01$# lightdb@testdb01$# dbms_output.put_line('step 005 ' || systimestamp); lightdb@testdb01$# dbms_datapump.wait_for_job(handle => hdl,job_state => jbs); lightdb@testdb01$# lightdb@testdb01$# 
dbms_output.put_line('step 006 ' || systimestamp); lightdb@testdb01$# dbms_datapump.stop_job(handle => hdl); lightdb@testdb01$# lightdb@testdb01$# dbms_output.put_line('step 007 ' || systimestamp); lightdb@testdb01$# end; lightdb@testdb01$# / NOTICE: export cmd:lt_dump -v --lt-exclude-lightdb-objects -d testdb01 -p 1991 -U lightdb -j 2 -n '*bt*' -t '(*bt*).(*1*)' -F d -f /home/lightdb/datapump/example001.dmp > /home/lightdb/datapump/example001.log 2>&1 & echo $! DO
复制
drop table bt01.a1; drop table bt01.s1; drop table bt01.d1; drop table bt01.f1; declare hdl numeric; jbs varchar2; begin dbms_output.put_line('step 001 ' || systimestamp); hdl := dbms_datapump.open(operation => 'IMPORT', job_mode => 'TABLE', remote_link => NULL, job_name => 'EXAMPLE001', version => 'LATEST'); dbms_output.put_line('handle = ' || hdl); dbms_output.put_line('step 002 ' || systimestamp); dbms_datapump.add_file(handle => hdl, filename => 'example001.dmp', directory => 'DMPDIR'); dbms_datapump.add_file(handle => hdl, filename => 'example001.log', directory => 'DMPDIR', filesize => '', filetype => dbms_datapump.ku$_file_type_log_file); dbms_datapump.set_parallel(handle => hdl, degree => 2); dbms_output.put_line('step 003 ' || systimestamp); dbms_datapump.start_job(handle => hdl); dbms_output.put_line('step 004 ' || systimestamp); dbms_datapump.wait_for_job(handle => hdl,job_state => jbs); dbms_output.put_line('step 005 ' || systimestamp); dbms_datapump.stop_job(handle => hdl); dbms_output.put_line('step 006 ' || systimestamp); end; / lightdb@testdb01=# declare lightdb@testdb01$# hdl numeric; lightdb@testdb01$# jbs varchar2; lightdb@testdb01$# begin lightdb@testdb01$# dbms_output.put_line('step 001 ' || systimestamp); lightdb@testdb01$# hdl := dbms_datapump.open(operation => 'IMPORT', job_mode => 'TABLE', remote_link => NULL, job_name => 'EXAMPLE001', version => 'LATEST'); lightdb@testdb01$# dbms_output.put_line('handle = ' || hdl); lightdb@testdb01$# dbms_output.put_line('step 002 ' || systimestamp); lightdb@testdb01$# dbms_datapump.add_file(handle => hdl, filename => 'example001.dmp', directory => 'DMPDIR'); lightdb@testdb01$# dbms_datapump.add_file(handle => hdl, filename => 'example001.log', directory => 'DMPDIR', filesize => '', filetype => dbms_datapump.ku$_file_type_log_file); lightdb@testdb01$# dbms_datapump.set_parallel(handle => hdl, degree => 2); lightdb@testdb01$# dbms_output.put_line('step 003 ' || systimestamp); lightdb@testdb01$# 
dbms_datapump.start_job(handle => hdl); lightdb@testdb01$# dbms_output.put_line('step 004 ' || systimestamp); lightdb@testdb01$# dbms_datapump.wait_for_job(handle => hdl,job_state => jbs); lightdb@testdb01$# dbms_output.put_line('step 005 ' || systimestamp); lightdb@testdb01$# dbms_datapump.stop_job(handle => hdl); lightdb@testdb01$# dbms_output.put_line('step 006 ' || systimestamp); lightdb@testdb01$# end; lightdb@testdb01$# / NOTICE: import cmd:lt_restore -v -d testdb01 -p 1991 -U lightdb -j 2 -F d /home/lightdb/datapump/example001.dmp > /home/lightdb/datapump/example001.log 2>&1 & echo $! DO select count(*) from bt01.a1; select count(*) from bt01.s1; select count(*) from bt01.d1; select count(*) from bt01.f1;
复制
最后修改时间:2024-04-18 10:17:05
「喜欢这篇文章,您的关注和赞赏是给作者最好的鼓励」
关注作者
【版权声明】本文为墨天轮用户原创内容,转载时必须标注文章的来源(墨天轮),文章链接,文章作者等基本信息,否则作者和墨天轮有权追究责任。如果您发现墨天轮中有涉嫌抄袭或者侵权的内容,欢迎发送邮件至:contact@modb.pro进行举报,并提供相关证据,一经查实,墨天轮将立刻删除相关内容。
文章被以下合辑收录
评论
相关阅读
2025年3月中国数据库排行榜:PolarDB夺魁傲群雄,GoldenDB晋位入三强
墨天轮编辑部
1652次阅读
2025-03-11 17:13:58
【专家观点】罗敏:从理论到真实SQL,感受DeepSeek如何做性能优化
墨天轮编辑部
1240次阅读
2025-03-06 16:45:38
【专家有话说第五期】在不同年龄段,DBA应该怎样规划自己的职业发展?
墨天轮编辑部
1219次阅读
2025-03-13 11:40:53
01. HarmonyOS Next应用开发实践与技术解析
若城
1177次阅读
2025-03-04 21:06:20
DeepSeek R1助力,腾讯AI代码助手解锁音乐创作新
若城
1161次阅读
2025-03-05 09:05:00
03 HarmonyOS Next仪表盘案例详解(二):进阶篇
若城
1160次阅读
2025-03-04 21:08:36
05 HarmonyOS NEXT高效编程秘籍:Arkts函数调用与声明优化深度解析
若城
1152次阅读
2025-03-04 22:46:06
04 高效HarmonyOS NEXT编程:ArkTS数据结构优化与属性访问最佳实践
若城
1142次阅读
2025-03-04 21:09:35
02 HarmonyOS Next仪表盘案例详解(一):基础篇
若城
1138次阅读
2025-03-04 21:07:43
06 HarmonyOS Next性能优化之LazyForEach 列表渲染基础与实现详解 (一)
若城
1132次阅读
2025-03-05 21:09:40
TA的专栏
热门文章
tcp keepalive 保活机制相关参数和LightDB长连接中断问题
2023-03-15 3887浏览
硬件知识之PCIe(一)
2022-11-09 3378浏览
AMD Zen 3 Ryzen 深入评测:5950X、5900X、5800X 和 5600X 测试
2023-07-07 2864浏览
ERROR: ORA-24345: A Truncation or null fetch error occurred (DBD SUCCESS_WITH_INFO: OCIStmtFetch
2023-05-24 2463浏览
Oracle Blob常见函数
2023-03-22 2057浏览
目录