1. Create directory and verify
---------------------------
sql> set lines 200
sql> set pages 50
sql> col directory_name format a25
sql> col owner format a15
sql> col directory_path format a60
sql> select * from dba_directories;
--
sql> Create or replace Directory PUMP_BK_DIR as '/u01/app/oracle/datapump_dir';
sql> grant EXP_FULL_DATABASE to sys;
sql> grant IMP_FULL_DATABASE to sys;
sql> grant READ, WRITE ON DIRECTORY PUMP_BK_DIR to sys;
---
sql> Drop directory Directory_name;
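=> CREATE DIRECTORY does not validate the OS path, so confirm it exists and is writable by the oracle OS user first
   (a quick shell check using the path from above):
$ mkdir -p /u01/app/oracle/datapump_dir
$ ls -ld /u01/app/oracle/datapump_dir
$ touch /u01/app/oracle/datapump_dir/.write_test && rm /u01/app/oracle/datapump_dir/.write_test
Note: SYS already holds all of these privileges implicitly; for a regular user (scott here is only an example),
grant them explicitly:
sql> grant READ, WRITE ON DIRECTORY PUMP_BK_DIR to scott;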
2. Check pre-import structure:
--------------------
select * from nls_database_parameters;
select name,(bytes/1024/1024/1024) size_gb from v$datafile;
select file_id,file_name,(bytes/1024/1024/1024) size_gb from dba_data_files;
set pagesize 0
set long 100000
--
SELECT dbms_metadata.get_ddl('USER','MEAI') FROM dual;
SELECT DBMS_METADATA.GET_GRANTED_DDL('ROLE_GRANT','MEAI') from dual;
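=> To capture the generated DDL into a rerunnable script, spool it with SQLTERMINATOR enabled so each statement
   ends in a ";" (a sketch; MEAI is the example user from above):
sql> set pagesize 0 long 100000 longchunksize 100000
sql> exec dbms_metadata.set_transform_param(dbms_metadata.session_transform,'SQLTERMINATOR',true);
sql> spool meai_ddl.sql
sql> SELECT dbms_metadata.get_ddl('USER','MEAI') FROM dual;
sql> SELECT dbms_metadata.get_granted_ddl('SYSTEM_GRANT','MEAI') FROM dual;
sql> spool off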
3. Conditional export / import:
----------------------------
$ expdp '"sys/oracle as sysdba"' query=wf_inorder:\"where status_code=\'Open\'\"
tables=esbuser.wf_inorder DIRECTORY=DUMP_DIR1 DUMPFILE=wf_inorder_open.dmp LOGFILE=wf_inorder_exp.log
$ impdp '"sys/oracle as sysdba"' DIRECTORY=DUMP_DIR1 DUMPFILE=wf_inorder_open.dmp LOGFILE=wf_inorder_imp.log
tables = esbuser.wf_inorder REMAP_TABLE = esbuser.wf_inorder:wf_inorder_copy;
--------------
$ expdp '"sys/oracle as sysdba"' query=wf_inorder:\"where status_code not in \(\'Complete\'\)\"
tables=esbuser.wf_inorder DIRECTORY=DUMP_DIR1 DUMPFILE=wf_inorder_open_4.dmp LOGFILE=wf_inorder_exp_4.log
$ impdp '"sys/oracle as sysdba"' DIRECTORY=DUMP_DIR1 DUMPFILE=wf_inorder_open_4.dmp LOGFILE=wf_inorder_imp_4.log
tables = esbuser.wf_inorder REMAP_TABLE = esbuser.wf_inorder:wf_inorder_4;
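=> The shell escaping above is error-prone; the same filter can be written unescaped in a parfile
   (wf_inorder_open.par is a hypothetical file name):
DIRECTORY = DUMP_DIR1
DUMPFILE = wf_inorder_open.dmp
LOGFILE = wf_inorder_exp.log
TABLES = esbuser.wf_inorder
QUERY = esbuser.wf_inorder:"where status_code = 'Open'"
--
$ expdp '"sys/oracle as sysdba"' parfile=wf_inorder_open.par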
4. Table Level export / import:
----------------------------
$ nohup expdp system/oracle DIRECTORY=DAILY_BK_DIR TABLES=scott.emp,scott.dept DUMPFILE=emp_dept.dmp LOGFILE=emp_dept.log &
$ exp '"sys/oracle as sysdba"' TABLES=sys.aud$ FILE=/u02/dbteam/Backup_DIR/aud.dmp LOG=/u02/dbteam/Backup_DIR/aud.log
$ expdp TABLES=scott.emp DIRECTORY=DAILY_BK_DIR DUMPFILE=emp_dept.DMP LOGFILE=exp_empdept.log
or
$ expdp scott/tiger DIRECTORY=DAILY_BK_DIR DUMPFILE=emp_dept.DMP LOGFILE=exp_empdept.log
or
$ expdp FULL=Y DIRECTORY=DAILY_BK_DIR DUMPFILE=emp_dept.DMP LOGFILE=exp_empdept.log
$ nohup expdp '"sys/oracle as sysdba"' DIRECTORY=DAILY_BK_DIR DUMPFILE=emp_dept.DMP \
    LOGFILE=<DIRECTORY_NAME>:exp_empdept.log 1>emp_dept_exp.log 2>emp_dept_err.log &
Note: If the table already exists, you cannot load it under the same name (see TABLE_EXISTS_ACTION below, or REMAP_TABLE in section 11). You can also load just one table (a part) from the dump file.
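=> TABLE_EXISTS_ACTION controls what impdp does when the target table exists (SKIP | APPEND | TRUNCATE | REPLACE).
   For example, to append rows into the existing table (a sketch reusing the dump from above):
$ impdp '"sys/oracle as sysdba"' DIRECTORY=DAILY_BK_DIR DUMPFILE=emp_dept.dmp LOGFILE=imp_emp_append.log \
    TABLES=scott.emp TABLE_EXISTS_ACTION=APPEND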
=> Estimate table backup / export size
expdp '"sys/oracle as sysdba"' TABLES=SIEBEL.S_SRM_REQUEST DIRECTORY=DUMP_DIR4 estimate_only=y NOLOGFILE=y
=> Create a parfile containing all the parameters that would otherwise be passed on the expdp command line.
Parfile_srm_request.par contents for reference:
---------------------------------------------------------------------------------------
DIRECTORY = DUMP_DIR4
DUMPFILE = S_SRM_REQUEST_31AUG18.dmp
LOGFILE = S_SRM_REQUEST_31AUG18_exp.log
SCHEMAS = SIEBEL
INCLUDE = TABLE:"IN ('S_SRM_REQUEST')"
QUERY = SIEBEL.S_SRM_REQUEST:"where status not in ('SUCCESS','ERROR','CANCELED')"
---------------------------------------------------------------------------------------
=> Export the table in dump file:
$ export ORACLE_SID=sblprd1
$ echo $ORACLE_SID
$ expdp '"sys/oracle as sysdba"' parfile=parfile_srm_request.par
=> One-line export command (note the shell escaping of the QUERY clause):
$ expdp '"sys/oracle as sysdba"' query=S_SRM_REQUEST:\"where status not in \(\'SUCCESS\'\,\'ERROR\'\,\'CANCELED\'\)\" tables=SIEBEL.S_SRM_REQUEST \
    DIRECTORY=DUMP_DIR4 DUMPFILE=S_SRM_REQUEST_31AUG18.dmp LOGFILE=S_SRM_REQUEST_31AUG18_exp.log
---
$ expdp '"sys/oracle as sysdba"' query=S_SRM_REQUEST:\"where status not in \(\'SUCCESS\'\)\" tables=SIEBEL.S_SRM_REQUEST DIRECTORY=DUMP_DIR4 DUMPFILE=S_SRM_REQUEST_31AUG18.dmp LOGFILE=S_SRM_REQUEST_31AUG18_exp.log
------
$ impdp '"sys/oracle as sysdba"' DIRECTORY=DAILY_BK_DIR DUMPFILE=emp_dept.dmp LOGFILE=imp_dept.log
5. Parallel export / import:
-------------------------
nohup expdp system/password schemas=WEB directory=mydir dumpfile=WEB%U.dmp FILESIZE=500G PARALLEL=32 logfile=expWEB_$(date +"%Y%m%d-%H%M%S").log &
--
nohup impdp '"sys/sysora10g as sysdba"' schemas=WEB directory=mydir dumpfile=WEB%U.dmp PARALLEL=32 logfile=ImpWEB_19Jul22.log JOB_NAME=ImpWEB
Note:......
ORA-39095: Dump file space has been exhausted: Unable to allocate 8192 bytes
Parallel Export Fails With Error ORA-39095 If There Are Less Specified Files Than Slaves (Doc ID 433391.1)
Cause: If there are fewer dump files than parallel processes, a dump file may be locked by one process while the
other processes wait to write to it. Sometimes the lock is not released even after the process finishes, so the
entire job stalls while the remaining processes wait on the lock.
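Fix: Keep the number of dump files at least equal to the PARALLEL degree, either by using a %U dumpfile template
with a sensible FILESIZE (as in the export above), or by attaching to the stalled job and adding files or lowering
the degree (the job name below is illustrative):
$ expdp '"sys/oracle as sysdba"' ATTACH=SYS_EXPORT_SCHEMA_01
Export> ADD_FILE=WEB_extra%U.dmp
Export> PARALLEL=8
Export> CONTINUE_CLIENT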
6. Schema Level:
-------------
$ expdp system/oracle DIRECTORY=DAILY_BK_DIR SCHEMAS=scott DUMPFILE=scott.dmp LOGFILE=expscott.log
$ impdp system/oracle DIRECTORY=DAILY_BK_DIR DUMPFILE=scott.dmp LOGFILE=impscott.log
--
$ impdp system/oracle DIRECTORY=DAILY_BK_DIR REMAP_SCHEMA=Scott:scott1 DUMPFILE=scott.dmp LOGFILE=impscott.log
Note: When REMAP_SCHEMA is used, Data Pump creates the target schema if it does not exist (provided the dump contains the CREATE USER metadata and you import with sufficient privileges) and loads all objects into it; you can also load into an existing schema.
nohup expdp '"sys/pass as sysdba"' schemas=DAWOOD DIRECTORY=MYDIR1 dumpfile=DAWOOD.dmp LOGFILE=DAWOODIMP.log >> /backup1/dmp/nohup_DAWOOD.log 2>&1
nohup impdp '"sys/pass as sysdba"' schemas=DAWOOD DIRECTORY=MYDIR1 dumpfile=DAWOOD.dmp LOGFILE=DAWOODIMP.log >> /backup1/dmp/nohup_DAWOOD.log 2>&1
7. Schema Level excluding tables:
------------------------------
$ nohup expdp \"/ as sysdba\" parfile=siebel_bk_parfile.par &
DIRECTORY = Dir_name
DUMPFILE = Backup_file_name.dmp
LOGFILE = logfile_name.log
SCHEMAS = Schema_name
PARALLEL = 4
EXCLUDE = TABLE:"IN ('table-1','table-2','table-3')"
QUERY = SIEBEL.S_SRM_REQUEST:"where status not in ('SUCCESS','ERROR','CANCELED')"
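EXCLUDE also accepts LIKE patterns and whole object classes, e.g. to skip scratch tables and statistics
(the TMP_% naming is hypothetical):
EXCLUDE = TABLE:"LIKE 'TMP_%'"
EXCLUDE = STATISTICS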
8. Schema Level include:
---------------------
param1.par contents
-------------------
DIRECTORY = S_SRM_REQUEST
DUMPFILE = S_SRM_REQUEST_%U.dmp
LOGFILE = S_SRM_REQUEST.log
SCHEMAS = SIEBEL
PARALLEL = 4
INCLUDE = TABLE:"IN ('S_SRM_REQUEST')"
QUERY = SIEBEL.S_SRM_REQUEST:"where status not in ('SUCCESS','ERROR','CANCELED')"
9. Tablespace Level:
-----------------
$ expdp system/oracle TABLESPACES=users DIRECTORY=DAILY_BK_DIR DUMPFILE=tbsaxyz.dmp LOGFILE=tbsxyz.log
Note: Create a tablespace with the same name in the target database, then run the import.
$ impdp system/oracle TABLESPACES=users DIRECTORY=DAILY_BK_DIR DUMPFILE=tbsaxyz.dmp LOGFILE=imp_tbsxyz.log
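=> If the target tablespace has a different name, REMAP_TABLESPACE avoids pre-creating one with the same name
   (users_new is illustrative):
$ impdp system/oracle DIRECTORY=DAILY_BK_DIR DUMPFILE=tbsaxyz.dmp LOGFILE=imp_tbsxyz_remap.log REMAP_TABLESPACE=users:users_new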
10. Database Level:
---------------
$ expdp system/oracle FULL=Y DIRECTORY=DAILY_BK_DIR DUMPFILE=fulldb.dmp LOGFILE=fulldb.log
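=> For a large database, a parallel and read-consistent full export is a common variant (a sketch; on recent
   versions FLASHBACK_TIME=SYSTIMESTAMP makes the dump consistent as of the export start):
$ expdp system/oracle FULL=Y DIRECTORY=DAILY_BK_DIR DUMPFILE=fulldb_%U.dmp PARALLEL=4 FILESIZE=20G \
    FLASHBACK_TIME=SYSTIMESTAMP LOGFILE=fulldb_par.log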
11. Remap:
-------
$ impdp '"sys/oracle as sysdba"' DIRECTORY=DAILY_BK_DIR DUMPFILE=emp_dept.dmp LOGFILE=emp_dept.log TABLES=scott.emp,scott.dept
REMAP_TABLE=scott.emp:emp_new,scott.dept:dept_new
-----
$ impdp '"sys/oracle as sysdba"' DIRECTORY=DAILY_BK_DIR DUMPFILE=emp.dmp LOGFILE=emp_imp.log TABLES=scott.emp REMAP_TABLE=scott.emp:EMP_BK
12. Monitor import progress using SQL queries and the Data Pump utility:
**************************************************************
set lines 200;
col owner_name format a10;
col job_name format a20;
col operation format a30;
col job_mode format a10;
col state format a15;
col operation format a13;
Select * from dba_datapump_jobs;
-------------------------------------------------------------------------------------------------------------------
OWNER_NAME JOB_NAME OPERATION JOB_MODE STATE DEGREE ATTACHED_SESSIONS DATAPUMP_SESSIONS
---------- --------------- ------------- ---------- --------------- ---------- ----------------- -----------------
SYS IMPBILL IMPORT SCHEMA EXECUTING 1 1 3
-------------------------------------------------------------------------------------------------------------------
Select * from dba_datapump_sessions order by inst_id;
------------------------------------------------------------------------
OWNER_NAME JOB_NAME INST_ID SADDR SESSION_TYPE
---------- --------------- ---------- ---------------- --------------
SYS IMPBILL 1 0700010291D83420 DBMS_DATAPUMP
SYS IMPBILL 1 0700010291A20E10 WORKER
SYS IMPBILL 1 0700010291866790 MASTER
------------------------------------------------------------------------
col event format a35;
col client_info format a20;
col state format a20;
col spid format a10;
select sid, start_time, totalwork, sofar, (sofar/totalwork) * 100 pct_done
from v$session_longops
where totalwork > sofar AND opname NOT LIKE '%aggregate%';
--
col opname format a30;
Select sid, serial#,opname, context, timestamp, time_remaining,
elapsed_seconds, sofar, totalwork,ROUND(SOFAR/TOTALWORK*100,2) "% complete"
From v$session_longops where totalwork != 0 AND SOFAR <> totalwork;
--
col opname format a15;
col target format a10;
col units format a10;
col message format a70;
select * from
( select opname, start_time, target, sofar, totalwork, units, elapsed_seconds, message
from gv$session_longops order by start_time desc ) where rownum <=1;
--
set linesize 140;
column opname format a50;
SELECT SID, SERIAL#, CONTEXT,opname,SOFAR, TOTALWORK,
ROUND(SOFAR/TOTALWORK*100,2) "%COMPLETE"
FROM GV$SESSION_LONGOPS
--WHERE OPNAME LIKE 'SYS_EXPORT%'
WHERE OPNAME LIKE 'SYS_IMP%'
AND OPNAME NOT LIKE '%aggregate%'
AND TOTALWORK != 0
AND SOFAR <> TOTALWORK;
--
select object_path, comments from schema_export_objects
where object_path like '%grant' and object_path not like '%/%';
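--
=> Map Data Pump sessions to OS processes (uses the col spid setting above; standard dictionary joins):
select s.sid, s.serial#, p.spid, s.module, s.action
from v$session s, v$process p, dba_datapump_sessions d
where s.paddr = p.addr and s.saddr = d.saddr;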
=> Using the Data Pump utility
************************
Select * from DBA_DATAPUMP_JOBS where state not in ('NOT RUNNING');
Note: Pick the job name and attach to it from a new expdp/impdp session:
--
$ impdp '"sys/oracle as sysdba"' ATTACH=<job_name>
$ impdp '"sys/oracle as sysdba"' ATTACH=IMPWEB
--
Import> status
--
Once you are attached to the job, type KILL_JOB or STOP_JOB=IMMEDIATE:
Import> kill_job
or
Import> stop_job=immediate
=> To start a job
Import> start_job[=SKIP_CURRENT]
Import> CONTINUE_CLIENT -> to check progress
=> Restart Job
import> start_job
import> continue_client
Job IMPWEB has been reopened at Wed Jul 20 15:36:45 2022
Restarting "SYS"."IMPWEB": "sys/******** AS SYSDBA" schemas=WEB directory=mydir dumpfile=WEB%U.dmp PARALLEL=32 LOGFILE=ImpWEB_19Jul22.log JOB_NAME=ImpWEB
13. Dropping unused (orphaned) jobs:
----------------------
The following statement generates the DROP TABLE command for the orphaned job's master table:
Doc ID 336014.1 - How To Cleanup Orphaned DataPump Jobs In DBA_DATAPUMP_JOBS ?
set lines 200;
col owner_name format a10;
col job_name format a20;
col operation format a30;
col job_mode format a10;
col state format a15;
col operation format a13;
Select * from dba_datapump_jobs;
---
select 'drop table '||o.owner||'.'||o.object_name||' purge;'
from dba_objects o, dba_datapump_jobs j
where o.owner=j.owner_name and o.object_name=j.job_name
and j.job_name not like 'BIN$%';
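The generated command looks like "drop table SYS.IMPWEB purge;" (IMPWEB being the example job above). Only drop the
master table when the job state is NOT RUNNING and no sessions are attached; afterwards, confirm the job is gone:
Select * from dba_datapump_jobs;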
14.References:
Doc ID 1400974.1 - How To Break And Restart A DataPump Export Or Import Job