Preface
I. Partitioned table cleanup
Goal:
Keep three months of data in the partitioned table BI_DW.KPY_FACT_CAYENNE_INPUT; the partition key is TEST_START_TIME.
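To see which partitions fall outside the three-month window before doing anything, the data dictionary can be queried directly; a minimal sketch, assuming the PTT_PYYYYMM partition naming this table uses:
SELECT PARTITION_NAME, PARTITION_POSITION
FROM DBA_TAB_PARTITIONS
WHERE TABLE_OWNER = 'BI_DW'
AND TABLE_NAME = 'KPY_FACT_CAYENNE_INPUT'
AND PARTITION_NAME < 'PTT_P' || TO_CHAR(ADD_MONTHS(SYSDATE, -3), 'YYYYMM')
ORDER BY PARTITION_POSITION;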
1. Check the table size:
SELECT owner,
segment_name,
SEGMENT_TYPE,
ROUND (SUM (bytes) / 1024 / 1024 / 1024, 2) GB
FROM dba_segments
WHERE SEGMENT_TYPE LIKE 'TABLE%'
GROUP BY segment_name, SEGMENT_TYPE, owner
ORDER BY GB DESC;
BI_DW KPY_FACT_CAYENNE_INPUT TABLE PARTITION 647.1 541.8   -- two size readings in GB (before / after the cleanup)
select 647.1 - 541.8 from dual; --105.3 GB released
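The same dba_segments data, broken down per partition, shows where that space sits:
SELECT PARTITION_NAME, ROUND(BYTES / 1024 / 1024 / 1024, 2) GB
FROM DBA_SEGMENTS
WHERE OWNER = 'BI_DW'
AND SEGMENT_NAME = 'KPY_FACT_CAYENNE_INPUT'
AND SEGMENT_TYPE = 'TABLE PARTITION'
ORDER BY PARTITION_NAME;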
2. Log on to the OS and confirm there is enough free space for the export files:
df -h
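For a rough upper bound on the space the dump files could need, the partition segments to be exported (positions 1 through 14, as established in step 4) can be summed up front; COMPRESSION=ALL in the expdp commands below usually keeps the files well under this:
SELECT ROUND(SUM(S.BYTES) / 1024 / 1024 / 1024, 2) GB_UPPER_BOUND
FROM DBA_SEGMENTS S
JOIN DBA_TAB_PARTITIONS P
ON P.TABLE_OWNER = S.OWNER
AND P.TABLE_NAME = S.SEGMENT_NAME
AND P.PARTITION_NAME = S.PARTITION_NAME
WHERE S.OWNER = 'BI_DW'
AND S.SEGMENT_NAME = 'KPY_FACT_CAYENNE_INPUT'
AND S.SEGMENT_TYPE = 'TABLE PARTITION'
AND P.PARTITION_POSITION BETWEEN 1 AND 14;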
3. Check whether the parallel parameters are enabled in the DB, and where the exported data will be written:
select * from dba_directories;
LHC /Data/expdp/lhc
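If no suitable directory object existed yet, one could be created and granted like this (a sketch; the grantee is a placeholder for whichever account runs expdp):
CREATE OR REPLACE DIRECTORY LHC AS '/Data/expdp/lhc';
GRANT READ, WRITE ON DIRECTORY LHC TO SOME_EXPORT_USER;  -- placeholder grantee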
select * from v$parameter where name like '%parallel%';
alter system set parallel_servers_target=30;  -- raised temporarily for the export and index rebuilds; restored in step 9
alter system set parallel_max_servers=30;
select * from dba_datapump_jobs;  -- confirm no other Data Pump job is already running
4. Export the data with expdp:
First and last partitions to be exported (their positions bound the range used in the script below):
BI_DW.KPY_FACT_CAYENNE_INPUT
PTT_P202007
PTT_P202108
Generate the export script:
SELECT PARTITION_POSITION FROM DBA_TAB_PARTITIONS
WHERE TABLE_OWNER='BI_DW'
AND TABLE_NAME='KPY_FACT_CAYENNE_INPUT' AND PARTITION_NAME='PTT_P202007'
UNION
SELECT PARTITION_POSITION FROM DBA_TAB_PARTITIONS
WHERE TABLE_OWNER='BI_DW'
AND TABLE_NAME='KPY_FACT_CAYENNE_INPUT' AND PARTITION_NAME='PTT_P202108';  -- returns the positions of the first and last partitions (1 and 14 here)
SELECT 'expdp ********/******** directory=LHC dumpfile='
|| TABLE_OWNER
|| '_'
|| TABLE_NAME
|| '_'
|| PARTITION_NAME
|| '_%U.dmp filesize=30G tables='
|| TABLE_OWNER
|| '.'
|| TABLE_NAME
|| ':'
|| PARTITION_NAME
|| ' CONTENT=DATA_ONLY parallel=10 COMPRESSION=ALL logfile='
|| TABLE_OWNER
|| '_'
|| TABLE_NAME
|| '_'
|| PARTITION_NAME
|| '.log'
FROM DBA_TAB_PARTITIONS
WHERE TABLE_OWNER = 'BI_DW'
AND TABLE_NAME = 'KPY_FACT_CAYENNE_INPUT'
AND PARTITION_POSITION BETWEEN 1 AND 14;
====> Paste the generated commands into a script: vi BI_DW_KPY_FACT_CAYENNE_INPUT.sh
expdp ********/******** directory=LHC dumpfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202007_%U.dmp filesize=30G tables=BI_DW.KPY_FACT_CAYENNE_INPUT:PTT_P202007 CONTENT=DATA_ONLY parallel=10 COMPRESSION=ALL logfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202007.log
expdp ********/******** directory=LHC dumpfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202008_%U.dmp filesize=30G tables=BI_DW.KPY_FACT_CAYENNE_INPUT:PTT_P202008 CONTENT=DATA_ONLY parallel=10 COMPRESSION=ALL logfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202008.log
expdp ********/******** directory=LHC dumpfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202009_%U.dmp filesize=30G tables=BI_DW.KPY_FACT_CAYENNE_INPUT:PTT_P202009 CONTENT=DATA_ONLY parallel=10 COMPRESSION=ALL logfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202009.log
expdp ********/******** directory=LHC dumpfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202010_%U.dmp filesize=30G tables=BI_DW.KPY_FACT_CAYENNE_INPUT:PTT_P202010 CONTENT=DATA_ONLY parallel=10 COMPRESSION=ALL logfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202010.log
expdp ********/******** directory=LHC dumpfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202011_%U.dmp filesize=30G tables=BI_DW.KPY_FACT_CAYENNE_INPUT:PTT_P202011 CONTENT=DATA_ONLY parallel=10 COMPRESSION=ALL logfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202011.log
expdp ********/******** directory=LHC dumpfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202012_%U.dmp filesize=30G tables=BI_DW.KPY_FACT_CAYENNE_INPUT:PTT_P202012 CONTENT=DATA_ONLY parallel=10 COMPRESSION=ALL logfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202012.log
expdp ********/******** directory=LHC dumpfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202101_%U.dmp filesize=30G tables=BI_DW.KPY_FACT_CAYENNE_INPUT:PTT_P202101 CONTENT=DATA_ONLY parallel=10 COMPRESSION=ALL logfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202101.log
expdp ********/******** directory=LHC dumpfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202102_%U.dmp filesize=30G tables=BI_DW.KPY_FACT_CAYENNE_INPUT:PTT_P202102 CONTENT=DATA_ONLY parallel=10 COMPRESSION=ALL logfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202102.log
expdp ********/******** directory=LHC dumpfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202103_%U.dmp filesize=30G tables=BI_DW.KPY_FACT_CAYENNE_INPUT:PTT_P202103 CONTENT=DATA_ONLY parallel=10 COMPRESSION=ALL logfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202103.log
expdp ********/******** directory=LHC dumpfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202104_%U.dmp filesize=30G tables=BI_DW.KPY_FACT_CAYENNE_INPUT:PTT_P202104 CONTENT=DATA_ONLY parallel=10 COMPRESSION=ALL logfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202104.log
expdp ********/******** directory=LHC dumpfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202105_%U.dmp filesize=30G tables=BI_DW.KPY_FACT_CAYENNE_INPUT:PTT_P202105 CONTENT=DATA_ONLY parallel=10 COMPRESSION=ALL logfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202105.log
expdp ********/******** directory=LHC dumpfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202106_%U.dmp filesize=30G tables=BI_DW.KPY_FACT_CAYENNE_INPUT:PTT_P202106 CONTENT=DATA_ONLY parallel=10 COMPRESSION=ALL logfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202106.log
expdp ********/******** directory=LHC dumpfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202107_%U.dmp filesize=30G tables=BI_DW.KPY_FACT_CAYENNE_INPUT:PTT_P202107 CONTENT=DATA_ONLY parallel=10 COMPRESSION=ALL logfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202107.log
expdp ********/******** directory=LHC dumpfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202108_%U.dmp filesize=30G tables=BI_DW.KPY_FACT_CAYENNE_INPUT:PTT_P202108 CONTENT=DATA_ONLY parallel=10 COMPRESSION=ALL logfile=BI_DW_KPY_FACT_CAYENNE_INPUT_PTT_P202108.log
====> Run it: sh BI_DW_KPY_FACT_CAYENNE_INPUT.sh
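While the script runs, the exports can be watched from the database side by re-querying the view checked in step 3:
SELECT OWNER_NAME, JOB_NAME, OPERATION, JOB_MODE, STATE, DEGREE
FROM DBA_DATAPUMP_JOBS
WHERE STATE = 'EXECUTING';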
5. Truncate the partitions:
--- Only truncate after confirming that the row counts match what the expdp logs report as exported. Generate the count statements:
SELECT 'SELECT COUNT(*) FROM '||TABLE_OWNER||'.'||TABLE_NAME||' PARTITION ('||PARTITION_NAME||');'
FROM DBA_TAB_PARTITIONS
WHERE TABLE_OWNER = 'BI_DW'
AND TABLE_NAME = 'KPY_FACT_CAYENNE_INPUT'
AND PARTITION_POSITION BETWEEN 1 AND 14;
SELECT COUNT(*) FROM BI_DW.KPY_FACT_CAYENNE_INPUT PARTITION (PTT_P202007);
SELECT COUNT(*) FROM BI_DW.KPY_FACT_CAYENNE_INPUT PARTITION (PTT_P202008);
SELECT COUNT(*) FROM BI_DW.KPY_FACT_CAYENNE_INPUT PARTITION (PTT_P202009);
SELECT COUNT(*) FROM BI_DW.KPY_FACT_CAYENNE_INPUT PARTITION (PTT_P202010);
SELECT COUNT(*) FROM BI_DW.KPY_FACT_CAYENNE_INPUT PARTITION (PTT_P202011);
SELECT COUNT(*) FROM BI_DW.KPY_FACT_CAYENNE_INPUT PARTITION (PTT_P202012);
SELECT COUNT(*) FROM BI_DW.KPY_FACT_CAYENNE_INPUT PARTITION (PTT_P202101);
SELECT COUNT(*) FROM BI_DW.KPY_FACT_CAYENNE_INPUT PARTITION (PTT_P202102);
SELECT COUNT(*) FROM BI_DW.KPY_FACT_CAYENNE_INPUT PARTITION (PTT_P202103);
SELECT COUNT(*) FROM BI_DW.KPY_FACT_CAYENNE_INPUT PARTITION (PTT_P202104);
SELECT COUNT(*) FROM BI_DW.KPY_FACT_CAYENNE_INPUT PARTITION (PTT_P202105);
SELECT COUNT(*) FROM BI_DW.KPY_FACT_CAYENNE_INPUT PARTITION (PTT_P202106);
SELECT COUNT(*) FROM BI_DW.KPY_FACT_CAYENNE_INPUT PARTITION (PTT_P202107);
SELECT COUNT(*) FROM BI_DW.KPY_FACT_CAYENNE_INPUT PARTITION (PTT_P202108);
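The same counts can also be collected in a single anonymous PL/SQL block; a minimal sketch (run with server output enabled):
SET SERVEROUTPUT ON
DECLARE
  V_COUNT NUMBER;
BEGIN
  FOR P IN (SELECT TABLE_OWNER, TABLE_NAME, PARTITION_NAME
              FROM DBA_TAB_PARTITIONS
             WHERE TABLE_OWNER = 'BI_DW'
               AND TABLE_NAME = 'KPY_FACT_CAYENNE_INPUT'
               AND PARTITION_POSITION BETWEEN 1 AND 14
             ORDER BY PARTITION_POSITION)
  LOOP
    EXECUTE IMMEDIATE 'SELECT COUNT(*) FROM ' || P.TABLE_OWNER || '.' || P.TABLE_NAME ||
                      ' PARTITION (' || P.PARTITION_NAME || ')' INTO V_COUNT;
    DBMS_OUTPUT.PUT_LINE(P.PARTITION_NAME || ': ' || V_COUNT);
  END LOOP;
END;
/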
After the counts have been confirmed against the export, truncate the partitions:
SELECT 'ALTER TABLE '||TABLE_OWNER||'.'||TABLE_NAME||' TRUNCATE PARTITION '||PARTITION_NAME||' drop storage;'
FROM DBA_TAB_PARTITIONS
WHERE TABLE_OWNER = 'BI_DW'
AND TABLE_NAME = 'KPY_FACT_CAYENNE_INPUT'
AND PARTITION_POSITION BETWEEN 1 AND 14;
ALTER TABLE BI_DW.KPY_FACT_CAYENNE_INPUT TRUNCATE PARTITION PTT_P202007 drop storage;
ALTER TABLE BI_DW.KPY_FACT_CAYENNE_INPUT TRUNCATE PARTITION PTT_P202008 drop storage;
ALTER TABLE BI_DW.KPY_FACT_CAYENNE_INPUT TRUNCATE PARTITION PTT_P202009 drop storage;
ALTER TABLE BI_DW.KPY_FACT_CAYENNE_INPUT TRUNCATE PARTITION PTT_P202010 drop storage;
ALTER TABLE BI_DW.KPY_FACT_CAYENNE_INPUT TRUNCATE PARTITION PTT_P202011 drop storage;
ALTER TABLE BI_DW.KPY_FACT_CAYENNE_INPUT TRUNCATE PARTITION PTT_P202012 drop storage;
ALTER TABLE BI_DW.KPY_FACT_CAYENNE_INPUT TRUNCATE PARTITION PTT_P202101 drop storage;
ALTER TABLE BI_DW.KPY_FACT_CAYENNE_INPUT TRUNCATE PARTITION PTT_P202102 drop storage;
ALTER TABLE BI_DW.KPY_FACT_CAYENNE_INPUT TRUNCATE PARTITION PTT_P202103 drop storage;
ALTER TABLE BI_DW.KPY_FACT_CAYENNE_INPUT TRUNCATE PARTITION PTT_P202104 drop storage;
ALTER TABLE BI_DW.KPY_FACT_CAYENNE_INPUT TRUNCATE PARTITION PTT_P202105 drop storage;
ALTER TABLE BI_DW.KPY_FACT_CAYENNE_INPUT TRUNCATE PARTITION PTT_P202106 drop storage;
ALTER TABLE BI_DW.KPY_FACT_CAYENNE_INPUT TRUNCATE PARTITION PTT_P202107 drop storage;
ALTER TABLE BI_DW.KPY_FACT_CAYENNE_INPUT TRUNCATE PARTITION PTT_P202108 drop storage;
6. Rebuild indexes:
In testing, truncating a partition only marks the hash indexes and ordinary indexes UNUSABLE when the partition actually contained data that was truncated.
Check for unusable index partitions (the hash indexes go UNUSABLE):
SELECT * FROM DBA_IND_PARTITIONS WHERE INDEX_OWNER = 'BI_DW' AND STATUS = 'UNUSABLE';
If the unusable indexes are partitioned indexes (including the hash indexes), they can be rebuilt with the script below:
SELECT 'alter index '||INDEX_OWNER||'.'||INDEX_NAME||' rebuild partition '||PARTITION_NAME|| ' online nologging parallel 10;'
FROM DBA_IND_PARTITIONS
WHERE INDEX_OWNER='BI_DW' AND STATUS='UNUSABLE';
For the hash indexes, the ONLINE and NOLOGGING clauses cannot be used.
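Reading "hash indexes" here as hash-partitioned indexes, the generator above can be joined to DBA_PART_INDEXES to produce plain rebuilds without the ONLINE/NOLOGGING clauses; a sketch:
SELECT 'alter index '||IP.INDEX_OWNER||'.'||IP.INDEX_NAME||' rebuild partition '||IP.PARTITION_NAME||' parallel 10;'
FROM DBA_IND_PARTITIONS IP
JOIN DBA_PART_INDEXES PI
ON PI.OWNER = IP.INDEX_OWNER
AND PI.INDEX_NAME = IP.INDEX_NAME
WHERE IP.INDEX_OWNER = 'BI_DW'
AND IP.STATUS = 'UNUSABLE'
AND PI.PARTITIONING_TYPE = 'HASH';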
Check for unusable ordinary indexes:
SELECT * FROM DBA_INDEXES WHERE STATUS = 'UNUSABLE';
If the unusable index is an ordinary index, use a script like the following.
For example:
alter INDEX ZBCDMP2_DMPDB2.IDX_R_WIP_LOG_T_ID rebuild nologging parallel 10;
alter index ZBCDMP2_DMPDB2.IDX_R_WIP_LOG_T_ID noparallel;
alter index ZBCDMP2_DMPDB2.IDX_R_WIP_LOG_T_ID initrans 200;
----- Check what the original INITRANS was and keep it the same before and after the rebuild.
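The original INITRANS (and the parallel degree) of an index can be read from DBA_INDEXES before the rebuild so it can be put back afterwards; a sketch using the example index above:
SELECT OWNER, INDEX_NAME, INI_TRANS, DEGREE
FROM DBA_INDEXES
WHERE OWNER = 'ZBCDMP2_DMPDB2'
AND INDEX_NAME = 'IDX_R_WIP_LOG_T_ID';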
7. Check whether any stored procedures have become invalid:
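A quick dictionary check for invalid PL/SQL objects:
SELECT OWNER, OBJECT_NAME, OBJECT_TYPE
FROM DBA_OBJECTS
WHERE STATUS = 'INVALID'
ORDER BY OWNER, OBJECT_TYPE, OBJECT_NAME;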
--- If any are invalid, recompile them:
either click compile on the invalid stored procedures in the "used by" view of the GUI tool,
or wait until all of the jobs above have finished and recompile everything at the end:
cd $ORACLE_HOME/rdbms/admin
sqlplus / as sysdba
SQL> @utlrp.sql
8. Gather statistics:
execute dbms_stats.gather_table_stats -
(ownname => 'MLBII', tabname => 'R_WIP_LOG_T', -
estimate_percent => DBMS_STATS.AUTO_SAMPLE_SIZE, method_opt => 'for all columns size auto', -
cascade => true, degree => 6);
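Applied to the table cleaned up in this note, with the same sampling and degree settings as the template above, the call can be wrapped in an anonymous block so it runs as-is (a sketch):
BEGIN
  DBMS_STATS.GATHER_TABLE_STATS(
    OWNNAME          => 'BI_DW',
    TABNAME          => 'KPY_FACT_CAYENNE_INPUT',
    ESTIMATE_PERCENT => DBMS_STATS.AUTO_SAMPLE_SIZE,
    METHOD_OPT       => 'for all columns size auto',
    CASCADE          => TRUE,
    DEGREE           => 6);
END;
/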
9. Restore the DB parameter values:
select * from v$parameter where name like '%parallel%';
alter system set parallel_servers_target=0;
alter system set parallel_max_servers=0;
select * from dba_indexes where degree > 1;  -- confirm no index was left with a parallel degree after the rebuilds
Copyright notice: this is an original article by weixin_40068220, licensed under CC 4.0 BY-SA. Please include a link to the original source and this notice when reposting.