Monday, December 9, 2019

Blocking sessions and tablespace

set pages 1000
set long 2000
    set colsep '  |  '
    set linesize 190
    col LOCKED_EVT for a15
    col SEC_WAIT for 99999999
    col LOCKED_USER for a60
    col LOCKER_USER for a60


    with t as (
        select
            a.event as LOCKED_EVT,
            a.wait_time_micro/1000000 as LOCKED_WAIT_SEC,
            a.osuser as locked_osuser,
            a.schemaname as LOCKED_SCHEMANAME,
            a.machine as locked_machine,
            a.sid as locked_sid,
            a.serial# as LOCKED_SERIAL,
            a.program as locked_program,
            a.Action as locked_Action,
            a.Module as locked_Module,
            prior a.sid as LOCKER_SID,
            prior a."SERIAL#" as LOCKER_SERIAL,
            prior a.osuser as locker_osuser,
            prior a.schemaname as LOCKER_SCHEMANAME,
            prior a.machine as locker_machine,
            prior a.program as locker_program,
            prior a.Action as locker_Action,
            prior a.Module as locker_Module,
            prior a.seconds_in_wait as LOCKER_SECONDS_IN_WAIT,
            a.sql_id as locked_sql_id,
            a.prev_sql_id as locked_prev_sql_id,
            prior a.sql_id as locker_sql_id,
            prior a.prev_sql_id as locker_prev_sql_id,
            prior a.sql_address,
            prior a.event as LOCKER_EVT,
            a.final_blocking_session,
            level lvl,
            prior a.status
        from
            gv$session a
        where
            a.blocking_session is not null
        and
            level>1
        connect by
            nocycle prior a.sid= a.blocking_session
        start with
            a.blocking_session is null
    )
    select
        t.LOCKED_WAIT_SEC as SEC_WAIT,
        t.LOCKED_EVT,
        'Sid: ' || t.locked_sid || chr(10) ||
            'Serial: ' || t.LOCKED_SERIAL || chr(10) ||
            'OS user: ' || t.locked_osuser  || chr(10) ||
            'Schema: ' || t.LOCKED_SCHEMANAME || chr(10)||
            'Machine: ' || t.locked_machine || chr(10) ||
            'Program: ' || locked_program || chr(10) ||
            'ACTION: ' || locked_Action || chr(10) ||
            'Module: ' || locked_Module || chr(10) || chr(10) ||
            'Curr sql: ' || t.locked_sql_id || chr(10)||
            SUBSTR((select s.sql_text from v$sql s where s.sql_id=t.locked_sql_id and s.loaded_versions=1  and rownum=1),0,250) || chr(10) || chr(10) ||
            'Prev sql: ' || t.locked_prev_sql_id || chr(10)||
            SUBSTR((select s.sql_text from v$sql s where s.sql_id=t.locked_prev_sql_id and s.loaded_versions=1 and rownum=1),0,250) LOCKED_USER,
        'Sid: ' || t.LOCKER_SID || chr(10) ||
            'Serial: ' || t.LOCKER_SERIAL || chr(10) ||
            'OS user: ' || t.locker_osuser  || chr(10) ||
            'Schema: ' || t.LOCKER_SCHEMANAME || chr(10)||
            'Machine: ' || t.locker_machine || chr(10) ||
            'Program: ' || locker_program || chr(10) ||
            'Action: ' || locker_Action || chr(10) ||
            'Module: ' || locker_Module || chr(10) || chr(10) ||
            'Curr sql: ' || t.locker_sql_id || chr(10)||
            SUBSTR((select s.sql_text from v$sql s where s.sql_id=t.locker_sql_id and s.loaded_versions=1 and rownum=1),0,250) || chr(10) || chr(10) ||
            'Prev sql: ' || t.locker_prev_sql_id || chr(10)||
            SUBSTR((select s.sql_text from v$sql s where s.sql_id=t.locker_prev_sql_id and s.loaded_versions=1 and rownum=1),0,250) LOCKER_USER
    from
        t
    -- where locked_evt like 'enq%'
    order by
        lvl desc,decode(substr(locked_evt,1,3),'enq',0,'log',2,1), LOCKED_WAIT_SEC desc
    /
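
Once the blocker is identified from LOCKER_SID and LOCKER_SERIAL above, the offending session can be terminated if that is appropriate. A minimal sketch (the sid and serial# values are placeholders; on RAC the instance can be named with @inst_id):

alter system kill session '123,45678' immediate;
-- RAC variant, naming the instance explicitly
alter system kill session '123,45678,@2' immediate;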


Tablespace usage

set lines 160
col name for a30
col file_name for a60
set pages 1000
select (select decode(extent_management,'LOCAL','*',' ')
          from dba_tablespaces where tablespace_name = b.tablespace_name) ||
nvl(b.tablespace_name,
             nvl(a.tablespace_name,'UNKNOWN')) name,
       Mega_alloc Mega,
       Mega_alloc-nvl(Mega_free,0) used,
       nvl(Mega_free,0) free,
       ((Mega_alloc-nvl(Mega_free,0))/
                          Mega_alloc)*100 pct_used,
       nvl(largest,0) largest,
       nvl(Mega_max,Mega_alloc) Max_Size,
       decode( Mega_max, 0, 0, (Mega_alloc/Mega_max)*100) pct_max_used
from ( select sum(bytes)/1024/1024 Mega_free,
              max(bytes)/1024/1024 largest,
              tablespace_name
       from  sys.dba_free_space
       group by tablespace_name ) a,
     ( select sum(bytes)/1024/1024 Mega_alloc,
              sum(maxbytes)/1024/1024 Mega_max,
              tablespace_name
       from sys.dba_data_files
       group by tablespace_name
       union all
      select sum(bytes)/1024/1024 Mega_alloc,
              sum(maxbytes)/1024/1024 Mega_max,
              tablespace_name
       from sys.dba_temp_files
       group by tablespace_name )b
where a.tablespace_name (+) = b.tablespace_name
and b.tablespace_name='&tablespace_name' order by 5;

select file_name, bytes/1024/1024/1024 size_gb, maxbytes/1024/1024/1024 maxsize_gb, AUTOEXTENSIBLE, status, INCREMENT_BY from dba_data_files where tablespace_name = '&NAME' order by 2 desc;
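
If a tablespace reported above is close to full, it is usually extended by resizing an existing datafile or adding a new one. A minimal sketch, with placeholder file names and sizes:

alter database datafile '/u01/oradata/ORCL/users01.dbf' resize 10g;
-- or add a new autoextensible datafile
alter tablespace users add datafile '/u01/oradata/ORCL/users02.dbf' size 1g autoextend on next 256m maxsize 32767m;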


Tuesday, December 3, 2019

Snapshot Standby database and how to convert physical standby database to snapshot standby database (READ WRITE mode)



SQL> ALTER SESSION SET NLS_DATE_FORMAT = 'DD-MON-YYYY HH24:MI:SS';

Session altered.

SQL> SELECT a.thread#, b.last_seq, a.applied_seq, a.last_app_timestamp, b.last_seq-a.applied_seq ARC_DIFF FROM (SELECT thread#, MAX(sequence#) applied_seq, MAX(next_time) last_app_timestamp FROM gv$archived_log WHERE applied = 'YES' GROUP BY thread#) a, (SELECT thread#, MAX(sequence#) last_seq FROM gv$archived_log GROUP BY thread#) b WHERE a.thread# = b.thread#;

   THREAD#   LAST_SEQ APPLIED_SEQ LAST_APP_TIMESTAMP     ARC_DIFF
---------- ---------- ----------- -------------------- ----------
         1         75          75 04-DEC-2019 11:09:55          0
         2         69          69 04-DEC-2019 11:07:26          0

SQL> select status,instance_name,database_role,open_mode from v$database,v$Instance;

STATUS       INSTANCE_NAME    DATABASE_ROLE    OPEN_MODE
------------ ---------------- ---------------- --------------------
MOUNTED      RAJDBA           PHYSICAL STANDBY MOUNTED

SQL> select thread#,max(sequence#) from v$archived_log where applied='YES' group by thread#;

   THREAD# MAX(SEQUENCE#)
---------- --------------
         1             75
         2             69

SQL> select flashback_on from v$database;

FLASHBACK_ON
------------------
NO

SQL> show parameter db_recovery_file_dest

NAME                                 TYPE        VALUE
------------------------------------ ----------- ------------------------------
db_recovery_file_dest                string      /fra
db_recovery_file_dest_size           big integer 820G
SQL> alter database flashback_on on;
alter database flashback_on on
                            *
ERROR at line 1:
ORA-02231: missing or invalid option to ALTER DATABASE


SQL> alter database flashback on;
alter database flashback on
*
ERROR at line 1:
ORA-01153: an incompatible media recovery is active


SQL> alter database recover managed standby database cancel;

Database altered.

SQL> alter database flashback on;

Database altered.

SQL> select flashback_on from v$database;

FLASHBACK_ON
------------------
YES

Once the standby database is mounted, convert the Physical standby database to snapshot standby


SQL> alter database convert to snapshot standby;

Database altered.

SQL> alter database open;

Database altered.

SQL> select status,instance_name,database_role,open_mode from v$database,v$Instance;

STATUS       INSTANCE_NAME    DATABASE_ROLE    OPEN_MODE
------------ ---------------- ---------------- --------------------
OPEN         RAJDBA           SNAPSHOT STANDBY READ WRITE

SET PAUSE ON
SET PAUSE 'Press Return to Continue'
SET PAGESIZE 60
SET LINESIZE 300
SET VERIFY OFF

COLUMN scn FOR 999,999,999,999,999
COLUMN Incar FOR 99
COLUMN name FOR A25
COLUMN storage_size FOR 999,999,999,999
COLUMN guarantee_flashback_database FOR A3

SELECT 
      database_incarnation# as Incar,
      scn,
      name,
      time,
      storage_size,
      guarantee_flashback_database
FROM 
      v$restore_point
ORDER BY 4
/
INCAR                  SCN NAME                      TIME                                                                            STORAGE_SIZE GUA
----- -------------------- ------------------------- --------------------------------------------------------------------------- ---------------- ---
    3    6,086,495,469,099 SNAPSHOT_STANDBY_REQUIRED 04-DEC-19 11.24.09.000000000 AM                                                  209,715,200 YES
                           _12/04/2019 11:24:09

SQL> select open_mode from v$database;
Press Return to Continue

OPEN_MODE
--------------------
READ WRITE

SQL> !hostname
rajhost

SQL>


Steps to convert a snapshot standby database back to a physical standby database.



SQL> select open_mode from v$database;
Press Return to Continue

OPEN_MODE
--------------------
READ WRITE

SQL> !hostname
rajhost

SQL> shut immediate
Database closed.
Database dismounted.
ORACLE instance shut down.
SQL> startup mount
ORA-32004: obsolete or deprecated parameter(s) specified for RDBMS instance
ORACLE instance started.
Press Return to Continue

Total System Global Area 1.0737E+10 bytes
Fixed Size                  3722144 bytes
Variable Size            1275069536 bytes
Database Buffers         9428795392 bytes
Redo Buffers               29831168 bytes
Database mounted.
SQL> alter database convert to physical standby;

Database altered.

SQL> select status,instance_name,database_role,open_mode from v$database,v$Instance;
Press Return to Continue

STATUS       INSTANCE_NAME    DATABASE_ROLE    OPEN_MODE
------------ ---------------- ---------------- --------------------
MOUNTED      RAJDBA           PHYSICAL STANDBY MOUNTED

SQL>

SQL> ALTER DATABASE RECOVER MANAGED STANDBY DATABASE USING ARCHIVED LOGFILE DISCONNECT;

Database altered.

SQL> SELECT RECOVERY_MODE FROM V$ARCHIVE_DEST_STATUS;
Press Return to Continue

RECOVERY_MODE
-----------------------
MANAGED
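
To confirm that managed recovery is running again, the MRP process can be checked with a standard query (not part of the original output):

SQL> select process, status, thread#, sequence# from v$managed_standby where process like 'MRP%';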

Tuesday, June 4, 2019

EM Query




Snapshots (take snaps before & after test) – exec DBMS_WORKLOAD_REPOSITORY.CREATE_SNAPSHOT;
AWR Report (@?/rdbms/admin/awrrpt.sql) – Workload Repository report; top events are where to start
ADDM Report (@?/rdbms/admin/addmrpt.sql) – Oracle-provided recommendations for further analysis
ASH Report (@?/rdbms/admin/ashrpt.sql) – use particularly for concurrency issues
AWR SQL Report (@?/rdbms/admin/awrsqrpt.sql) – with a sql_id, get the execution plan
A non-interactive way to pull the AWR report is sketched below.
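
The AWR report can also be pulled without the interactive prompts of awrrpt.sql by calling DBMS_WORKLOAD_REPOSITORY directly. A sketch; the dbid, instance number and snap IDs are placeholders (take them from v$database, v$instance and dba_hist_snapshot):

set long 1000000 pages 0
select output
from table(dbms_workload_repository.awr_report_text(1234567890, 1, 100, 101));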

tune sql

@$ORACLE_HOME/rdbms/admin/sqltrpt.sql
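
sqltrpt.sql prompts for a sql_id and runs the SQL Tuning Advisor underneath; the same can be driven manually with DBMS_SQLTUNE. A sketch, with the sql_id and time limit as placeholders:

set serveroutput on
declare
  l_task varchar2(64);
begin
  l_task := dbms_sqltune.create_tuning_task(sql_id => 'abcd1234efgh5', time_limit => 600);
  dbms_sqltune.execute_tuning_task(task_name => l_task);
  dbms_output.put_line('Tuning task: ' || l_task);
end;
/
set long 100000 pages 0
select dbms_sqltune.report_tuning_task('&task_name') from dual;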



set linesize 200
col BEGIN_INTERVAL_TIME format a70
select * from (select snap_id,begin_interval_time from dba_hist_snapshot order by begin_interval_time desc) where rownum < 3;
---------------------------------------------
Set pages 1000
Set lines 75
Select a.execution_end, b.type, b.impact, d.rank, d.type, 
'Message           : '||b.message MESSAGE,
'Command To correct: '||c.command COMMAND,
'Action Message    : '||c.message ACTION_MESSAGE
From dba_advisor_tasks a, dba_advisor_findings b,
Dba_advisor_actions c, dba_advisor_recommendations d
Where a.owner=b.owner and a.task_id=b.task_id
And b.task_id=d.task_id and b.finding_id=d.finding_id
And a.task_id=c.task_id and d.rec_id=c.rec_Id
And a.task_name like 'ADDM%' and a.status='COMPLETED'
and a.description like '%4782%'
Order by b.impact, d.rank;

-----------------------------------------------------------

export ORACLE_SID=$1
export ORAENV_ASK=NO
. oraenv
sqlplus -S / as sysdba <<oogy
set echo off
set lines 100
set pages 200
set trimspool on
set termout off
set feedback off

column dcol new_value mydate noprint
select to_char(sysdate,'YYMMDD') dcol from dual;

spool /home/oraprd/scripts/dbreport_$1_&mydate..txt

ttitle 'Average Active Sessions in the last week: Instance $1'

column sample_hour format a16
select
   to_char(round(sub1.sample_time, 'HH24'), 'YYYY-MM-DD HH24:MI') as sample_hour
,    round(avg(sub1.on_cpu),1) as cpu_avg
,    round(avg(sub1.waiting),1) as wait_avg
,    round(avg(sub1.active_sessions),1) as act_avg
,    round( (variance(sub1.active_sessions)/avg(sub1.active_sessions)),1) as act_var_mean
from
   ( -- sub1: one row per second, the resolution of SAMPLE_TIME
     select
        sample_id,
        sample_time,
        sum(decode(session_state, 'ON CPU', 1, 0))  as on_cpu,
        sum(decode(session_state, 'WAITING', 1, 0)) as waiting,
        count(*) as active_sessions
     from
        dba_hist_active_sess_history
     where
        sample_time > sysdate - 7
     group by
        sample_id,
        sample_time
   ) sub1
group by
   round(sub1.sample_time, 'HH24')
order by
   round(sub1.sample_time, 'HH24');


ttitle 'Most expensive queries in the last week: Instance $1'
-- gets most expensive queries
-- (by time spent, change "order by" to use another metric)
-- after a specific date
select
   sub.sql_id,
   sub.seconds_used,
   sub.executions,
   sub.gets
from
   ( -- sub to sort before rownum
     select
        sql_id,
        round(sum(elapsed_time_delta)/1000000) as seconds_used,
        sum(executions_delta) as executions,
        sum(buffer_gets_delta) as gets
     from
        dba_hist_snapshot natural join dba_hist_sqlstat
     where
        begin_interval_time > sysdate - 7
     group by
        sql_id
     order by
        2 desc
   ) sub
where
   rownum < 30
;


set long 32768
ttitle 'Text for most expensive SQL in the last week: Instance $1'
select sql_text
from dba_hist_sqltext
where sql_id =
(
select sub.sql_id
from
   ( -- sub to sort before rownum
     select
        sql_id,
        round(sum(elapsed_time_delta)/1000000) as seconds_since_date,
        sum(executions_delta) as execs_since_date,
        sum(buffer_gets_delta) as gets_since_date
     from
        dba_hist_snapshot natural join dba_hist_sqlstat
     where
        begin_interval_time > sysdate - 7
     group by
        sql_id
     order by
        2 desc
   ) sub
where
   rownum = 1
);

spool off;
exit
oogy

Collect statistics on fixed objects using DBMS_STATS.GATHER_FIXED_OBJECTS_STATS.

# Fixed objects stats 

1. Check fixed object stats 

             select count(1) from tab_stats$; 

2. Gather fixed objects stats

           exec dbms_stats.gather_fixed_objects_stats;

3. Check fixed object stats 
              select count(1) from tab_stats$; 


# Gather system stat  

execute dbms_stats.gather_system_stats('Start');
-- few hour delay during high workload
execute dbms_stats.gather_system_stats('Stop');

# Flush shared pool

Alter system flush shared_pool;

 Roll back if the change is not successful:


1. exec dbms_stats.delete_fixed_objects_stats();
2. exec DBMS_STATS.DELETE_SYSTEM_STATS;

---------------------------------------------------------------------------
http://www.dba-oracle.com/t_dbms_stats_gather_system_stats.htm

The dbms_stats.gather_system_stats procedure is especially useful for multi-mode Oracle shops that run OLTP during the day and DSS at night.  You invoke the dbms_stats.gather_system_stats procedure as an elapsed time capture, making sure to collect the statistics during a representative heavy workload:

execute dbms_stats.gather_system_stats('Start');
-- one hour delay during high workload
execute dbms_stats.gather_system_stats('Stop');

The data collection mechanism of the dbms_stats.gather_system_stats procedure works in a similar fashion to my script that measures I/O times to optimize the optimizer_index_cost_adj parameter. The dbms_stats.gather_system_stats procedure is also related to the under-documented _optimizer_cost_model parameter and your db_file_multiblock_read_count setting.

The output from dbms_stats.gather_system_stats is stored in the aux_stats$ table and you can query it as follows:

select pname, pval1 from sys.aux_stats$;

Here are the data items collected by dbms_stats.gather_system_stats:

No Workload (NW) stats:

CPUSPEEDNW - CPU speed
IOSEEKTIM - The I/O seek time in milliseconds
IOTFRSPEED - I/O transfer speed in bytes per millisecond
Workload-related stats:

SREADTIM  - Single block read time in milliseconds
MREADTIM - Multiblock read time in ms
CPUSPEED - CPU speed
MBRC - Average blocks read per multiblock read (see db_file_multiblock_read_count)
MAXTHR - Maximum I/O throughput (for OPQ only)
SLAVETHR - Parallel query slave throughput (OPQ only)
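
If one of the gathered workload figures looks unrepresentative, individual values can also be set manually with DBMS_STATS.SET_SYSTEM_STATS; a sketch with illustrative values only:

exec dbms_stats.set_system_stats('MBRC', 16);
exec dbms_stats.set_system_stats('SREADTIM', 4);
exec dbms_stats.set_system_stats('MREADTIM', 10);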
The dbms_stats.gather_system_stats procedure is very similar to my script for setting optimizer_index_cost_adj, where I compare the relative costs of sequential and scattered read times:

select 
   sum(a.time_waited_micro)/sum(a.total_waits)/1000000 c1, 
   sum(b.time_waited_micro)/sum(b.total_waits)/1000000 c2,
   (
      sum(a.total_waits) / 
      sum(a.total_waits + b.total_waits)
   ) * 100 c3,
   (
      sum(b.total_waits) / 
      sum(a.total_waits + b.total_waits)
   ) * 100 c4,
  (
      sum(b.time_waited_micro) /
      sum(b.total_waits)) / 
      (sum(a.time_waited_micro)/sum(a.total_waits)
   ) * 100 c5 
from 
   dba_hist_system_event a, 
   dba_hist_system_event b
where 
   a.snap_id = b.snap_id
and 
   a.event_name = 'db file scattered read'
and 
   b.event_name = 'db file sequential read';
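
The c5 column expresses the average sequential-read wait as a percentage of the average scattered-read wait, which is the usual starting point for optimizer_index_cost_adj. Applying the measured figure is then a one-liner (20 is only a placeholder):

alter system set optimizer_index_cost_adj = 20 scope=both;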




set lines 1000 pages 50000
set markup html on
spool active.html



SELECT *
FROM   (SELECT Substr(a.sql_text,1,50) sql_text,
               Trunc(a.disk_reads/Decode(a.executions,0,1,a.executions)) reads_per_execution, 
               a.buffer_gets, 
               a.disk_reads, 
               a.executions, 
               a.sorts,
               a.address
        FROM   v$sqlarea a
        ORDER BY 2 DESC)
WHERE  rownum <= &&1;

SELECT module, sql_text, username, disk_reads_per_exec, buffer_gets, 
 disk_reads, parse_calls, sorts, executions, rows_processed, hit_ratio, 
 first_load_time, last_load_time, sharable_mem, persistent_mem, runtime_mem, cpu_time, cpu_time/1000 as cputime_per_execution, 
 elapsed_time 
 FROM 
 (SELECT module, sql_text , 
 u.username, 
 round((s.disk_reads/decode(s.executions,0,1, s.executions)),2) disk_reads_per_exec, 
 s.disk_reads, s.buffer_gets, s.parse_calls, s.sorts, s.executions , 
 s.rows_processed, 
 100 - round(100 * s.disk_reads/greatest(s.buffer_gets,1),2) hit_ratio, 
 s.first_load_time , s.last_load_time, sharable_mem , persistent_mem , runtime_mem, 
 cpu_time, elapsed_time, address, hash_value 
 FROM sys.v_$sql s, sys.all_users u 
 WHERE s.parsing_user_id=u.user_id 
 --and module = 'JDBC Thin Client' 
 and UPPER(u.username) like '%E%' 
 ORDER BY cpu_time desc, last_load_time desc) WHERE rownum <= 50; 
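
The snippet above opens an HTML spool (set markup html on, spool active.html) but never closes it; an assumed completion would be:

spool off
set markup html off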

--------------------------------------------------------------------------------------


Collect statistics on fixed objects using DBMS_STATS.GATHER_FIXED_OBJECTS_STATS. 

 exec dbms_stats.gather_fixed_objects_stats;

 Start the system statistics gathering:

 EXEC DBMS_STATS.GATHER_SYSTEM_STATS('START');

 select OWNER, TABLE_NAME, LAST_ANALYZED from dba_tab_statistics where table_name='X$KGLDP';

 Then stop the gathering after 2 to 3 days:
  
EXEC DBMS_STATS.GATHER_SYSTEM_STATS('STOP');

set lines 180 pages 1000
col client_name for a40
col attributes for a60
select client_name, status,attributes,service_name from dba_autotask_client
/

BEGIN
  DBMS_AUTO_TASK_ADMIN.disable(
    client_name => 'auto space advisor',
    operation   => NULL,
    window_name => NULL);
END;
/

BEGIN
  DBMS_AUTO_TASK_ADMIN.disable(
    client_name => 'sql tuning advisor',
    operation   => NULL,
    window_name => NULL);
END;
/

BEGIN
  DBMS_AUTO_TASK_ADMIN.disable(
    client_name => 'auto optimizer stats collection',
    operation   => NULL,
    window_name => NULL);
END;
/

select client_name, status,attributes,service_name from dba_autotask_client
/

## Enabling
BEGIN
dbms_auto_task_admin.enable(client_name => 'sql tuning advisor', operation => NULL, window_name => NULL);
END;
/
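
To verify what the maintenance windows actually executed after re-enabling a client, the autotask history can be checked; a suggested query against a standard dictionary view:

select client_name, window_name, jobs_created, jobs_started, jobs_completed
from dba_autotask_client_history
where client_name = 'sql tuning advisor'
order by window_start_time;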

Monday, June 3, 2019

Clone RDBMS RAC binaries in 12c in silent mode - one node

Clone RDBMS RAC binaries in 12c in silent mode

1. Copy the binaries from /h01/app/oracle/db/12.1.0.2_03 to /h01/app/oracle/db/12.1.0.2_02

cp -Rp /h01/app/oracle/db/12.1.0.2_03 /h01/app/oracle/db/12.1.0.2_02


export ORACLE_HOME=/h01/app/oracle/db/12.1.0.2_02
export PATH=$ORACLE_HOME/bin:$PATH

2. Run the clone command in silent mode on both the DB hosts one after the other as “oracle” user

perl  /h01/app/oracle/db/12.1.0.2_02/clone/bin/clone.pl -silent ORACLE_HOME="/h01/app/oracle/db/12.1.0.2_02" ORACLE_HOME_NAME="OraDB12Home2" ORACLE_BASE="/h01/app" '-O"CLUSTER_NODES={raj}"' '-O"LOCAL_NODE=raj"'

oracle@raj (SID:DEMO) /h01/app/oracle/db/12.1.0.2_02 $ export ORACLE_HOME=/h01/app/oracle/db/12.1.0.2_02
oracle@raj (SID:DEMO) /h01/app/oracle/db/12.1.0.2_02 $ export PATH=$ORACLE_HOME/bin:$PATH
oracle@raj (SID:DEMO) /h01/app/oracle/db/12.1.0.2_02 $ perl /h01/app/oracle/db/12.1.0.2_02/clone/bin/clone.pl -silent ORACLE_HOME="/h01/app/oracle/db/12.1.0.2_02" ORACLE_HOME_NAME="OraDB12Home2" ORACLE_BASE="/h01/app" '-O"CLUSTER_NODES={raj}"' '-O"LOCAL_NODE=raj"'
./runInstaller -clone -waitForCompletion  "ORACLE_HOME=/h01/app/oracle/db/12.1.0.2_02" "ORACLE_HOME_NAME=OraDB12Home2" "ORACLE_BASE=/h01/app" "CLUSTER_NODES={raj}" "LOCAL_NODE=raj" -silent -paramFile /h01/app/oracle/db/12.1.0.2_02/clone/clone_oraparam.ini
Starting Oracle Universal Installer...

Checking Temp space: must be greater than 500 MB.   Actual 4023 MB    Passed
Checking swap space: must be greater than 500 MB.   Actual 32767 MB    Passed
Preparing to launch Oracle Universal Installer from /tmp/OraInstall2019-06-04_02-18-54PM. Please wait ...You can find the log of this install session at:
 /c01/app/oraInventory/logs/cloneActions2019-06-04_02-18-54PM.log
..................................................   5% Done.
..................................................   10% Done.
..................................................   15% Done.
..................................................   20% Done.
..................................................   25% Done.
..................................................   30% Done.
..................................................   35% Done.
..................................................   40% Done.
..................................................   45% Done.
..................................................   50% Done.
..................................................   55% Done.
..................................................   60% Done.
..................................................   65% Done.
..................................................   70% Done.
..................................................   75% Done.
..................................................   80% Done.
..................................................   85% Done.
..........
Copy files in progress.

Copy files successful.

Link binaries in progress.

Link binaries successful.

Setup files in progress.

Setup files successful.

Setup Inventory in progress.

Setup Inventory successful.

Finish Setup successful.
The cloning of OraDB12Home2 was successful.
Please check '/c01/app/oraInventory/logs/cloneActions2019-06-04_02-18-54PM.log' for more details.

Setup Oracle Base in progress.

Setup Oracle Base successful.
..................................................   95% Done.

As a root user, execute the following script(s):
        1. /h01/app/oracle/db/12.1.0.2_02/root.sh

Execute /h01/app/oracle/db/12.1.0.2_02/root.sh on the following nodes:
[raj]


..................................................   100% Done.
oracle@raj (SID:DEMO) /h01/app/oracle/db/12.1.0.2_02 $ grep $ORACLE_HOME /c01/app/oraInventory/ContentsXML/inventory.xml
<HOME NAME="OraDB12Home2" LOC="/h01/app/oracle/db/12.1.0.2_02" TYPE="O" IDX="27">


3. Run the root.sh on both the nodes

[root@raj ~]# /h01/app/oracle/db/12.1.0.2_02/root.sh

4. You can confirm this by checking the inventory file

grep $ORACLE_HOME /c01/app/oraInventory/ContentsXML/inventory.xml

Tuesday, April 23, 2019

A failover is typically used only when the Oracle primary database becomes unavailable

A failover is typically used only when the primary database becomes unavailable, and there is no possibility of restoring it to service within a reasonable period of time

Performing a Failover to a Physical Standby Database –

Step 1 – Flush any unsent redo from the primary database to the target standby database

If the primary database can be mounted, it may be possible to flush any unsent archived and current redo from the primary database to the standby database. If this operation is successful, a zero data loss failover is possible even if the primary database is not running in a zero data loss protection mode. Mount, but do not open, the primary database. If the primary database cannot be mounted, go to Step 2. On the primary database (if accessible during a real disaster):

SQL> ALTER SYSTEM FLUSH REDO TO target_db_name;

For target_db_name, specify the DB_UNIQUE_NAME of the standby database that is to receive the redo flushed from the primary database. This statement flushes any unsent redo from the primary database to the standby database and waits for that redo to be applied to the standby database. If this statement completes without any errors, go to Step 3. If the statement completes with any error, or if it must be stopped because you cannot wait any longer for it to complete, continue with Step 2.


Step 2 – Identify and resolve any archived redo log gaps. On the standby database:

SQL> SELECT DATABASE_MODE,RECOVERY_MODE,GAP_STATUS FROM V$ARCHIVE_DEST_STATUS;

SQL> select process,pid,status,thread#,sequence#,block#,blocks from v$managed_standby;

A status of WAIT_FOR_LOG in the output indicates redo that has not yet been applied on the DR site.

If possible, copy any missing archived redo log files to the target standby database from the primary database. 
If the primary database is not accessible, there will be data loss.

Run the following query to find the last applied archive log sequence number for each thread.
SELECT UNIQUE THREAD# AS THREAD, MAX(SEQUENCE#) OVER (PARTITION BY thread#) AS LAST from V$ARCHIVED_LOG;
If you can access archive logs that were not copied to the standby, copy them across. After copying the archive log files, register them with the standby database.
This operation must be done for every thread.

SQL> alter database register physical logfile '/b01/fra/tstssby_991834413_1_102.arc';
Check the standby database for a redo gap. If there is a gap, copy the missing archive log files and register them.

Repeat this until the gap check returns no rows, then stop the redo apply process on the standby database.
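
A quick way to confirm whether a gap remains on the standby is v$archive_gap (a standard query, not part of the original text); no rows returned means no gap:

SQL> select thread#, low_sequence#, high_sequence# from v$archive_gap;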

Step 3 – Stop Redo Apply

Step 4 – Finish applying all received redo data

 Verify that the target standby database is ready to become a primary database

SQL> SELECT SWITCHOVER_STATUS FROM V$DATABASE; 

SWITCHOVER_STATUS 

TO PRIMARY 

SQL> ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL;
Database altered.

SQL> ALTER DATABASE RECOVER MANAGED STANDBY DATABASE FINISH;

Database altered.

SQL> ALTER DATABASE COMMIT TO SWITCHOVER TO PRIMARY WITH SESSION SHUTDOWN;

Database altered.

SQL> ALTER DATABASE OPEN;
Database altered.
SQL>
SQL> select status,instance_name,database_role from v$database,v$instance;

STATUS       INSTANCE_NAME    DATABASE_ROLE
------------ ---------------- ----------------
OPEN         TSTSBY           PRIMARY

Step 5 – On the standby database:
SQL> ALTER DATABASE RECOVER MANAGED STANDBY DATABASE FINISH;

If this statement completes with an error and the error condition cannot be resolved, a failover can still be performed (with some data loss) by issuing the following SQL statement on the standby database:

SQL> ALTER DATABASE ACTIVATE PHYSICAL STANDBY DATABASE;

Open the new primary database. On the standby database:

SQL> ALTER DATABASE OPEN;

Convert the database to a physical standby database

1. On the old primary database:

SQL> ALTER DATABASE CONVERT TO PHYSICAL STANDBY;

This statement will dismount the database after successfully converting the control file to a standby control file

SQL> SHUTDOWN IMMEDIATE;

SQL> STARTUP MOUNT;

2. On the new primary database:

Query to see the current state of the archive destinations:

 SQL> SELECT DEST_ID, DEST_NAME, STATUS, PROTECTION_MODE, DESTINATION, ERROR,SRL FROM V$ARCHIVE_DEST_STATUS;

 If necessary, enable the destination

SQL> ALTER SYSTEM SET LOG_ARCHIVE_DEST_STATE_n=ENABLE;

Perform a log switch to ensure the standby database begins receiving redo data from the new primary database

 SQL> ALTER SYSTEM SWITCH LOGFILE;

SQL> SELECT DEST_ID, DEST_NAME, STATUS, PROTECTION_MODE, DESTINATION, ERROR,SRL FROM V$ARCHIVE_DEST_STATUS;
  
Start Redo Apply on the new physical standby database

On the new physical standby database:

SQL> ALTER DATABASE RECOVER MANAGED STANDBY DATABASE USING CURRENT LOGFILE DISCONNECT;
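
Once redo apply is running, transport and apply lag on the new physical standby can be watched with v$dataguard_stats (shown here as a suggestion, not from the original post):

SQL> select name, value, time_computed from v$dataguard_stats where name in ('transport lag', 'apply lag');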


About Me

Sydney, NSW, Australia
An experienced IT professional (14+ years) who has worked in multiple industries, including consulting, education, financial, and retail sectors. Has solid experience in relational database design, analysis, modeling, development, administration, implementation, troubleshooting, and support. Experienced as an Oracle/SQL Server/MySQL DBA covering setup, configuration, tuning, backups, disaster recovery, and high availability: Oracle 11g/12c/19c RAC clusters, SQL Server 2008/2012/2016 clusters, Oracle engineered systems such as Exadata and ODA, and Oracle and Azure cloud. Has performed software installations, migrations, database capacity planning, backup automation, cloud migrations, and Oracle tuning on Windows/Unix platforms. In addition, experienced in Unix administration, shell scripting, and PowerShell scripting. A team player with good communication skills.