search audit trail

select * 
from dba_audit_trail t 
where trunc(t.timestamp) = to_date('2017-05-17', 'yyyy-mm-dd') 
and username not in ('DBSNMP')
and action_name not in ('LOGON', 'LOGOFF')
and obj_name = 'table_name'
;
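
the rows above show up only if traditional auditing is enabled for the object (and audit_trail is set to db); a minimal sketch of enabling and checking it, schema and table names are placeholders:

audit select, insert, update, delete on scott.table_name by access;

select * from dba_obj_audit_opts where owner = 'SCOTT';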

some ldap commands

eusm createDomain 
  domain_name="" 
  realm_dn="dc=..,dc=.." 
  ldap_host="" 
  ldap_port= 
  ldap_user_dn="" 
  ldap_user_password="**"

oidctl connect=OIDDB 
  server=oidrepld 
  instance=1 
  componentname= 
  flags="-h  -p " 
  start | stop


oidcmprec operation=[reconcile | compare] 
  source=host1:port 
  destination=host2:port
  base='("cn=,cn=,cn=..,cn=..,cn=oraclecontext,dc=..,dc=..")' 
  scope=sub

oidcmprec operation=reconcile 
  source=host1:port 
  destination=host2:port 
  base='("cn=,cn=oraclecontext,dc=..,dc=..")' 
  scope=sub


ldapsearch -h  -p  -D cn= -w ** -b "dc=..,dc=.." "(cn=)"

ldapsearch -h  -p  -D cn= -w ** -b "cn=users,dc=..,dc=.." "(uid=)" 

check Linux packages


#!/bin/bash 

array=( 
  binutils-2.23.52.0.1.el7.x86_64 
  compat-libcap1-1.10-1.x86_64 
  compat-libstdc++-33-3.2.3-69.el7.x86_64 
  compat-libstdc++-33-3.2.3-69.el7.i686 
  gcc-4.8.2-0.el7.x86_64 
  gcc-c++-4.8.2-0.el7.x86_64 
  glibc-2.17-0.0.el7.i686 
  glibc-2.17-0.0.el7.x86_64 
  glibc-devel-2.17-0.0.el7.x86_64 
  ksh.x86_64 
  libgcc-4.8.2-00.el7.i686 
  libgcc-4.8.2-00.el7.x86_64 
  libstdc++-4.8.2-00.el7.x86_64 
  libstdc++-4.8.2-00.el7.i686 
  libstdc++-devel-4.8.2-00.el7.x86_64 
  libaio-0.3.109-00.el7.x86_64 
  libaio-devel-0.3.109-00.el7.x86_64 
  make-3.82-00.el7.x86_64 
  sysstat-10.1.5-00.el7.x86_64 
) 

len=${#array[*]} 
i=0 
line='-------------------------------------------------------------------' 
while [[ $i -lt $len ]]; do 
  # build grep patterns: anchor the package name; loosen the arch into a regex (i686 -> i.86$, x86_64 -> x8._64$)
  req_pkg="^${array[$i]%%.*}"
  req_arch="${array[$i]##*.}"
  req_arch="${req_arch/6/.}"; req_arch="${req_arch/3/.}$"
  # installed rpm matching the required name and arch
  curr_rpm=$(rpm -qa --qf "%{NAME}-%{VERSION}-%{RELEASE}.%{ARCH}\n" | grep "${req_pkg}" | grep "${req_arch}")
  # string-compare the installed name-version prefix against the required one
  if [ "$(expr "${curr_rpm%%.*}" \>= "${array[$i]%%.*}")" -eq 1 ]; then
    result1=pass
  else
    result1=failed
  fi
  printf "%s %s [$result1]\n" ${array[$i]} ${line:${#array[$i]}} 
  echo $curr_rpm; 
  let i++; 
done 

oracle database install

# set environment variable
export DISPLAY=hostname:0.0
export TMP=/u01/tmp
export TMPDIR=/u01/tmp

# check limits

# open file descriptors (1024 - 65536)
$ ulimit -Sn
$ ulimit -Hn

# number of processes available to a single user (2047 - 16384)
$ ulimit -Su
$ ulimit -Hu

# size of the stack segment of the process (10240 KB - 32768 KB)
$ ulimit -Ss
$ ulimit -Hs

# set limits (/etc/security/limits.conf)
oracle  hard    nproc   16384
oracle  soft    nproc   2047
oracle  hard    nofile  65536
oracle  soft    nofile  4096
oracle  hard    stack   32768
oracle  soft    stack   10240

./runInstaller

# dbca create database

export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=/u01/app/oracle/product/12.2.0/dbhome_1
export PATH=$PATH:$ORACLE_HOME/bin
export DISPLAY=hostname:0.0

dbca
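
dbca can also run non-interactively in silent mode (no X display needed); a sketch, where the template, names, and passwords are placeholders:

dbca -silent -createDatabase \
  -templateName General_Purpose.dbc \
  -gdbname orcl -sid orcl \
  -sysPassword <pwd> -systemPassword <pwd> \
  -characterSet AL32UTF8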

delete duplicate rows

-- find the duplicates
select *
  from   (
    select f.*,
           count(*) over (partition by title, release_date) ct
    from   films f
  )
  where  ct > 1;

-- oracle
delete films
  where  rowid not in (
    select min(rowid)
    from   films
    group  by title, release_date
  );

-- sql server
delete films
  where  %%physloc%% not in (
    select min(%%physloc%%)
    from   films
    group  by title, release_date
  );
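
-- alternative (oracle): rank rows per key with row_number() and delete everything after the first;
-- a sketch equivalent to the delete above
delete from films
  where  rowid in (
    select rid
    from   (
      select rowid rid,
             row_number() over (partition by title, release_date order by rowid) rn
      from   films
    )
    where  rn > 1
  );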

sql plan baseline


optimizer_capture_sql_plan_baselines = FALSE
optimizer_use_sql_plan_baselines     = TRUE
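
-- both parameters are dynamic; a sketch of setting them at instance level
alter system set optimizer_capture_sql_plan_baselines = false;
alter system set optimizer_use_sql_plan_baselines = true;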


-- create tuning set
BEGIN
  dbms_sqltune.create_sqlset(
      sqlset_name => 'STS1', 
      sqlset_owner => 'SYSTEM');
END;
/

-- populate tuning set from cursor cache
DECLARE
  stscur dbms_sqltune.sqlset_cursor;
BEGIN
  
  open stscur for
    select value(p)
    from   table(dbms_sqltune.select_cursor_cache(
                   basic_filter      => 'sql_id = ''...''',
                   object_filter     => null,
                   ranking_measure1  => null,
                   ranking_measure2  => null,
                   ranking_measure3  => null,
                   result_percentage => null,
                   result_limit      => null,
                   attribute_list    => 'ALL')) P;
                   
  DBMS_SQLTUNE.LOAD_SQLSET(sqlset_name => 'STS1',
                           populate_cursor => stscur,
                           sqlset_owner    => 'SYSTEM');
END;
/

-- populate tuning set from AWR
DECLARE
  cur DBMS_SQLTUNE.SQLSET_CURSOR;
BEGIN  
  OPEN cur FOR
    SELECT VALUE(P)
      FROM table(
        DBMS_SQLTUNE.SELECT_WORKLOAD_REPOSITORY(
                    begin_snap        => 1234,
                    end_snap          => 1235,
                    basic_filter      => 'sql_id = ''...''',
                    object_filter     => NULL, 
                    ranking_measure1  => NULL,
                    ranking_measure2  => NULL,
                    ranking_measure3  => NULL,
                    result_percentage => NULL,
                    result_limit      => NULL,
                    attribute_list    => 'ALL')) P;

  DBMS_SQLTUNE.LOAD_SQLSET(sqlset_name => 'STS1',
                           populate_cursor => cur,
                           sqlset_owner    => 'SYSTEM');
END;
/

-- select plans in tuning set
SELECT
     first_load_time,
     executions as execs,
     parsing_schema_name,
     elapsed_time  / 1000000 as elapsed_time_secs  ,
     cpu_time / 1000000 as cpu_time_secs           ,
     buffer_gets,
     disk_reads,
     direct_writes,
     rows_processed,
     fetches,
     optimizer_cost,
     sql_plan,
     plan_hash_value,
     sql_id,
     sql_text
   FROM TABLE(DBMS_SQLTUNE.SELECT_SQLSET(
                   sqlset_name  => 'STS1',
                   sqlset_owner => 'SYSTEM'))
;


-- create baseline using a plan from the tuning set
DECLARE
  my_plans pls_integer;
BEGIN
  my_plans := DBMS_SPM.LOAD_PLANS_FROM_SQLSET(
       sqlset_name  => 'STS1', 
       sqlset_owner => 'SYSTEM',
       basic_filter=>'plan_hash_value = ''1239572551'''
     );
 dbms_output.put_line(my_plans);                           
END;
/

-- check baselines
select sql_handle, 
       plan_name, 
       enabled, 
       accepted, 
       sql_text
  from dba_sql_plan_baselines
 where sql_text like '%...%'
;
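
-- show the stored plan for a baseline; a sketch, the sql_handle comes from the query above
select * from table(
  dbms_xplan.display_sql_plan_baseline(
    sql_handle => 'SQL_a...',
    format     => 'TYPICAL'));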

-- accept (evolve) plan for baseline and print the verification report
SET SERVEROUTPUT ON
DECLARE
  report clob;
BEGIN
  report := dbms_spm.evolve_sql_plan_baseline(
    sql_handle => 'SQL_a...', 
    plan_name  => 'SQL_PLAN_a...');
  dbms_output.put_line(report);
END;
/

unplug and plug PDB

alter pluggable database pdb1 close instances=all;
alter pluggable database pdb1 unplug into '/tmp/pdb1.xml';
drop pluggable database pdb1 keep datafiles;
create pluggable database pdb1 using '/tmp/pdb1.xml' NOCOPY TEMPFILE REUSE;
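
-- optional checks: verify the xml against the target before plugging in, then open and look for violations
-- (a sketch; pdb name and xml path are the ones used above)
set serveroutput on
declare
  compatible boolean;
begin
  compatible := dbms_pdb.check_plug_compatibility(
                  pdb_descr_file => '/tmp/pdb1.xml',
                  pdb_name       => 'pdb1');
  dbms_output.put_line(case when compatible then 'compatible' else 'see pdb_plug_in_violations' end);
end;
/

alter pluggable database pdb1 open instances=all;
select name, cause, type, message from pdb_plug_in_violations where name = 'PDB1';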

transport sql tuning sets from 11g to 12c

1. create sql tuning set
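   (a sketch, reusing the CREATE_SQLSET call from the baseline section; names are placeholders)
   BEGIN
     DBMS_SQLTUNE.CREATE_SQLSET(
       sqlset_name  => '<sts_name>',
       sqlset_owner => '<sts_owner>');
   END;
   /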

2. add sql to tuning set from AWR
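   (a sketch, same SELECT_WORKLOAD_REPOSITORY / LOAD_SQLSET pattern as in the baseline section; snapshot range and filter are placeholders)
   DECLARE
     cur DBMS_SQLTUNE.SQLSET_CURSOR;
   BEGIN
     OPEN cur FOR
       SELECT VALUE(P)
         FROM table(DBMS_SQLTUNE.SELECT_WORKLOAD_REPOSITORY(
                      begin_snap     => <begin_snap>,
                      end_snap       => <end_snap>,
                      basic_filter   => 'parsing_schema_name = ''<schema>''',
                      attribute_list => 'ALL')) P;
     DBMS_SQLTUNE.LOAD_SQLSET(
       sqlset_name     => '<sts_name>',
       populate_cursor => cur,
       sqlset_owner    => '<sts_owner>');
   END;
   /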

3. check sql tuning set
   SELECT SQL_ID, PARSING_SCHEMA_NAME AS "SCH", SQL_TEXT, 
          ELAPSED_TIME AS "ELAPSED", BUFFER_GETS
   FROM   TABLE( DBMS_SQLTUNE.SELECT_SQLSET( 
                    sqlset_name =>  '<sts_name>', 
                    sqlset_owner => '<sts_owner>' ) );

4. create staging table
   BEGIN
     DBMS_SQLTUNE.CREATE_STGTAB_SQLSET ( 
       table_name  => '<staging_table_name>',   
       schema_name => '<schema>',   
       db_version  => DBMS_SQLTUNE.STS_STGTAB_11_2_VERSION 
     );
   END;
   /

5. pack tuning set
   BEGIN
     DBMS_SQLTUNE.PACK_STGTAB_SQLSET (      
       sqlset_name          => '<sts_name>', 
       sqlset_owner         => '<sts_owner>',   
       staging_table_name   => '<staging_table_name>',   
       staging_schema_owner => '<schema>',   
       db_version           => DBMS_SQLTUNE.STS_STGTAB_11_2_VERSION 
     );
   END;
   / 

6. remap con_dbid
   BEGIN
     DBMS_SQLTUNE.REMAP_STGTAB_SQLSET (
       staging_table_name   => '<staging_table_name>',
       staging_schema_owner => '<schema>',
       old_con_dbid         => 1234,
       new_con_dbid         => 5678
     );
   END;
   /

7. expdp/impdp
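   (a sketch of moving the staging table with data pump; directory, dump file, and connect strings are placeholders)
   expdp system@<source_db> directory=DATA_PUMP_DIR dumpfile=sts_stg.dmp tables=<schema>.<staging_table_name>
   impdp system@<target_db> directory=DATA_PUMP_DIR dumpfile=sts_stg.dmp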

8. unpack tuning set from staging table
   BEGIN
     DBMS_SQLTUNE.UNPACK_STGTAB_SQLSET (
       sqlset_name        => '%',
       replace            => true,
       staging_table_name => '<staging_table_name>');
   END;
   /
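
check on target: after unpacking, the set should be visible in dba_sqlset (sketch, name is a placeholder)

   select name, owner, created, statement_count
   from   dba_sqlset
   where  name = '<sts_name>';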

certificates, keystores, wallets

certificates can be imported into an oracle wallet or a java keystore.

option 1: import certificates into an oracle wallet

orapki wallet create -wallet <wallet_location> 

orapki wallet add 
  -wallet <wallet_location> 
  -trusted_cert -cert <certificate_location>

orapki wallet display -wallet <wallet_location> 

./okvutil upload -l "<wallet_location>" 
  -t wallet 
  -g "<virtual_wallet_name>"

option 2: import certificates into a java keystore

keytool 
  -import 
  -trustcacerts 
  -alias <alias_name> 
  -file <cert_filename.crt> 
  -keystore <filename.jks>

keytool -list -v -keystore <filename.jks>

./okvutil upload -l "<keystore_location>" 
  -t jks 
  -g "<virtual_wallet_name>"

create materialized view with prebuilt table

-- step 1: create table
create table scott.weekly_orders
...

-- step 2: create materialized view
create materialized view scott.weekly_orders
  ON PREBUILT TABLE WITHOUT REDUCED PRECISION
  USING INDEX 
  REFRESH COMPLETE ON DEMAND
  USING DEFAULT LOCAL ROLLBACK SEGMENT
  USING ENFORCED CONSTRAINTS DISABLE QUERY REWRITE
as
select ...
from scott.orders
where ...
;
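
-- the mview above is refresh complete on demand, so it can be refreshed manually; a minimal sketch:
exec dbms_mview.refresh('SCOTT.WEEKLY_ORDERS', method => 'C');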